Add 0d Tensor Test Cases for cond, case, switch_case #49544

Merged 3 commits on Jan 5, 2023
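
This PR adds 0-D (scalar) tensor test cases for the static control-flow APIs cond, case, and switch_case. As context, here is a minimal dynamic-graph sketch, not part of the diff and assuming a Paddle build with 0-D tensor support (the develop branch this PR targets), of what a 0-D tensor looks like:

import paddle  # eager (dynamic-graph) mode by default

# shape=[] requests a 0-D tensor; shape=[1] would be a rank-1 tensor with one element
x = paddle.full(shape=[], dtype='float32', fill_value=0.3)
y = paddle.full(shape=[], dtype='float32', fill_value=0.1)

print(x.shape)                 # [] -- rank 0
pred = paddle.less_than(x, y)  # comparing two 0-D tensors gives a 0-D bool tensor
print(pred.shape)              # []
print(pred.item())             # False, since 0.3 < 0.1 does not hold

The tests below build the same shapes and predicates inside static-graph programs and feed them to cond and case.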
python/paddle/fluid/tests/unittests/test_case.py (161 additions, 0 deletions)
@@ -89,6 +89,67 @@ def fn_3():
np.testing.assert_allclose(res[3], 2, rtol=1e-05)
np.testing.assert_allclose(res[4], 2, rtol=1e-05)

def test_0d_tensor(self):
def fn_1():
return paddle.full(shape=[], dtype='int32', fill_value=1)

def fn_2():
return paddle.full(shape=[], dtype='int32', fill_value=2)

def fn_3():
return paddle.full(shape=[], dtype='int32', fill_value=3)

main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
x = paddle.full(shape=[], dtype='float32', fill_value=0.3)
y = paddle.full(shape=[], dtype='float32', fill_value=0.1)
z = paddle.full(shape=[], dtype='float32', fill_value=0.2)
pred_2 = paddle.less_than(x, y) # false: 0.3 < 0.1
pred_1 = paddle.less_than(z, x) # true: 0.2 < 0.3
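            # pred_1 and pred_2 are 0-D boolean tensors (shape []); feeding
            # them through case() is what this test exercises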

# call fn_1
out_0 = paddle.static.nn.control_flow.case(
pred_fn_pairs=[(pred_1, fn_1), (pred_1, fn_2)], default=fn_3
)

# call fn_2
out_1 = paddle.static.nn.control_flow.case(
pred_fn_pairs=[(pred_2, fn_1), (pred_1, fn_2)], default=fn_3
)

# call default fn_3
out_2 = paddle.static.nn.control_flow.case(
pred_fn_pairs=((pred_2, fn_1), (pred_2, fn_2)), default=fn_3
)

# no default, call fn_2
out_3 = paddle.static.nn.control_flow.case(
pred_fn_pairs=[(pred_1, fn_2)]
)

# no default and pred_2 is false, so the last fn (fn_2) is still called
# (see the standalone sketch after this file's diff)
out_4 = paddle.static.nn.control_flow.case(
pred_fn_pairs=[(pred_2, fn_2)]
)

place = (
fluid.CUDAPlace(0)
if core.is_compiled_with_cuda()
else fluid.CPUPlace()
)
exe = fluid.Executor(place)

res = exe.run(
main_program, fetch_list=[out_0, out_1, out_2, out_3, out_4]
)

np.testing.assert_allclose(res[0], 1, rtol=1e-05)
np.testing.assert_allclose(res[1], 2, rtol=1e-05)
np.testing.assert_allclose(res[2], 3, rtol=1e-05)
np.testing.assert_allclose(res[3], 2, rtol=1e-05)
np.testing.assert_allclose(res[4], 2, rtol=1e-05)

def test_return_var_tuple(self):
def fn_1():
return layers.fill_constant(
@@ -236,6 +297,106 @@ def fn_3():
np.testing.assert_allclose(res[1], 2, rtol=1e-05)
np.testing.assert_allclose(res[2], 3, rtol=1e-05)

def test_nested_0d_tensor(self):
def fn_1(x=1):
var_5 = paddle.full(shape=[], dtype='int32', fill_value=5)
var_6 = paddle.full(shape=[], dtype='int32', fill_value=6)
out = paddle.static.nn.control_flow.case(
pred_fn_pairs=[
(
var_5 < var_6,
partial(
paddle.full,
shape=[],
dtype='int32',
fill_value=x,
),
),
(
var_5 == var_6,
partial(
paddle.full,
shape=[],
dtype='int32',
fill_value=x,
),
),
]
)
return out

def fn_2(x=2):
var_5 = paddle.full(shape=[], dtype='int32', fill_value=5)
var_6 = paddle.full(shape=[], dtype='int32', fill_value=6)
out = paddle.static.nn.control_flow.case(
pred_fn_pairs=[
(var_5 < var_6, partial(fn_1, x=x)),
(
var_5 == var_6,
partial(
paddle.full,
shape=[],
dtype='int32',
fill_value=x,
),
),
]
)
return out

def fn_3():
var_5 = paddle.full(shape=[], dtype='int32', fill_value=5)
var_6 = paddle.full(shape=[], dtype='int32', fill_value=6)
out = paddle.static.nn.control_flow.case(
pred_fn_pairs=[
(var_5 < var_6, partial(fn_2, x=3)),
(
var_5 == var_6,
partial(
paddle.full,
shape=[],
dtype='int32',
fill_value=7,
),
),
]
)
return out
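
        # since var_5 < var_6 always holds, fn_3 resolves to fn_2(x=3), which
        # resolves to fn_1(3), which returns a 0-D tensor holding 3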

main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
x = paddle.full(shape=[], dtype='float32', fill_value=0.3)
y = paddle.full(shape=[], dtype='float32', fill_value=0.1)
z = paddle.full(shape=[], dtype='float32', fill_value=0.2)
pred_2 = paddle.less_than(x, y) # false: 0.3 < 0.1
pred_1 = paddle.less_than(z, x) # true: 0.2 < 0.3

out_1 = paddle.static.nn.control_flow.case(
pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3
)

out_2 = paddle.static.nn.control_flow.case(
pred_fn_pairs=[(pred_2, fn_1), (pred_1, fn_2)], default=fn_3
)

out_3 = paddle.static.nn.control_flow.case(
pred_fn_pairs=[(x == y, fn_1), (x == z, fn_2)], default=fn_3
)
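
            # out_1 -> fn_1 (pred_1 true) -> 1; out_2 -> fn_2 (pred_2 false,
            # pred_1 true) -> 2; out_3 -> default fn_3 (both equalities false) -> 3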

place = (
fluid.CUDAPlace(0)
if core.is_compiled_with_cuda()
else fluid.CPUPlace()
)
exe = fluid.Executor(place)

res = exe.run(main_program, fetch_list=[out_1, out_2, out_3])

np.testing.assert_allclose(res[0], 1, rtol=1e-05)
np.testing.assert_allclose(res[1], 2, rtol=1e-05)
np.testing.assert_allclose(res[2], 3, rtol=1e-05)


class TestAPICase_Error(unittest.TestCase):
def test_error(self):
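
Aside, not part of the diff: a minimal, self-contained sketch of the case() fallback rule that the out_0 through out_4 assertions above rely on (the first true predicate wins; with no default and no true predicate, the last fn runs). It assumes the same paddle.static.nn.control_flow.case path the tests import, and uses the public paddle.static aliases instead of the fluid helpers in the test file:

import paddle

paddle.enable_static()

main = paddle.static.Program()
startup = paddle.static.Program()
with paddle.static.program_guard(main, startup):
    # a 0-D boolean predicate that is always False
    false_pred = paddle.full(shape=[], dtype='bool', fill_value=False)

    def fn_a():
        return paddle.full(shape=[], dtype='int32', fill_value=1)

    def fn_b():
        return paddle.full(shape=[], dtype='int32', fill_value=2)

    # no predicate is true and no default is given, so case() falls back
    # to the last fn in pred_fn_pairs (fn_b), mirroring out_4 above
    out = paddle.static.nn.control_flow.case(
        pred_fn_pairs=[(false_pred, fn_a), (false_pred, fn_b)]
    )

exe = paddle.static.Executor(paddle.CPUPlace())
(res,) = exe.run(main, fetch_list=[out])
print(res)  # 2
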
python/paddle/fluid/tests/unittests/test_cond.py (173 additions, 0 deletions)
@@ -68,6 +68,115 @@ def false_func():
np.asarray(ret), np.full((3, 2), -1, np.int32), rtol=1e-05
)

def test_return_0d_tensor(self):
"""
pseudocode:

if 0.23 >= 0.1:
return 2
else:
return -1
"""

paddle.enable_static()

def true_func():
return paddle.full(shape=[], dtype='int32', fill_value=2)

def false_func():
return paddle.full(shape=[], dtype='int32', fill_value=-1)

main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
x = paddle.full(shape=[1], dtype='float32', fill_value=0.1)
y = paddle.full(shape=[1], dtype='float32', fill_value=0.23)
pred = paddle.greater_equal(y, x)
out = paddle.static.nn.cond(pred, true_func, false_func)
# out is one tensor

place = (
fluid.CUDAPlace(0)
if core.is_compiled_with_cuda()
else fluid.CPUPlace()
)
exe = fluid.Executor(place)
(ret,) = exe.run(main_program, fetch_list=[out.name])
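        # 0.23 >= 0.1, so true_func runs; ret holds the 0-D value 2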
np.testing.assert_allclose(np.asarray(ret), np.array(2), rtol=1e-05)

def test_0d_tensor_as_cond(self):
"""
pseudocode:

if 0.23 >= 0.1:
return 2
else:
return -1
"""

paddle.enable_static()

def true_func():
return paddle.full(shape=[3, 3], dtype='int32', fill_value=2)

def false_func():
return paddle.full(shape=[3, 3], dtype='int32', fill_value=-1)

main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
x = paddle.full(shape=[], dtype='float32', fill_value=0.1)
y = paddle.full(shape=[], dtype='float32', fill_value=0.23)
pred = paddle.greater_equal(y, x)
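            # pred is a 0-D boolean tensor used directly as the cond
            # predicate, which is what this test exercises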
out = paddle.static.nn.cond(pred, true_func, false_func)
# out is one tensor

place = (
fluid.CUDAPlace(0)
if core.is_compiled_with_cuda()
else fluid.CPUPlace()
)
exe = fluid.Executor(place)
(ret,) = exe.run(main_program, fetch_list=[out.name])
np.testing.assert_allclose(
np.asarray(ret), np.full((3, 3), 2, np.int32), rtol=1e-05
)

def test_0d_tensor_backward(self):
"""
pseudocode:

a = -2.0
if a >= 0:
return a
else:
return -a
"""

paddle.enable_static()

main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
a = paddle.full(shape=[], dtype='float32', fill_value=-2.0)
a.stop_gradient = False
out = paddle.static.nn.cond(a >= 0, lambda: a, lambda: -a)
append_backward(out)

place = (
fluid.CUDAPlace(0)
if core.is_compiled_with_cuda()
else fluid.CPUPlace()
)
exe = fluid.Executor(place)
ret = exe.run(main_program, fetch_list=[out.name, a.grad_name])
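        # a = -2.0 takes the false branch: out = -a = 2.0 and d(out)/d(a) = -1.0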
np.testing.assert_allclose(
np.asarray(ret[0]), np.array(2.0), rtol=1e-05
)
np.testing.assert_allclose(
np.asarray(ret[1]), np.array(-1.0), rtol=1e-05
)

def test_return_var_tuple(self):
"""
pseudocode:
@@ -358,6 +467,70 @@ def greater_equal_branch(i, a):
self.assertEqual(ret[0][0], expected_ret)
self.assertEqual(ret[1][0], expected_a_grad)

def test_cond_inside_cond_0d_tensor(self):
"""
pseudocode:
i = 3.0
a = 2 * i
if i < 5:
if i >= 3:
return a + 1
else:
return 1 - a
else:
if i < 8:
return a * 2
else:
return a / 2
"""

paddle.enable_static()

def less_than_branch(i, a):
return paddle.static.nn.cond(
i >= 3.0,
lambda: a + 1,
lambda: 1 - a,
)

def greater_equal_branch(i, a):
return paddle.static.nn.cond(
i < 8.0,
lambda: a * 2,
lambda: a / 2,
)

main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
i = paddle.full(fill_value=3.0, shape=[], dtype='float32')
i.stop_gradient = False
a = 2.0 * i
out = paddle.static.nn.cond(
i < 5.0,
lambda: less_than_branch(i, a),
lambda: greater_equal_branch(i, a),
)
mean = paddle.mean(out)
append_backward(out)

place = (
fluid.CUDAPlace(0)
if core.is_compiled_with_cuda()
else fluid.CPUPlace()
)
exe = fluid.Executor(place)
ret = exe.run(
main_program,
fetch_list=[out.name, i.grad_name],
)
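        # i = 3.0 gives a = 6.0; the i < 5 and i >= 3 branches run, so
        # out = a + 1 = 7.0 and, since a = 2 * i, d(out)/d(i) = 2.0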
np.testing.assert_allclose(
np.asarray(ret[0]), np.array(7.0), rtol=1e-05
)
np.testing.assert_allclose(
np.asarray(ret[1]), np.array(2.0), rtol=1e-05
)

def test_cond_op_in_condition(self):
paddle.enable_static()
main_program = fluid.Program()