
Commit

reformat manually
Signed-off-by: tiancaishaonvjituizi <[email protected]>
tiancaishaonvjituizi committed Oct 24, 2022
1 parent c03f6f9 commit 5326864
Showing 2 changed files with 44 additions and 30 deletions.
64 changes: 38 additions & 26 deletions python/paddle/fluid/tests/unittests/test_nan_to_num_op.py
@@ -21,16 +21,23 @@
 # from op_test import OpTest


-def np_nan_to_num(x: np.ndarray,
-                  nan: float = 0.0,
-                  posinf: Optional[float] = None,
-                  neginf: Optional[float] = None) -> np.ndarray:
+def np_nan_to_num(
+    x: np.ndarray,
+    nan: float = 0.0,
+    posinf: Optional[float] = None,
+    neginf: Optional[float] = None,
+) -> np.ndarray:
     return np.nan_to_num(x, True, nan=nan, posinf=posinf, neginf=neginf)


-def np_nan_to_num_op(x: np.ndarray, nan: float, replace_posinf_with_max: bool,
-                     posinf: float, replace_neginf_with_min: bool,
-                     neginf: float) -> np.ndarray:
+def np_nan_to_num_op(
+    x: np.ndarray,
+    nan: float,
+    replace_posinf_with_max: bool,
+    posinf: float,
+    replace_neginf_with_min: bool,
+    neginf: float,
+) -> np.ndarray:
     if replace_posinf_with_max:
         posinf = None
     if replace_neginf_with_min:
Expand All @@ -45,25 +52,28 @@ def np_nan_to_num_grad(x: np.ndarray, dout: np.ndarray) -> np.ndarray:


class TestNanToNum(unittest.TestCase):

def setUp(self):
self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
self.place = (
paddle.CUDAPlace(0)
if core.is_compiled_with_cuda()
else paddle.CPUPlace()
)

def test_static(self):
x_np = np.array([[1, np.nan, -2], [np.inf, 0,
-np.inf]]).astype(np.float32)
x_np = np.array([[1, np.nan, -2], [np.inf, 0, -np.inf]]).astype(
np.float32
)
out1_np = np_nan_to_num(x_np)
out2_np = np_nan_to_num(x_np, 1.)
out3_np = np_nan_to_num(x_np, 1., 9.)
out4_np = np_nan_to_num(x_np, 1., 9., -12.)
out2_np = np_nan_to_num(x_np, 1.0)
out3_np = np_nan_to_num(x_np, 1.0, 9.0)
out4_np = np_nan_to_num(x_np, 1.0, 9.0, -12.0)
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', x_np.shape)
out1 = paddle.nan_to_num(x)
out2 = paddle.nan_to_num(x, 1.)
out3 = paddle.nan_to_num(x, 1., 9.)
out4 = paddle.nan_to_num(x, 1., 9., -12.)
out2 = paddle.nan_to_num(x, 1.0)
out3 = paddle.nan_to_num(x, 1.0, 9.0)
out4 = paddle.nan_to_num(x, 1.0, 9.0, -12.0)
exe = paddle.static.Executor(self.place)
res = exe.run(feed={'X': x_np}, fetch_list=[out1, out2, out3, out4])

Expand All @@ -78,37 +88,39 @@ def test_dygraph(self):

with paddle.fluid.dygraph.guard():
# NOTE(tiancaishaonvjituizi): float64 input fails the test
x_np = np.array([[1, np.nan, -2], [np.inf, 0,
-np.inf]]).astype(np.float32)
# -np.inf]]).astype(np.float64)
x_np = np.array([[1, np.nan, -2], [np.inf, 0, -np.inf]]).astype(
np.float32
# np.float64
)
x_tensor = paddle.to_tensor(x_np, stop_gradient=False)

out_tensor = paddle.nan_to_num(x_tensor)
out_np = np_nan_to_num(x_np)
self.assertTrue(np.allclose(out_tensor.numpy(), out_np))

out_tensor = paddle.nan_to_num(x_tensor, 1., None, None)
out_tensor = paddle.nan_to_num(x_tensor, 1.0, None, None)
out_np = np_nan_to_num(x_np, 1, None, None)
self.assertTrue(np.allclose(out_tensor.numpy(), out_np))

out_tensor = paddle.nan_to_num(x_tensor, 1., 2., None)
out_tensor = paddle.nan_to_num(x_tensor, 1.0, 2.0, None)
out_np = np_nan_to_num(x_np, 1, 2, None)
self.assertTrue(np.allclose(out_tensor.numpy(), out_np))

out_tensor = paddle.nan_to_num(x_tensor, 1., None, -10.)
out_tensor = paddle.nan_to_num(x_tensor, 1.0, None, -10.0)
out_np = np_nan_to_num(x_np, 1, None, -10)
self.assertTrue(np.allclose(out_tensor.numpy(), out_np))

out_tensor = paddle.nan_to_num(x_tensor, 1., 100., -10.)
out_tensor = paddle.nan_to_num(x_tensor, 1.0, 100.0, -10.0)
out_np = np_nan_to_num(x_np, 1, 100, -10)
self.assertTrue(np.allclose(out_tensor.numpy(), out_np))

paddle.enable_static()

def test_check_grad(self):
paddle.disable_static(place=self.place)
x_np = np.array([[1, np.nan, -2], [np.inf, 0,
-np.inf]]).astype(np.float32)
x_np = np.array([[1, np.nan, -2], [np.inf, 0, -np.inf]]).astype(
np.float32
)
x_tensor = paddle.to_tensor(x_np, stop_gradient=False)

y = paddle.nan_to_num(x_tensor)
Expand Down
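For context, the semantics these tests pin down can be reproduced with NumPy alone. A minimal sketch, runnable without Paddle; the printed values in the comments are approximate and assume the float32 test input above:

import numpy as np

x = np.array([[1, np.nan, -2], [np.inf, 0, -np.inf]], dtype=np.float32)

# Defaults: nan -> 0.0, +inf -> float32 max, -inf -> float32 min.
print(np.nan_to_num(x))
# approx. [[ 1.0e00  0.0e00 -2.0e00]
#          [ 3.4e38  0.0e00 -3.4e38]]

# Explicit replacements, mirroring paddle.nan_to_num(x, 1.0, 9.0, -12.0):
print(np.nan_to_num(x, nan=1.0, posinf=9.0, neginf=-12.0))
# approx. [[  1.   1.  -2.]
#          [  9.   0. -12.]]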
10 changes: 6 additions & 4 deletions python/paddle/tensor/math.py
@@ -1339,12 +1339,14 @@ def nan_to_num(x, nan=0.0, posinf=None, neginf=None, name=None):
     assert x.dtype in [paddle.float32, paddle.float64]
     is_float32 = x.dtype == paddle.float32
     if posinf is None:
-        posinf = np.finfo(np.float32).max if is_float32 else np.finfo(
-            np.float64).max
+        posinf = (
+            np.finfo(np.float32).max if is_float32 else np.finfo(np.float64).max
+        )
     posinf = paddle.full_like(x, posinf)
     if neginf is None:
-        neginf = np.finfo(np.float32).min if is_float32 else np.finfo(
-            np.float64).min
+        neginf = (
+            np.finfo(np.float32).min if is_float32 else np.finfo(np.float64).min
+        )
     neginf = paddle.full_like(x, neginf)
     x = paddle.where(paddle.isnan(x), nan, x)
     x = paddle.where(x == posinf_value, posinf, x)
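The hunk above only reformats this code, but its shape is worth spelling out: default posinf/neginf to the dtype's finite extremes via np.finfo, broadcast them with paddle.full_like, then apply one masked select per special value with paddle.where (posinf_value comes from lines elided above). A NumPy-only sketch of the same decomposition; the helper name and the equality-based masks are our illustration, not the Paddle source:

import numpy as np

def nan_to_num_sketch(x, nan=0.0, posinf=None, neginf=None):
    # Default the replacement values to the dtype's finite extremes,
    # mirroring the np.finfo(...).max / .min logic in the diff.
    info = np.finfo(x.dtype)
    posinf = info.max if posinf is None else posinf
    neginf = info.min if neginf is None else neginf
    # One masked select per special value; nan is replaced first,
    # so the inf comparisons below never see NaN.
    x = np.where(np.isnan(x), x.dtype.type(nan), x)
    x = np.where(x == np.inf, x.dtype.type(posinf), x)
    x = np.where(x == -np.inf, x.dtype.type(neginf), x)
    return x

x = np.array([[1, np.nan, -2], [np.inf, 0, -np.inf]], dtype=np.float32)
assert np.allclose(nan_to_num_sketch(x), np.nan_to_num(x))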
