From 41f0e3c31ffcdf07b54d8ae6d9de5da623a5f439 Mon Sep 17 00:00:00 2001
From: liuzhenhai93
Date: Thu, 30 Mar 2023 10:47:24 +0800
Subject: [PATCH] =?UTF-8?q?[AMP=20OP&Test]=20assign=20op=20add=20fp16=20?=
 =?UTF-8?q?=E3=80=81bfp16=20test=20(#52233)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* add fp16 bfp16 test

* polish

* polish

* polish
---
 .../fluid/tests/unittests/test_assign_op.py | 89 +++++++++++++------
 1 file changed, 63 insertions(+), 26 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_assign_op.py b/python/paddle/fluid/tests/unittests/test_assign_op.py
index 5efae3fdd5d6c..c7fc518986d35 100644
--- a/python/paddle/fluid/tests/unittests/test_assign_op.py
+++ b/python/paddle/fluid/tests/unittests/test_assign_op.py
@@ -18,6 +18,7 @@
 import gradient_checker
 import numpy as np
 from decorator_helper import prog_scope
+from eager_op_test import convert_float_to_uint16, convert_uint16_to_float
 
 import paddle
 from paddle import fluid
@@ -47,6 +48,9 @@ def test_backward(self):
         paddle.disable_static()
 
 
+@unittest.skipIf(
+    not paddle.is_compiled_with_cuda(), "FP16 test runs only on GPU"
+)
 class TestAssignFP16Op(eager_op_test.OpTest):
     def setUp(self):
         self.python_api = paddle.assign
@@ -69,6 +73,32 @@ def test_backward(self):
         paddle.disable_static()
 
 
+@unittest.skipIf(
+    not paddle.is_compiled_with_cuda(), "BFP16 test runs only on GPU"
+)
+class TestAssignBFP16Op(eager_op_test.OpTest):
+    def setUp(self):
+        self.python_api = paddle.assign
+        self.public_python_api = paddle.assign
+        self.op_type = "assign"
+        self.prim_op_type = "prim"
+        self.enable_cinn = False
+        x = np.random.uniform(0, 1, [100, 10]).astype(np.float32)
+        x = convert_float_to_uint16(x)
+        self.inputs = {'X': x}
+        self.outputs = {'Out': x}
+
+    def test_forward(self):
+        paddle.enable_static()
+        self.check_output()
+        paddle.disable_static()
+
+    def test_backward(self):
+        paddle.enable_static()
+        self.check_grad(['X'], 'Out', check_prim=True)
+        paddle.disable_static()
+
+
 class TestAssignOpWithLoDTensorArray(unittest.TestCase):
     def test_assign_LoDTensorArray(self):
         paddle.enable_static()
@@ -160,32 +190,12 @@ def test_assign_LoDTensorArray(self):
         paddle.disable_static()
 
     def test_assign_NumpyArray(self):
-        with fluid.dygraph.guard():
-            array = np.random.random(size=(100, 10)).astype(np.bool_)
-            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
-            paddle.assign(array, result1)
-            np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)
-
-    def test_assign_NumpyArray1(self):
-        with fluid.dygraph.guard():
-            array = np.random.random(size=(100, 10)).astype(np.float32)
-            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
-            paddle.assign(array, result1)
-            np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)
-
-    def test_assign_NumpyArray2(self):
-        with fluid.dygraph.guard():
-            array = np.random.random(size=(100, 10)).astype(np.int32)
-            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
-            paddle.assign(array, result1)
-            np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)
-
-    def test_assign_NumpyArray3(self):
-        with fluid.dygraph.guard():
-            array = np.random.random(size=(100, 10)).astype(np.int64)
-            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
-            paddle.assign(array, result1)
-            np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)
+        for dtype in [np.bool_, np.float32, np.int32, np.int64]:
+            with fluid.dygraph.guard():
+                array = np.random.random(size=(100, 10)).astype(dtype)
+                result1 = paddle.zeros(shape=[3, 3], dtype='float32')
+                paddle.assign(array, result1)
+                np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)
 
     def test_assign_List(self):
         l = [1, 2, 3]
@@ -232,6 +242,31 @@ def test_clone(self):
         paddle.disable_static()
 
 
+@unittest.skipIf(
+    not paddle.is_compiled_with_cuda(), "FP16 test runs only on GPU"
+)
+class TestAssignOApiFP16(unittest.TestCase):
+    def test_assign_fp16(self):
+        x = np.random.uniform(0, 10, [3, 3]).astype(np.float16)
+        x = paddle.to_tensor(x)
+        result = paddle.zeros(shape=[3, 3], dtype='float16')
+        paddle.assign(x, result)
+        np.testing.assert_equal(result.numpy(), x.numpy())
+
+    def test_assign_bfp16(self):
+        x_f = np.random.uniform(0, 10, [3, 3]).astype(np.float32)
+        x = convert_float_to_uint16(x_f)
+        x = paddle.to_tensor(x)
+        result = paddle.zeros(shape=[3, 3], dtype='bfloat16')
+        paddle.assign(x, result)
+        np.testing.assert_allclose(
+            convert_uint16_to_float(result.numpy()), x_f, rtol=1e-02
+        )
+        np.testing.assert_equal(
+            convert_uint16_to_float(result.numpy()), convert_uint16_to_float(x)
+        )
+
+
 class TestAssignOpErrorApi(unittest.TestCase):
     def test_errors(self):
         paddle.enable_static()
@@ -284,6 +319,7 @@ def test_grad(self):
             places.append(fluid.CUDAPlace(0))
         for p in places:
             self.func(p)
+        paddle.disable_static()
 
 
 class TestAssignTripleGradCheck(unittest.TestCase):
@@ -315,6 +351,7 @@ def test_grad(self):
             places.append(fluid.CUDAPlace(0))
         for p in places:
             self.func(p)
+        paddle.disable_static()
 
 
 if __name__ == '__main__':