[AMP OP&Test] assign op: add fp16, bfp16 tests (#52233)
* add fp16, bfp16 tests

* polish

* polish

* polish
liuzhenhai93 authored Mar 30, 2023
1 parent 3161e6c commit 41f0e3c
Showing 1 changed file with 63 additions and 26 deletions.
89 changes: 63 additions & 26 deletions python/paddle/fluid/tests/unittests/test_assign_op.py
@@ -18,6 +18,7 @@
import gradient_checker
import numpy as np
from decorator_helper import prog_scope
from eager_op_test import convert_float_to_uint16, convert_uint16_to_float

import paddle
from paddle import fluid
@@ -47,6 +48,9 @@ def test_backward(self):
paddle.disable_static()


@unittest.skipIf(
not paddle.is_compiled_with_cuda(), "FP16 test runs only on GPU"
)
class TestAssignFP16Op(eager_op_test.OpTest):
def setUp(self):
self.python_api = paddle.assign
@@ -69,6 +73,32 @@ def test_backward(self):
paddle.disable_static()


@unittest.skipIf(
not paddle.is_compiled_with_cuda(), "BFP16 test runs only on GPU"
)
class TestAssignBFP16Op(eager_op_test.OpTest):
def setUp(self):
self.python_api = paddle.assign
self.public_python_api = paddle.assign
self.op_type = "assign"
self.prim_op_type = "prim"
self.enable_cinn = False
x = np.random.uniform(0, 1, [100, 10]).astype(np.float32)
x = convert_float_to_uint16(x)
self.inputs = {'X': x}
self.outputs = {'Out': x}

def test_forward(self):
paddle.enable_static()
self.check_output()
paddle.disable_static()

def test_backward(self):
paddle.enable_static()
self.check_grad(['X'], 'Out', check_prim=True)
paddle.disable_static()


class TestAssignOpWithLoDTensorArray(unittest.TestCase):
def test_assign_LoDTensorArray(self):
paddle.enable_static()
@@ -160,32 +190,12 @@ def test_assign_LoDTensorArray(self):
paddle.disable_static()

def test_assign_NumpyArray(self):
with fluid.dygraph.guard():
array = np.random.random(size=(100, 10)).astype(np.bool_)
result1 = paddle.zeros(shape=[3, 3], dtype='float32')
paddle.assign(array, result1)
np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

def test_assign_NumpyArray1(self):
with fluid.dygraph.guard():
array = np.random.random(size=(100, 10)).astype(np.float32)
result1 = paddle.zeros(shape=[3, 3], dtype='float32')
paddle.assign(array, result1)
np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

def test_assign_NumpyArray2(self):
with fluid.dygraph.guard():
array = np.random.random(size=(100, 10)).astype(np.int32)
result1 = paddle.zeros(shape=[3, 3], dtype='float32')
paddle.assign(array, result1)
np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

def test_assign_NumpyArray3(self):
with fluid.dygraph.guard():
array = np.random.random(size=(100, 10)).astype(np.int64)
result1 = paddle.zeros(shape=[3, 3], dtype='float32')
paddle.assign(array, result1)
np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)
for dtype in [np.bool_, np.float32, np.int32, np.int64]:
with fluid.dygraph.guard():
array = np.random.random(size=(100, 10)).astype(dtype)
result1 = paddle.zeros(shape=[3, 3], dtype='float32')
paddle.assign(array, result1)
np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

def test_assign_List(self):
l = [1, 2, 3]
@@ -232,6 +242,31 @@ def test_clone(self):
paddle.disable_static()


@unittest.skipIf(
not paddle.is_compiled_with_cuda(), "FP16 test runs only on GPU"
)
class TestAssignOApiFP16(unittest.TestCase):
def test_assign_fp16(self):
x = np.random.uniform(0, 10, [3, 3]).astype(np.float16)
x = paddle.to_tensor(x)
result = paddle.zeros(shape=[3, 3], dtype='float16')
paddle.assign(x, result)
np.testing.assert_equal(result.numpy(), x.numpy())

def test_assign_bfp16(self):
x_f = np.random.uniform(0, 10, [3, 3]).astype(np.float32)
x = convert_float_to_uint16(x_f)
x = paddle.to_tensor(x)
result = paddle.zeros(shape=[3, 3], dtype='bfloat16')
paddle.assign(x, result)
np.testing.assert_allclose(
convert_uint16_to_float(result.numpy()), x_f, rtol=1e-02
)
np.testing.assert_equal(
convert_uint16_to_float(result.numpy()), convert_uint16_to_float(x)
)


class TestAssignOpErrorApi(unittest.TestCase):
def test_errors(self):
paddle.enable_static()
@@ -284,6 +319,7 @@ def test_grad(self):
places.append(fluid.CUDAPlace(0))
for p in places:
self.func(p)
paddle.disable_static()


class TestAssignTripleGradCheck(unittest.TestCase):
@@ -315,6 +351,7 @@ def test_grad(self):
places.append(fluid.CUDAPlace(0))
for p in places:
self.func(p)
paddle.disable_static()


if __name__ == '__main__':
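Note: the BFP16 cases above represent bfloat16 values as uint16 bit patterns via eager_op_test.convert_float_to_uint16 and convert_uint16_to_float. A minimal, self-contained sketch of the underlying idea (assuming plain truncation; Paddle's actual helper may round to nearest):

import numpy as np

def float_to_bf16_bits(x):
    # Keep the upper 16 bits of each float32 bit pattern; bfloat16 shares
    # float32's sign and exponent bits, so truncating the mantissa suffices
    # for this sketch.
    return (np.asarray(x, dtype=np.float32).view(np.uint32) >> 16).astype(np.uint16)

def bf16_bits_to_float(x):
    # Widen the uint16 pattern back into the high bits of a float32; the
    # dropped mantissa bits come back as zeros.
    return (np.asarray(x, dtype=np.uint16).astype(np.uint32) << 16).view(np.float32)

a = np.random.uniform(0, 1, [4]).astype(np.float32)
b = bf16_bits_to_float(float_to_bf16_bits(a))
# bfloat16 keeps only ~8 mantissa bits, hence the loose rtol=1e-02 used in
# test_assign_bfp16 above.
np.testing.assert_allclose(b, a, rtol=1e-2)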

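To exercise just the new GPU-only cases, a standard unittest invocation works. This sketch assumes a CUDA build of Paddle (both classes are skipped otherwise) and that test_assign_op.py is importable from the working directory:

import unittest

from test_assign_op import TestAssignBFP16Op, TestAssignFP16Op

# Collect only the two classes added in this commit and run them verbosely.
loader = unittest.defaultTestLoader
suite = unittest.TestSuite()
suite.addTests(loader.loadTestsFromTestCase(TestAssignFP16Op))
suite.addTests(loader.loadTestsFromTestCase(TestAssignBFP16Op))
unittest.TextTestRunner(verbosity=2).run(suite)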