diff --git a/paddle/phi/kernels/gpu/eye_kernel.cu b/paddle/phi/kernels/gpu/eye_kernel.cu
index 5cd8e5caabe98..04735aaa228a6 100644
--- a/paddle/phi/kernels/gpu/eye_kernel.cu
+++ b/paddle/phi/kernels/gpu/eye_kernel.cu
@@ -13,7 +13,6 @@
 // limitations under the License.
 
 #include "paddle/phi/kernels/eye_kernel.h"
-
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/eye_kernel_impl.h"
@@ -26,4 +25,5 @@ PD_REGISTER_KERNEL(eye,
                    double,
                    int64_t,
                    int,
-                   phi::dtype::float16) {}
+                   phi::dtype::float16,
+                   phi::dtype::bfloat16) {}
diff --git a/paddle/phi/kernels/gpu/frame_grad_kernel.cu b/paddle/phi/kernels/gpu/frame_grad_kernel.cu
index 7deb9ff04cdb3..f7b5d441f5c93 100644
--- a/paddle/phi/kernels/gpu/frame_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/frame_grad_kernel.cu
@@ -13,7 +13,6 @@
 // limitations under the License.
 
 #include "paddle/phi/kernels/frame_grad_kernel.h"
-
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/common/complex.h"
 #include "paddle/phi/core/kernel_registry.h"
@@ -28,5 +27,6 @@ PD_REGISTER_KERNEL(frame_grad,
                    float,
                    double,
                    phi::dtype::float16,
+                   phi::dtype::bfloat16,
                    phi::dtype::complex<float>,
                    phi::dtype::complex<double>) {}
diff --git a/paddle/phi/kernels/gpu/frame_kernel.cu b/paddle/phi/kernels/gpu/frame_kernel.cu
index 2506cd714be09..153e450576459 100644
--- a/paddle/phi/kernels/gpu/frame_kernel.cu
+++ b/paddle/phi/kernels/gpu/frame_kernel.cu
@@ -28,5 +28,6 @@ PD_REGISTER_KERNEL(frame,
                    float,
                    double,
                    phi::dtype::float16,
+                   phi::dtype::bfloat16,
                    phi::dtype::complex<float>,
                    phi::dtype::complex<double>) {}
diff --git a/python/paddle/fluid/tests/unittests/test_eye_op.py b/python/paddle/fluid/tests/unittests/test_eye_op.py
index 8756f18cc24de..b7ef848b42fad 100644
--- a/python/paddle/fluid/tests/unittests/test_eye_op.py
+++ b/python/paddle/fluid/tests/unittests/test_eye_op.py
@@ -21,29 +21,40 @@
 import paddle
 from paddle import fluid
-from paddle.fluid import framework
+from paddle.fluid import core, framework
 from paddle.fluid.framework import Program, program_guard
 
 
 class TestEyeOp(OpTest):
     def setUp(self):
         '''
-        Test eye op with specified shape
+        Test eye op with default shape
         '''
         self.python_api = paddle.eye
         self.op_type = "eye"
+        self.init_dtype()
+        self.init_attrs()
+
         self.inputs = {}
         self.attrs = {
-            'num_rows': 219,
-            'num_columns': 319,
-            'dtype': framework.convert_np_dtype_to_dtype_(np.int32),
+            'num_rows': self.num_rows,
+            'num_columns': self.num_columns,
+            'dtype': framework.convert_np_dtype_to_dtype_(self.dtype),
+        }
+        self.outputs = {
+            'Out': np.eye(self.num_rows, self.num_columns, dtype=self.dtype)
         }
-        self.outputs = {'Out': np.eye(219, 319, dtype=np.int32)}
 
     def test_check_output(self):
         self.check_output()
 
+    def init_dtype(self):
+        self.dtype = np.int32
+
+    def init_attrs(self):
+        self.num_rows = 319
+        self.num_columns = 319
+
 
 class TestEyeOp1(OpTest):
     def setUp(self):
@@ -178,6 +189,35 @@ def test_error(self):
             paddle.eye(-1)
 
 
+class TestEyeFP16OP(TestEyeOp):
+    '''Test eye op with specified dtype'''
+
+    def init_dtype(self):
+        self.dtype = np.float16
+
+
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+    "core is not compiled with CUDA or does not support the bfloat16",
+)
+class TestEyeBF16OP(OpTest):
+    def setUp(self):
+        self.op_type = "eye"
+        self.dtype = np.uint16
+        self.python_api = paddle.eye
+        self.inputs = {}
+        self.attrs = {
+            'num_rows': 219,
+            'num_columns': 319,
+        }
+        self.outputs = {'Out': np.eye(219, 319)}
+
+    def test_check_output(self):
+        place = core.CUDAPlace(0)
+        self.check_output_with_place(place)
+
+
 if __name__ == "__main__":
     paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_frame_op.py b/python/paddle/fluid/tests/unittests/test_frame_op.py
index f012ac06f38b8..8f166f443462b 100644
--- a/python/paddle/fluid/tests/unittests/test_frame_op.py
+++ b/python/paddle/fluid/tests/unittests/test_frame_op.py
@@ -15,10 +15,11 @@
 import unittest
 
 import numpy as np
-from eager_op_test import OpTest
+from eager_op_test import OpTest, convert_float_to_uint16
 from numpy.lib.stride_tricks import as_strided
 
 import paddle
+from paddle.fluid import core
 
 
 def frame_from_librosa(x, frame_length, hop_length, axis=-1):
@@ -48,23 +49,28 @@ class TestFrameOp(OpTest):
     def setUp(self):
         self.op_type = "frame"
         self.python_api = paddle.signal.frame
-        self.shape, self.type, self.attrs = self.initTestCase()
-        self.inputs = {
-            'X': np.random.random(size=self.shape).astype(self.type),
-        }
+
+        self.init_dtype()
+        self.init_shape()
+        self.init_attrs()
+
+        self.inputs = {'X': np.random.random(size=self.shape).astype(self.dtype)}
         self.outputs = {
             'Out': frame_from_librosa(x=self.inputs['X'], **self.attrs)
         }
 
-    def initTestCase(self):
-        input_shape = (150,)
-        input_type = 'float64'
-        attrs = {
+    def init_dtype(self):
+        self.dtype = 'float64'
+
+    def init_shape(self):
+        self.shape = (150,)
+
+    def init_attrs(self):
+        self.attrs = {
             'frame_length': 50,
             'hop_length': 15,
             'axis': -1,
         }
-        return input_shape, input_type, attrs
 
     def test_check_output(self):
         paddle.enable_static()
@@ -137,5 +143,50 @@ def initTestCase(self):
         return input_shape, input_type, attrs
 
 
+class TestFrameFP16OP(TestFrameOp):
+    def init_dtype(self):
+        self.dtype = np.float16
+
+
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+    "core is not compiled with CUDA or does not support the bfloat16",
+)
+class TestFrameBF16OP(OpTest):
+    def setUp(self):
+        self.op_type = "frame"
+        self.python_api = paddle.signal.frame
+        self.shape, self.dtype, self.attrs = self.initTestCase()
+        x = np.random.random(size=self.shape).astype(np.float32)
+        out = frame_from_librosa(x, **self.attrs).copy()
+        self.inputs = {
+            'X': convert_float_to_uint16(x),
+        }
+        self.outputs = {'Out': convert_float_to_uint16(out)}
+
+    def initTestCase(self):
+        input_shape = (150,)
+        input_dtype = np.uint16
+        attrs = {
+            'frame_length': 50,
+            'hop_length': 15,
+            'axis': -1,
+        }
+        return input_shape, input_dtype, attrs
+
+    def test_check_output(self):
+        paddle.enable_static()
+        place = core.CUDAPlace(0)
+        self.check_output_with_place(place)
+        paddle.disable_static()
+
+    def test_check_grad_normal(self):
+        paddle.enable_static()
+        place = core.CUDAPlace(0)
+        self.check_grad_with_place(place, ['X'], 'Out')
+        paddle.disable_static()
+
+
 if __name__ == '__main__':
     unittest.main()