From aa2567504d448f0dad33284e8c34f69cdbd73e24 Mon Sep 17 00:00:00 2001
From: Feng Yuan
Date: Wed, 27 Mar 2024 03:46:38 +0800
Subject: [PATCH] Add test_op::test_numpy_ref

Signed-off-by: Feng Yuan
---
 test/xpu/test_ops.py | 58 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 58 insertions(+)

diff --git a/test/xpu/test_ops.py b/test/xpu/test_ops.py
index 315dc4d5c..f2bfcc047 100644
--- a/test/xpu/test_ops.py
+++ b/test/xpu/test_ops.py
@@ -16,18 +16,40 @@
     ops,
 )
 from torch.testing._internal.common_methods_invocations import (
+    op_db,
+    UnaryUfuncInfo,
+    ReductionOpInfo,
+    SpectralFuncInfo,
     ops_and_refs,
+    BinaryUfuncInfo,
     python_ref_db,
 )
 from torch.testing._internal.common_utils import (
     NoTest,
     run_tests,
+    set_default_dtype,
     suppress_warnings,
+    TEST_WITH_TORCHINDUCTOR,
     TEST_WITH_UBSAN,
     TEST_XPU,
     TestCase,
 )
 
+# Get names of all the operators which have ref in their entry in OpInfo (testing infra)
+# except for elementwise unary operators (separately implemented in test/test_unary_ufuncs.py),
+# elementwise binary operators (separately implemented in test_binary_ufuncs.py),
+# reduction operations (separately implemented in test_reductions.py),
+# and Spectral Functions (separately implemented for only 1D as of now, in test/test_spectral_ops.py)
+_ref_test_ops = tuple(
+    filter(
+        lambda op: not isinstance(
+            op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
+        )
+        and op.ref is not None,
+        op_db,
+    )
+)
+
 if not TEST_XPU:
     print("XPU not available, skipping tests", file=sys.stderr)
     TestCase = NoTest  # noqa: F811
@@ -71,6 +93,7 @@
     "nn.functional.relu",
     "nn.functional.gelu",
     "arange",
+    "clamp",
 ]
 _xpu_tensor_factory_op_list = [
     "normal",
@@ -91,6 +114,17 @@
 _xpu_float_only_op_list = [
     "reciprocal",  # Align with CUDA impl. Only float and complex supported in CUDA native.
 ]
+_xpu_not_test_numpy_ref_op_list = [
+    "clone",  # Numpy copy cannot pass memory_format.
+]
+
+# test_numpy_ref
+_xpu_numpy_ref_op_list = _xpu_computation_op_list.copy()
+for op in _xpu_not_test_numpy_ref_op_list:
+    _xpu_numpy_ref_op_list.remove(op)
+_xpu_numpy_ref_ops = [
+    op for op in _ref_test_ops if op.name in _xpu_numpy_ref_op_list
+]
 
 # test_compare_cpu
 _xpu_computation_ops = [
@@ -120,6 +154,30 @@
 class TestXpu(TestCase):
+    # Tests that the function and its (ndarray-accepting) reference produce the same
+    # values on the tensors from sample_inputs func for the corresponding op.
+    # This test runs in double and complex double precision because
+    # NumPy does computation internally using double precision for many functions
+    # resulting in possible equality check failures.
+    @onlyXPU
+    @suppress_warnings
+    @ops(_xpu_numpy_ref_ops, allowed_dtypes=(torch.float64, torch.long, torch.complex128))
+    def test_numpy_ref(self, device, dtype, op):
+        if (
+            TEST_WITH_TORCHINDUCTOR and
+            op.formatted_name in ('signal_windows_exponential', 'signal_windows_bartlett') and
+            dtype == torch.float64 and 'cuda' in device
+        ):  # noqa: E121
+            raise unittest.SkipTest("XXX: raises tensor-likes are not close.")
+
+        # Sets the default dtype to NumPy's default dtype of double
+        with set_default_dtype(torch.double):
+            for sample_input in op.reference_inputs(device, dtype):
+                print(sample_input)
+                self.compare_with_reference(
+                    op, op.ref, sample_input, exact_dtype=(dtype is not torch.long)
+                )
+
     @onlyXPU
     @suppress_warnings
     @ops(_xpu_computation_ops, dtypes=any_common_cpu_xpu_one)
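
Note: the snippet below is an illustrative, standalone sketch and not part of the patch.
It mirrors the selection that the _ref_test_ops filter in the hunk above performs, and it
assumes a PyTorch installation where torch.testing._internal is importable; the variable
name "selected" is chosen only for this example.

    # Keep OpInfo entries that define a NumPy reference (op.ref) and are not already
    # covered by the dedicated unary/binary/reduction/spectral test suites.
    from torch.testing._internal.common_methods_invocations import (
        BinaryUfuncInfo,
        ReductionOpInfo,
        SpectralFuncInfo,
        UnaryUfuncInfo,
        op_db,
    )

    selected = [
        op.name
        for op in op_db
        if op.ref is not None
        and not isinstance(
            op, (UnaryUfuncInfo, BinaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo)
        )
    ]
    print(len(selected), "ops carry a usable NumPy reference")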