Add test_op::test_numpy_ref #81

Merged (2 commits), Mar 27, 2024
58 changes: 58 additions & 0 deletions test/xpu/test_ops.py
@@ -16,18 +16,40 @@
    ops,
)
from torch.testing._internal.common_methods_invocations import (
    op_db,
    UnaryUfuncInfo,
    ReductionOpInfo,
    SpectralFuncInfo,
    ops_and_refs,
    BinaryUfuncInfo,
    python_ref_db,
)
from torch.testing._internal.common_utils import (
    NoTest,
    run_tests,
    set_default_dtype,
    suppress_warnings,
    TEST_WITH_TORCHINDUCTOR,
    TEST_WITH_UBSAN,
    TEST_XPU,
    TestCase,
)

# Get names of all the operators which have a ref in their OpInfo entry (testing infra),
# except for elementwise unary operators (separately implemented in test/test_unary_ufuncs.py),
# elementwise binary operators (separately implemented in test/test_binary_ufuncs.py),
# reduction operations (separately implemented in test/test_reductions.py),
# and spectral functions (separately implemented, 1D only as of now, in test/test_spectral_ops.py).
_ref_test_ops = tuple(
    filter(
        lambda op: not isinstance(
            op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
        )
        and op.ref is not None,
        op_db,
    )
)

if not TEST_XPU:
    print("XPU not available, skipping tests", file=sys.stderr)
    TestCase = NoTest  # noqa: F811
@@ -71,6 +93,7 @@
"nn.functional.relu",
"nn.functional.gelu",
"arange",
"clamp",
]
_xpu_tensor_factory_op_list = [
"normal",
@@ -91,6 +114,17 @@
_xpu_float_only_op_list = [
    "reciprocal",  # Align with CUDA impl. Only float and complex supported in CUDA native.
]
_xpu_not_test_numpy_ref_op_list = [
    "clone",  # NumPy copy cannot pass memory_format.
]

# test_numpy_ref
_xpu_numpy_ref_op_list = _xpu_computation_op_list.copy()
for op in _xpu_not_test_numpy_ref_op_list:
    _xpu_numpy_ref_op_list.remove(op)
_xpu_numpy_ref_ops = [
    op for op in _ref_test_ops if op.name in _xpu_numpy_ref_op_list
]
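# Note on the "clone" exclusion above, as a minimal sketch of the mismatch
# (illustrative only, assuming a CPU tensor t; not part of the PR itself):
#
#   t.clone(memory_format=torch.contiguous_format)  # torch selects layout via memory_format
#   np.copy(t.numpy(), order="C")                   # np.copy only exposes `order`
#
# Since op.ref receives ndarrays, it cannot honor memory_format, so the
# NumPy comparison for clone is skipped.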

# test_compare_cpu
_xpu_computation_ops = [
@@ -120,6 +154,30 @@

class TestXpu(TestCase):

    # Tests that the function and its (ndarray-accepting) reference produce the same
    # values on the tensors from the sample_inputs func for the corresponding op.
    # This test runs in double and complex double precision because
    # NumPy does computation internally using double precision for many functions,
    # resulting in possible equality check failures.
    @onlyXPU
    @suppress_warnings
    @ops(_xpu_numpy_ref_ops, allowed_dtypes=(torch.float64, torch.long, torch.complex128))
    def test_numpy_ref(self, device, dtype, op):
        if (
            TEST_WITH_TORCHINDUCTOR and
            op.formatted_name in ('signal_windows_exponential', 'signal_windows_bartlett') and
            dtype == torch.float64 and 'cuda' in device
        ):  # noqa: E121
            raise unittest.SkipTest("XXX: raises tensor-likes are not close.")

        # Sets the default dtype to NumPy's default dtype of double.
        with set_default_dtype(torch.double):
            for sample_input in op.reference_inputs(device, dtype):
                # Integer (long) inputs may legitimately promote differently than
                # the NumPy reference, so the dtype check is relaxed for them.
                self.compare_with_reference(
                    op, op.ref, sample_input, exact_dtype=(dtype is not torch.long)
                )

    @onlyXPU
    @suppress_warnings
    @ops(_xpu_computation_ops, dtypes=any_common_cpu_xpu_one)
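For context, compare_with_reference is defined on the internal TestCase in
torch.testing._internal.common_utils. A simplified sketch of its behavior,
assuming tensor-first sample inputs and ignoring the real helper's extra
handling of non-tensor inputs (names below are illustrative, not the actual
implementation):

    import numpy as np
    import torch

    def compare_with_reference_sketch(test_case, torch_fn, ref_fn, sample, exact_dtype=True):
        # Run the operator under test on the sample's tensors.
        actual = torch_fn(sample.input, *sample.args, **sample.kwargs)

        # Run the ndarray-accepting reference on NumPy copies of the same data.
        np_args = [
            a.cpu().numpy() if isinstance(a, torch.Tensor) else a for a in sample.args
        ]
        expected = ref_fn(sample.input.cpu().numpy(), *np_args, **sample.kwargs)

        # assertEqual can compare a tensor against an ndarray; exact_dtype=False
        # relaxes the dtype check (used above for torch.long inputs).
        test_case.assertEqual(actual, expected, exact_dtype=exact_dtype)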