Apply fallback to numpy for all unsupported functions on cuda devices. #2075

Open · wants to merge 14 commits into base: master
32 changes: 32 additions & 0 deletions dpnp/dpnp_iface.py
@@ -69,6 +69,7 @@
"get_normalized_queue_device",
"get_result_array",
"get_usm_ndarray",
"is_cuda_backend",
"get_usm_ndarray_or_scalar",
"is_supported_array_or_scalar",
"is_supported_array_type",
@@ -757,6 +758,37 @@ def get_usm_ndarray_or_scalar(a):
return a if dpnp.isscalar(a) else get_usm_ndarray(a)


def is_cuda_backend(obj=None):
    """
    Checks that the object has a CUDA backend.

    Parameters
    ----------
    obj : {Device, SyclDevice, SyclQueue, dpnp.ndarray, usm_ndarray, None},
        optional
        An input object with a ``sycl_device`` property used to check the
        device backend. If `obj` is ``None``, the backend is checked for the
        default queue.
        Default: ``None``.

    Returns
    -------
    out : bool
        ``True`` if the object has a CUDA backend, otherwise ``False``.

    """

    if obj is None:
        sycl_device = dpctl.SyclQueue().sycl_device
    elif isinstance(obj, dpctl.SyclDevice):
        sycl_device = obj
    else:
        sycl_device = getattr(obj, "sycl_device", None)
    if sycl_device is not None and "cuda" in sycl_device.backend.name:
        return True
    return False


def is_supported_array_or_scalar(a):
"""
Return ``True`` if `a` is a scalar or an array of either
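For reference, a minimal usage sketch of the new helper (it assumes a build of dpnp that includes this change; the inputs are illustrative only):

    import dpctl
    import dpnp

    x = dpnp.arange(10)

    # Backend of the queue the array was allocated on.
    print(dpnp.is_cuda_backend(x))

    # With no argument, the default SYCL queue is inspected.
    print(dpnp.is_cuda_backend())

    # A SyclDevice, SyclQueue, or usm_ndarray can be passed as well.
    print(dpnp.is_cuda_backend(dpctl.SyclDevice()))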
3 changes: 3 additions & 0 deletions dpnp/dpnp_iface_indexing.py
@@ -173,6 +173,7 @@ def choose(x1, choices, out=None, mode="raise"):
:obj:`dpnp.take_along_axis` : Preferable if choices is an array.

"""

x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)

choices_list = []
@@ -192,6 +193,8 @@ def choose(x1, choices, out=None, mode="raise"):
pass
elif not choices_list:
pass
elif dpnp.is_cuda_backend(x1):
pass
else:
size = x1_desc.size
choices_size = choices_list[0].size
2 changes: 1 addition & 1 deletion dpnp/dpnp_iface_libmath.py
@@ -81,7 +81,7 @@ def erf(in_array1):
x1_desc = dpnp.get_dpnp_descriptor(
in_array1, copy_when_strides=False, copy_when_nondefault_queue=False
)
if x1_desc:
if x1_desc and not dpnp.is_cuda_backend(in_array1):
return dpnp_erf(x1_desc).get_pyobj()

result = create_output_descriptor_py(
9 changes: 7 additions & 2 deletions dpnp/dpnp_iface_mathematical.py
@@ -2478,8 +2478,13 @@ def modf(x1, **kwargs):
"""

x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
if x1_desc and not kwargs:
return dpnp_modf(x1_desc)
if x1_desc:
if kwargs:
pass
elif dpnp.is_cuda_backend(x1):
pass
else:
return dpnp_modf(x1_desc)

return call_origin(numpy.modf, x1, **kwargs)

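For illustration, a hedged sketch of what the new guard amounts to for modf; the wrapper name is hypothetical and not part of dpnp, and the explicit host round-trip only approximates what the call_origin fallback does internally:

    import numpy
    import dpnp

    def modf_with_fallback(x):
        """Hypothetical wrapper mirroring the dispatch above (not dpnp API)."""
        if dpnp.is_cuda_backend(x):
            # No native dpnp kernel on a CUDA backend: compute on the host ...
            frac, whole = numpy.modf(dpnp.asnumpy(x))
            # ... and move both outputs back onto the input's queue.
            q = x.sycl_queue
            return dpnp.asarray(frac, sycl_queue=q), dpnp.asarray(whole, sycl_queue=q)
        # Non-CUDA backends keep using the native dpnp implementation.
        return dpnp.modf(x)

    fractional, integral = modf_with_fallback(dpnp.array([1.5, -0.5, 2.25]))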
2 changes: 2 additions & 0 deletions dpnp/dpnp_iface_sorting.py
@@ -188,6 +188,8 @@ def partition(x1, kth, axis=-1, kind="introselect", order=None):
pass
elif order is not None:
pass
elif dpnp.is_cuda_backend(x1):
pass
else:
return dpnp_partition(x1_desc, kth, axis, kind, order).get_pyobj()

4 changes: 4 additions & 0 deletions dpnp/dpnp_iface_statistics.py
@@ -380,6 +380,8 @@ def correlate(x1, x2, mode="valid"):
pass
elif mode != "valid":
pass
elif dpnp.is_cuda_backend(x1) or dpnp.is_cuda_backend(x2):
pass
else:
return dpnp_correlate(x1_desc, x2_desc).get_pyobj()

@@ -665,6 +667,8 @@ def median(x1, axis=None, out=None, overwrite_input=False, keepdims=False):
pass
elif keepdims:
pass
elif dpnp.is_cuda_backend(x1):
pass
else:
result_obj = dpnp_median(x1_desc).get_pyobj()
result = dpnp.convert_single_elem_array_to_scalar(result_obj)
68 changes: 66 additions & 2 deletions dpnp/random/dpnp_iface_random.py
@@ -150,6 +150,8 @@ def beta(a, b, size=None):
pass
elif b <= 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_beta(a, b, size).get_pyobj()

@@ -196,6 +198,8 @@ def binomial(n, p, size=None):
pass
elif n < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_binomial(int(n), p, size).get_pyobj()

@@ -244,6 +248,8 @@ def chisquare(df, size=None):
pass
elif df <= 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
# TODO:
# float to int, safe
@@ -312,6 +318,8 @@ def exponential(scale=1.0, size=None):
pass
elif scale < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_exponential(scale, size).get_pyobj()

@@ -348,6 +356,8 @@ def f(dfnum, dfden, size=None):
pass
elif dfden <= 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_f(dfnum, dfden, size).get_pyobj()

@@ -386,6 +396,8 @@ def gamma(shape, scale=1.0, size=None):
pass
elif shape < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_gamma(shape, scale, size).get_pyobj()

@@ -420,6 +432,8 @@ def geometric(p, size=None):
pass
elif p > 1 or p <= 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_geometric(p, size).get_pyobj()

@@ -456,6 +470,8 @@ def gumbel(loc=0.0, scale=1.0, size=None):
pass
elif scale < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_gumbel(loc, scale, size).get_pyobj()

@@ -504,6 +520,8 @@ def hypergeometric(ngood, nbad, nsample, size=None):
pass
elif nsample < 1:
pass
elif dpnp.is_cuda_backend():
pass
else:
_m = int(ngood)
_l = int(ngood) + int(nbad)
@@ -542,6 +560,8 @@ def laplace(loc=0.0, scale=1.0, size=None):
pass
elif scale < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_laplace(loc, scale, size).get_pyobj()

@@ -576,6 +596,8 @@ def logistic(loc=0.0, scale=1.0, size=None):
pass
elif scale < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
result = dpnp_rng_logistic(loc, scale, size).get_pyobj()
if size is None or size == 1:
@@ -617,6 +639,8 @@ def lognormal(mean=0.0, sigma=1.0, size=None):
pass
elif sigma < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_lognormal(mean, sigma, size).get_pyobj()

@@ -674,6 +698,8 @@ def multinomial(n, pvals, size=None):
pass
elif pvals_sum < 0.0:
pass
elif dpnp.is_cuda_backend():
pass
else:
if size is None:
shape = (d,)
@@ -725,6 +751,8 @@ def multivariate_normal(mean, cov, size=None, check_valid="warn", tol=1e-8):
pass
elif mean_.shape[0] != cov_.shape[0]:
pass
elif dpnp.is_cuda_backend():
pass
else:
final_shape = list(shape[:])
final_shape.append(mean_.shape[0])
@@ -777,6 +805,8 @@ def negative_binomial(n, p, size=None):
pass
elif n <= 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_negative_binomial(n, p, size).get_pyobj()

@@ -862,6 +892,8 @@ def noncentral_chisquare(df, nonc, size=None):
pass
elif nonc < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_noncentral_chisquare(df, nonc, size).get_pyobj()

@@ -912,6 +944,8 @@ def pareto(a, size=None):
pass
elif a <= 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_pareto(a, size).get_pyobj()

@@ -981,6 +1015,8 @@ def poisson(lam=1.0, size=None):
pass
elif lam < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_poisson(lam, size).get_pyobj()

@@ -1016,6 +1052,8 @@ def power(a, size=None):
pass
elif a <= 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_power(a, size).get_pyobj()

@@ -1423,6 +1461,8 @@ def rayleigh(scale=1.0, size=None):
pass
elif scale < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_rayleigh(scale, size).get_pyobj()

@@ -1495,6 +1535,8 @@ def shuffle(x1):
if x1_desc:
if not dpnp.is_type_supported(x1_desc.dtype):
pass
elif dpnp.is_cuda_backend(x1):
pass
else:
dpnp_rng_shuffle(x1_desc).get_pyobj()
return
@@ -1545,6 +1587,8 @@ def seed(seed=None, device=None, sycl_queue=None):
pass
elif seed < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
# TODO:
# migrate to a single approach with RandomState class
@@ -1577,7 +1621,10 @@ def standard_cauchy(size=None):
"""

if not use_origin_backend(size):
return dpnp_rng_standard_cauchy(size).get_pyobj()
if dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_standard_cauchy(size).get_pyobj()

return call_origin(numpy.random.standard_cauchy, size)

@@ -1602,7 +1649,10 @@ def standard_exponential(size=None):
"""

if not use_origin_backend(size):
return dpnp_rng_standard_exponential(size).get_pyobj()
if dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_standard_exponential(size).get_pyobj()

return call_origin(numpy.random.standard_exponential, size)

@@ -1636,6 +1686,8 @@ def standard_gamma(shape, size=None):
pass
elif shape < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_standard_gamma(shape, size).get_pyobj()

@@ -1714,6 +1766,8 @@ def standard_t(df, size=None):
pass
elif df <= 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_standard_t(df, size).get_pyobj()

@@ -1758,6 +1812,8 @@ def triangular(left, mode, right, size=None):
pass
elif left == right:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_triangular(left, mode, right, size).get_pyobj()

@@ -1862,6 +1918,8 @@ def vonmises(mu, kappa, size=None):
return dpnp.nan
elif kappa < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_vonmises(mu, kappa, size).get_pyobj()

@@ -1898,6 +1956,8 @@ def wald(mean, scale, size=None):
pass
elif scale <= 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_wald(mean, scale, size).get_pyobj()

@@ -1930,6 +1990,8 @@ def weibull(a, size=None):
pass
elif a < 0:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_weibull(a, size).get_pyobj()

@@ -1962,6 +2024,8 @@ def zipf(a, size=None):
pass
elif a <= 1:
pass
elif dpnp.is_cuda_backend():
pass
else:
return dpnp_rng_zipf(a, size).get_pyobj()

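A quick way to exercise the RNG fallbacks added above; this is only a smoke test and assumes a SYCL runtime where a CUDA device is the default (on other devices the native dpnp generators are still used):

    import dpnp

    dpnp.random.seed(1234)

    a = dpnp.random.beta(2.0, 5.0, size=10)
    b = dpnp.random.standard_cauchy(size=10)

    # On a CUDA backend both calls are expected to fall through to
    # numpy.random; elsewhere they keep using the native dpnp RNG kernels.
    print(dpnp.is_cuda_backend(), a.shape, b.shape)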