diff --git a/dpnp/backend/include/dpnp_iface_fptr.hpp b/dpnp/backend/include/dpnp_iface_fptr.hpp index 6a174b3b647..8b1f4c48a11 100644 --- a/dpnp/backend/include/dpnp_iface_fptr.hpp +++ b/dpnp/backend/include/dpnp_iface_fptr.hpp @@ -216,8 +216,6 @@ enum class DPNPFuncName : size_t DPNP_FN_MULTIPLY_EXT, /**< Used in numpy.multiply() impl, requires extra parameters */ DPNP_FN_NANVAR, /**< Used in numpy.nanvar() impl */ - DPNP_FN_NANVAR_EXT, /**< Used in numpy.nanvar() impl, requires extra - parameters */ DPNP_FN_NEGATIVE, /**< Used in numpy.negative() impl */ DPNP_FN_NONZERO, /**< Used in numpy.nonzero() impl */ DPNP_FN_ONES, /**< Used in numpy.ones() impl */ @@ -374,8 +372,7 @@ enum class DPNPFuncName : size_t */ DPNP_FN_SQUARE, /**< Used in numpy.square() impl */ DPNP_FN_STD, /**< Used in numpy.std() impl */ - DPNP_FN_STD_EXT, /**< Used in numpy.std() impl, requires extra parameters */ - DPNP_FN_SUBTRACT, /**< Used in numpy.subtract() impl */ + DPNP_FN_SUBTRACT, /**< Used in numpy.subtract() impl */ DPNP_FN_SUBTRACT_EXT, /**< Used in numpy.subtract() impl, requires extra parameters */ DPNP_FN_SUM, /**< Used in numpy.sum() impl */ @@ -386,21 +383,20 @@ enum class DPNPFuncName : size_t DPNP_FN_TAKE, /**< Used in numpy.take() impl */ DPNP_FN_TAN, /**< Used in numpy.tan() impl */ DPNP_FN_TANH, /**< Used in numpy.tanh() impl */ - DPNP_FN_TRANSPOSE, /**< Used in numpy.transpose() impl */ - DPNP_FN_TRACE, /**< Used in numpy.trace() impl */ - DPNP_FN_TRACE_EXT, /**< Used in numpy.trace() impl, requires extra - parameters */ - DPNP_FN_TRAPZ, /**< Used in numpy.trapz() impl */ - DPNP_FN_TRAPZ_EXT, /**< Used in numpy.trapz() impl, requires extra - parameters */ - DPNP_FN_TRI, /**< Used in numpy.tri() impl */ - DPNP_FN_TRIL, /**< Used in numpy.tril() impl */ - DPNP_FN_TRIU, /**< Used in numpy.triu() impl */ - DPNP_FN_TRUNC, /**< Used in numpy.trunc() impl */ - DPNP_FN_VANDER, /**< Used in numpy.vander() impl */ - DPNP_FN_VAR, /**< Used in numpy.var() impl */ - DPNP_FN_VAR_EXT, /**< Used in numpy.var() impl, requires extra parameters */ - DPNP_FN_ZEROS, /**< Used in numpy.zeros() impl */ + DPNP_FN_TRANSPOSE, /**< Used in numpy.transpose() impl */ + DPNP_FN_TRACE, /**< Used in numpy.trace() impl */ + DPNP_FN_TRACE_EXT, /**< Used in numpy.trace() impl, requires extra + parameters */ + DPNP_FN_TRAPZ, /**< Used in numpy.trapz() impl */ + DPNP_FN_TRAPZ_EXT, /**< Used in numpy.trapz() impl, requires extra + parameters */ + DPNP_FN_TRI, /**< Used in numpy.tri() impl */ + DPNP_FN_TRIL, /**< Used in numpy.tril() impl */ + DPNP_FN_TRIU, /**< Used in numpy.triu() impl */ + DPNP_FN_TRUNC, /**< Used in numpy.trunc() impl */ + DPNP_FN_VANDER, /**< Used in numpy.vander() impl */ + DPNP_FN_VAR, /**< Used in numpy.var() impl */ + DPNP_FN_ZEROS, /**< Used in numpy.zeros() impl */ DPNP_FN_ZEROS_LIKE, /**< Used in numpy.zeros_like() impl */ DPNP_FN_LAST, /**< The latest element of the enumeration */ }; diff --git a/dpnp/backend/kernels/dpnp_krnl_statistics.cpp b/dpnp/backend/kernels/dpnp_krnl_statistics.cpp index 5c0ca1f6591..8f685c97cb3 100644 --- a/dpnp/backend/kernels/dpnp_krnl_statistics.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_statistics.cpp @@ -939,16 +939,6 @@ template void (*dpnp_nanvar_default_c)(void *, void *, void *, const size_t, size_t) = dpnp_nanvar_c<_DataType>; -template -DPCTLSyclEventRef (*dpnp_nanvar_ext_c)(DPCTLSyclQueueRef, - void *, - void *, - void *, - const size_t, - size_t, - const DPCTLEventVectorRef) = - dpnp_nanvar_c<_DataType>; - template DPCTLSyclEventRef dpnp_std_c(DPCTLSyclQueueRef q_ref, 
void *array1_in, @@ -1039,18 +1029,6 @@ void (*dpnp_std_default_c)(void *, size_t, size_t) = dpnp_std_c<_DataType, _ResultType>; -template -DPCTLSyclEventRef (*dpnp_std_ext_c)(DPCTLSyclQueueRef, - void *, - void *, - const shape_elem_type *, - size_t, - const shape_elem_type *, - size_t, - size_t, - const DPCTLEventVectorRef) = - dpnp_std_c<_DataType, _ResultType>; - template class dpnp_var_c_kernel; @@ -1150,18 +1128,6 @@ void (*dpnp_var_default_c)(void *, size_t, size_t) = dpnp_var_c<_DataType, _ResultType>; -template -DPCTLSyclEventRef (*dpnp_var_ext_c)(DPCTLSyclQueueRef, - void *, - void *, - const shape_elem_type *, - size_t, - const shape_elem_type *, - size_t, - size_t, - const DPCTLEventVectorRef) = - dpnp_var_c<_DataType, _ResultType>; - void func_map_init_statistics(func_map_t &fmap) { fmap[DPNPFuncName::DPNP_FN_CORRELATE][eft_INT][eft_INT] = { @@ -1316,15 +1282,6 @@ void func_map_init_statistics(func_map_t &fmap) fmap[DPNPFuncName::DPNP_FN_NANVAR][eft_DBL][eft_DBL] = { eft_DBL, (void *)dpnp_nanvar_default_c}; - fmap[DPNPFuncName::DPNP_FN_NANVAR_EXT][eft_INT][eft_INT] = { - eft_INT, (void *)dpnp_nanvar_ext_c}; - fmap[DPNPFuncName::DPNP_FN_NANVAR_EXT][eft_LNG][eft_LNG] = { - eft_LNG, (void *)dpnp_nanvar_ext_c}; - fmap[DPNPFuncName::DPNP_FN_NANVAR_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void *)dpnp_nanvar_ext_c}; - fmap[DPNPFuncName::DPNP_FN_NANVAR_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void *)dpnp_nanvar_ext_c}; - fmap[DPNPFuncName::DPNP_FN_STD][eft_INT][eft_INT] = { eft_DBL, (void *)dpnp_std_default_c}; fmap[DPNPFuncName::DPNP_FN_STD][eft_LNG][eft_LNG] = { @@ -1334,15 +1291,6 @@ void func_map_init_statistics(func_map_t &fmap) fmap[DPNPFuncName::DPNP_FN_STD][eft_DBL][eft_DBL] = { eft_DBL, (void *)dpnp_std_default_c}; - fmap[DPNPFuncName::DPNP_FN_STD_EXT][eft_INT][eft_INT] = { - eft_DBL, (void *)dpnp_std_ext_c}; - fmap[DPNPFuncName::DPNP_FN_STD_EXT][eft_LNG][eft_LNG] = { - eft_DBL, (void *)dpnp_std_ext_c}; - fmap[DPNPFuncName::DPNP_FN_STD_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void *)dpnp_std_ext_c}; - fmap[DPNPFuncName::DPNP_FN_STD_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void *)dpnp_std_ext_c}; - fmap[DPNPFuncName::DPNP_FN_VAR][eft_INT][eft_INT] = { eft_DBL, (void *)dpnp_var_default_c}; fmap[DPNPFuncName::DPNP_FN_VAR][eft_LNG][eft_LNG] = { @@ -1352,14 +1300,5 @@ void func_map_init_statistics(func_map_t &fmap) fmap[DPNPFuncName::DPNP_FN_VAR][eft_DBL][eft_DBL] = { eft_DBL, (void *)dpnp_var_default_c}; - fmap[DPNPFuncName::DPNP_FN_VAR_EXT][eft_INT][eft_INT] = { - eft_DBL, (void *)dpnp_var_ext_c}; - fmap[DPNPFuncName::DPNP_FN_VAR_EXT][eft_LNG][eft_LNG] = { - eft_DBL, (void *)dpnp_var_ext_c}; - fmap[DPNPFuncName::DPNP_FN_VAR_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void *)dpnp_var_ext_c}; - fmap[DPNPFuncName::DPNP_FN_VAR_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void *)dpnp_var_ext_c}; - return; } diff --git a/dpnp/dpnp_algo/dpnp_algo.pxd b/dpnp/dpnp_algo/dpnp_algo.pxd index d49adcf0b7f..18813e3e04c 100644 --- a/dpnp/dpnp_algo/dpnp_algo.pxd +++ b/dpnp/dpnp_algo/dpnp_algo.pxd @@ -100,8 +100,6 @@ cdef extern from "dpnp_iface_fptr.hpp" namespace "DPNPFuncName": # need this na DPNP_FN_MINIMUM_EXT DPNP_FN_MODF DPNP_FN_MODF_EXT - DPNP_FN_NANVAR - DPNP_FN_NANVAR_EXT DPNP_FN_NONZERO DPNP_FN_ONES DPNP_FN_ONES_LIKE @@ -187,8 +185,6 @@ cdef extern from "dpnp_iface_fptr.hpp" namespace "DPNPFuncName": # need this na DPNP_FN_SEARCHSORTED_EXT DPNP_FN_SORT DPNP_FN_SORT_EXT - DPNP_FN_STD - DPNP_FN_STD_EXT DPNP_FN_SUM DPNP_FN_SUM_EXT DPNP_FN_SVD @@ -202,8 +198,6 @@ cdef extern from "dpnp_iface_fptr.hpp" 
namespace "DPNPFuncName": # need this na DPNP_FN_TRIL_EXT DPNP_FN_TRIU DPNP_FN_TRIU_EXT - DPNP_FN_VAR - DPNP_FN_VAR_EXT DPNP_FN_ZEROS DPNP_FN_ZEROS_LIKE diff --git a/dpnp/dpnp_algo/dpnp_algo_statistics.pxi b/dpnp/dpnp_algo/dpnp_algo_statistics.pxi index 34e0684fcbf..37d51d131ff 100644 --- a/dpnp/dpnp_algo/dpnp_algo_statistics.pxi +++ b/dpnp/dpnp_algo/dpnp_algo_statistics.pxi @@ -39,78 +39,15 @@ __all__ += [ "dpnp_average", "dpnp_correlate", "dpnp_median", - "dpnp_nanvar", - "dpnp_std", - "dpnp_var", ] -# C function pointer to the C library template functions -ctypedef c_dpctl.DPCTLSyclEventRef(*fptr_custom_cov_1in_1out_t)(c_dpctl.DPCTLSyclQueueRef, - void *, void * , size_t, size_t, - const c_dpctl.DPCTLEventVectorRef) -ctypedef c_dpctl.DPCTLSyclEventRef(*fptr_custom_nanvar_t)(c_dpctl.DPCTLSyclQueueRef, - void *, void * , void * , size_t, size_t, - const c_dpctl.DPCTLEventVectorRef) -ctypedef c_dpctl.DPCTLSyclEventRef(*fptr_custom_std_var_1in_1out_t)(c_dpctl.DPCTLSyclQueueRef, - void *, void * , shape_elem_type * , size_t, - shape_elem_type * , size_t, size_t, - const c_dpctl.DPCTLEventVectorRef) - # C function pointer to the C library template functions ctypedef c_dpctl.DPCTLSyclEventRef(*custom_statistic_1in_1out_func_ptr_t)(c_dpctl.DPCTLSyclQueueRef, void *, void * , shape_elem_type * , size_t, shape_elem_type * , size_t, const c_dpctl.DPCTLEventVectorRef) -cdef utils.dpnp_descriptor call_fptr_custom_std_var_1in_1out(DPNPFuncName fptr_name, utils.dpnp_descriptor x1, ddof): - cdef shape_type_c x1_shape = x1.shape - - """ Convert string type names (array.dtype) to C enum DPNPFuncType """ - cdef DPNPFuncType param_type = dpnp_dtype_to_DPNPFuncType(x1.dtype) - - """ get the FPTR data structure """ - cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(fptr_name, param_type, DPNP_FT_NONE) - - x1_obj = x1.get_array() - - # create result array with type given by FPTR data - cdef shape_type_c result_shape = (1,) - cdef utils.dpnp_descriptor result = utils.create_output_descriptor(result_shape, - kernel_data.return_type, - None, - device=x1_obj.sycl_device, - usm_type=x1_obj.usm_type, - sycl_queue=x1_obj.sycl_queue) - - result_sycl_queue = result.get_array().sycl_queue - - cdef c_dpctl.SyclQueue q = result_sycl_queue - cdef c_dpctl.DPCTLSyclQueueRef q_ref = q.get_queue_ref() - - cdef fptr_custom_std_var_1in_1out_t func = kernel_data.ptr - - # stub for interface support - cdef shape_type_c axis - cdef Py_ssize_t axis_size = 0 - - """ Call FPTR function """ - cdef c_dpctl.DPCTLSyclEventRef event_ref = func(q_ref, - x1.get_data(), - result.get_data(), - x1_shape.data(), - x1.ndim, - axis.data(), - axis_size, - ddof, - NULL) # dep_events_ref - - with nogil: c_dpctl.DPCTLEvent_WaitAndThrow(event_ref) - c_dpctl.DPCTLEvent_Delete(event_ref) - - - return result - cpdef dpnp_average(utils.dpnp_descriptor x1): array_sum = dpnp_sum(x1).get_pyobj() @@ -207,53 +144,3 @@ cpdef utils.dpnp_descriptor dpnp_median(utils.dpnp_descriptor array1): c_dpctl.DPCTLEvent_Delete(event_ref) return result - - -cpdef utils.dpnp_descriptor dpnp_nanvar(utils.dpnp_descriptor arr, ddof): - # dpnp_isnan does not support USM array as input in comparison to dpnp.isnan - cdef utils.dpnp_descriptor mask_arr = dpnp.get_dpnp_descriptor(dpnp.isnan(arr.get_pyobj()), - copy_when_nondefault_queue=False) - n = dpnp.count_nonzero(mask_arr.get_pyobj()) - res_size = int(arr.size - n) - cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(arr.dtype) - - cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(DPNP_FN_NANVAR_EXT, 
param1_type, param1_type) - - arr_obj = arr.get_array() - - # create result array with type given by FPTR data - cdef shape_type_c result_shape = utils._object_to_tuple(res_size) - cdef utils.dpnp_descriptor result = utils.create_output_descriptor(result_shape, - kernel_data.return_type, - None, - device=arr_obj.sycl_device, - usm_type=arr_obj.usm_type, - sycl_queue=arr_obj.sycl_queue) - - result_sycl_queue = result.get_array().sycl_queue - - cdef c_dpctl.SyclQueue q = result_sycl_queue - cdef c_dpctl.DPCTLSyclQueueRef q_ref = q.get_queue_ref() - - cdef fptr_custom_nanvar_t func = kernel_data.ptr - - cdef c_dpctl.DPCTLSyclEventRef event_ref = func(q_ref, - arr.get_data(), - mask_arr.get_data(), - result.get_data(), - result.size, - arr.size, - NULL) # dep_events_ref - - with nogil: c_dpctl.DPCTLEvent_WaitAndThrow(event_ref) - c_dpctl.DPCTLEvent_Delete(event_ref) - - return call_fptr_custom_std_var_1in_1out(DPNP_FN_VAR_EXT, result, ddof) - - -cpdef utils.dpnp_descriptor dpnp_std(utils.dpnp_descriptor a, size_t ddof): - return call_fptr_custom_std_var_1in_1out(DPNP_FN_STD_EXT, a, ddof) - - -cpdef utils.dpnp_descriptor dpnp_var(utils.dpnp_descriptor a, size_t ddof): - return call_fptr_custom_std_var_1in_1out(DPNP_FN_VAR_EXT, a, ddof) diff --git a/dpnp/dpnp_array.py b/dpnp/dpnp_array.py index c1fbbc1d124..a5b060396ba 100644 --- a/dpnp/dpnp_array.py +++ b/dpnp/dpnp_array.py @@ -1167,15 +1167,23 @@ def squeeze(self, axis=None): return dpnp.squeeze(self, axis) - def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): - """Returns the variance of the array elements, along given axis. - - .. seealso:: - :obj:`dpnp.var` for full documentation, + def std( + self, + axis=None, + dtype=None, + out=None, + ddof=0, + keepdims=False, + *, + where=True, + ): + """ + Returns the standard deviation of the array elements, along given axis. + Refer to :obj:`dpnp.std` for full documentation. """ - return dpnp.std(self, axis, dtype, out, ddof, keepdims) + return dpnp.std(self, axis, dtype, out, ddof, keepdims, where=where) @property def strides(self): @@ -1207,10 +1215,7 @@ def sum( """ Returns the sum along a given axis. - .. seealso:: - :obj:`dpnp.sum` for full documentation, - :meth:`dpnp.dparray.sum` - + For full documentation refer to :obj:`dpnp.sum`. """ return dpnp.sum( @@ -1307,23 +1312,22 @@ def transpose(self, *axes): res._array_obj = dpt.permute_dims(self._array_obj, axes) return res - def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): + def var( + self, + axis=None, + dtype=None, + out=None, + ddof=0, + keepdims=False, + *, + where=True, + ): """ - Returns the variance of the array elements along given axis. - - Masked entries are ignored, and result elements which are not - finite will be masked. - - Refer to `numpy.var` for full documentation. - - See Also - -------- - :obj:`numpy.ndarray.var` : corresponding function for ndarrays - :obj:`numpy.var` : Equivalent function + Returns the variance of the array elements, along given axis. + Refer to :obj:`dpnp.var` for full documentation. 
""" - - return dpnp.var(self, axis, dtype, out, ddof, keepdims) + return dpnp.var(self, axis, dtype, out, ddof, keepdims, where=where) # 'view' diff --git a/dpnp/dpnp_iface.py b/dpnp/dpnp_iface.py index 247264a79c5..215509c1fc3 100644 --- a/dpnp/dpnp_iface.py +++ b/dpnp/dpnp_iface.py @@ -1,5 +1,3 @@ -# cython: language_level=3 -# distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** # Copyright (c) 2016-2023, Intel Corporation @@ -92,6 +90,8 @@ from dpnp.dpnp_iface_manipulation import __all__ as __all__manipulation from dpnp.dpnp_iface_mathematical import * from dpnp.dpnp_iface_mathematical import __all__ as __all__mathematical +from dpnp.dpnp_iface_nanfunctions import * +from dpnp.dpnp_iface_nanfunctions import __all__ as __all__nanfunctions from dpnp.dpnp_iface_searching import * from dpnp.dpnp_iface_searching import __all__ as __all__searching from dpnp.dpnp_iface_sorting import * @@ -110,6 +110,7 @@ __all__ += __all__logic __all__ += __all__manipulation __all__ += __all__mathematical +__all__ += __all__nanfunctions __all__ += __all__searching __all__ += __all__sorting __all__ += __all__statistics @@ -456,7 +457,7 @@ def get_normalized_queue_device(obj=None, device=None, sycl_queue=None): ) -def get_result_array(a, out=None): +def get_result_array(a, out=None, casting="safe"): """ If `out` is provided, value of `a` array will be copied into the `out` array according to ``safe`` casting rule. @@ -466,11 +467,12 @@ def get_result_array(a, out=None): ---------- a : {dpnp_array} Input array. - out : {dpnp_array, usm_ndarray} If provided, value of `a` array will be copied into it according to ``safe`` casting rule. It should be of the appropriate shape. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Returns ------- @@ -482,21 +484,15 @@ def get_result_array(a, out=None): if out is None: return a else: + dpnp.check_supported_arrays_type(out) if out.shape != a.shape: raise ValueError( f"Output array of shape {a.shape} is needed, got {out.shape}." 
) - elif not isinstance(out, dpnp_array): - if isinstance(out, dpt.usm_ndarray): - out = dpnp_array._create_from_usm_ndarray(out) - else: - raise TypeError( - "Output array must be any of supported type, but got {}".format( - type(out) - ) - ) - - dpnp.copyto(out, a, casting="safe") + elif isinstance(out, dpt.usm_ndarray): + out = dpnp_array._create_from_usm_ndarray(out) + + dpnp.copyto(out, a, casting=casting) return out diff --git a/dpnp/dpnp_iface_manipulation.py b/dpnp/dpnp_iface_manipulation.py index 7ee53bca377..0913ddb886d 100644 --- a/dpnp/dpnp_iface_manipulation.py +++ b/dpnp/dpnp_iface_manipulation.py @@ -1,5 +1,3 @@ -# cython: language_level=3 -# distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** # Copyright (c) 2016-2023, Intel Corporation diff --git a/dpnp/dpnp_iface_mathematical.py b/dpnp/dpnp_iface_mathematical.py index f6ca59e6907..53ae89d2a51 100644 --- a/dpnp/dpnp_iface_mathematical.py +++ b/dpnp/dpnp_iface_mathematical.py @@ -1,5 +1,3 @@ -# cython: language_level=3 -# distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** # Copyright (c) 2016-2023, Intel Corporation @@ -109,10 +107,6 @@ "mod", "modf", "multiply", - "nancumprod", - "nancumsum", - "nanprod", - "nansum", "negative", "positive", "power", @@ -1769,179 +1763,6 @@ def multiply( ) -def nancumprod(x1, **kwargs): - """ - Return the cumulative product of array elements over a given axis treating Not a Numbers (NaNs) as one. - - For full documentation refer to :obj:`numpy.nancumprod`. - - Limitations - ----------- - Parameter `x` is supported as :class:`dpnp.ndarray`. - Keyword argument `kwargs` is currently unsupported. - Otherwise the function will be executed sequentially on CPU. - Input array data types are limited by supported DPNP :ref:`Data types`. - - .. seealso:: :obj:`dpnp.cumprod` : Return the cumulative product of elements along a given axis. - - Examples - -------- - >>> import dpnp as np - >>> a = np.array([1., np.nan]) - >>> result = np.nancumprod(a) - >>> [x for x in result] - [1.0, 1.0] - >>> b = np.array([[1., 2., np.nan], [4., np.nan, 6.]]) - >>> result = np.nancumprod(b) - >>> [x for x in result] - [1.0, 2.0, 2.0, 8.0, 8.0, 48.0] - - - """ - - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc and not kwargs: - return dpnp_nancumprod(x1_desc).get_pyobj() - - return call_origin(numpy.nancumprod, x1, **kwargs) - - -def nancumsum(x1, **kwargs): - """ - Return the cumulative sum of the elements along a given axis. - - For full documentation refer to :obj:`numpy.nancumsum`. - - Limitations - ----------- - Parameter `x` is supported as :class:`dpnp.ndarray`. - Keyword argument `kwargs` is currently unsupported. - Otherwise the function will be executed sequentially on CPU. - Input array data types are limited by supported DPNP :ref:`Data types`. - - See Also - -------- - :obj:`dpnp.cumsum` : Return the cumulative sum of the elements along a given axis. 
- - Examples - -------- - >>> import dpnp as np - >>> a = np.array([1., np.nan]) - >>> result = np.nancumsum(a) - >>> [x for x in result] - [1.0, 1.0] - >>> b = np.array([[1., 2., np.nan], [4., np.nan, 6.]]) - >>> result = np.nancumprod(b) - >>> [x for x in result] - [1.0, 3.0, 3.0, 7.0, 7.0, 13.0] - - """ - - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc and not kwargs: - return dpnp_nancumsum(x1_desc).get_pyobj() - - return call_origin(numpy.nancumsum, x1, **kwargs) - - -def nanprod( - a, - axis=None, - dtype=None, - out=None, - keepdims=False, - initial=None, - where=True, -): - """ - Return the product of array elements over a given axis treating Not a Numbers (NaNs) as ones. - - For full documentation refer to :obj:`numpy.nanprod`. - - Returns - ------- - out : dpnp.ndarray - A new array holding the result is returned unless `out` is specified, in which case it is returned. - - See Also - -------- - :obj:`dpnp.prod` : Returns product across array propagating NaNs. - :obj:`dpnp.isnan` : Test element-wise for NaN and return result as a boolean array. - - Limitations - ----------- - Input array is only supported as either :class:`dpnp.ndarray` or :class:`dpctl.tensor.usm_ndarray`. - Parameters `initial`, and `where` are only supported with their default values. - Otherwise the function will be executed sequentially on CPU. - Input array data types are limited by supported DPNP :ref:`Data types`. - - Examples - -------- - >>> import dpnp as np - >>> np.nanprod(np.array(1)) - array(1) - >>> np.nanprod(np.array([1])) - array(1) - >>> np.nanprod(np.array([1, np.nan])) - array(1.0) - >>> a = np.array([[1, 2], [3, np.nan]]) - >>> np.nanprod(a) - array(6.0) - >>> np.nanprod(a, axis=0) - array([3., 2.]) - - """ - - dpnp.check_supported_arrays_type(a) - - if issubclass(a.dtype.type, dpnp.inexact): - mask = dpnp.isnan(a) - a = dpnp.array(a, copy=True) - dpnp.copyto(a, 1, where=mask) - - return dpnp.prod( - a, - axis=axis, - dtype=dtype, - out=out, - keepdims=keepdims, - initial=initial, - where=where, - ) - - -def nansum(x1, **kwargs): - """ - Calculate sum() function treating 'Not a Numbers' (NaN) as zero. - - For full documentation refer to :obj:`numpy.nansum`. - - Limitations - ----------- - Parameter `x1` is supported as :class:`dpnp.ndarray`. - Keyword argument `kwargs` is currently unsupported. - Otherwise the function will be executed sequentially on CPU. - Input array data types are limited by supported DPNP :ref:`Data types`. - - Examples - -------- - >>> import dpnp as np - >>> np.nansum(np.array([1, 2])) - 3 - >>> np.nansum(np.array([[1, 2], [3, 4]])) - 10 - - """ - - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc and not kwargs: - result_obj = dpnp_nansum(x1_desc).get_pyobj() - result = dpnp.convert_single_elem_array_to_scalar(result_obj) - return result - - return call_origin(numpy.nansum, x1, **kwargs) - - def negative( x, /, @@ -2233,11 +2054,11 @@ def prod( ) elif initial is not None: raise NotImplementedError( - "initial keyword argument is only supported by its default value." + "initial keyword argument is only supported with its default value." ) elif where is not True: raise NotImplementedError( - "where keyword argument is only supported by its default value." + "where keyword argument is only supported with its default value." 
) else: dpt_array = dpnp.get_usm_ndarray(a) @@ -2768,8 +2589,8 @@ def sum( ----------- Parameters `x` is supported as either :class:`dpnp.ndarray` or :class:`dpctl.tensor.usm_ndarray`. - Parameters `out`, `initial` and `where` are supported with their default values. - Otherwise the function will be executed sequentially on CPU. + Parameters `initial` and `where` are supported with their default values. + Otherwise ``NotImplementedError`` exception will be raised. Input array data types are limited by supported DPNP :ref:`Data types`. Examples @@ -2790,12 +2611,14 @@ def sum( axis = normalize_axis_tuple(axis, x.ndim, "axis") - if out is not None: - pass - elif initial != 0: - pass + if initial != 0: + raise NotImplementedError( + "initial keyword argument is only supported with its default value." + ) elif where is not True: - pass + raise NotImplementedError( + "where keyword argument is only supported with its default value." + ) else: if ( len(x.shape) == 2 @@ -2853,18 +2676,8 @@ def sum( y = dpt.sum( dpnp.get_usm_ndarray(x), axis=axis, dtype=dtype, keepdims=keepdims ) - return dpnp_array._create_from_usm_ndarray(y) - - return call_origin( - numpy.sum, - x, - axis=axis, - dtype=dtype, - out=out, - keepdims=keepdims, - initial=initial, - where=where, - ) + result = dpnp_array._create_from_usm_ndarray(y) + return dpnp.get_result_array(result, out, casting="same_kind") def trapz(y1, x1=None, dx=1.0, axis=-1): diff --git a/dpnp/dpnp_iface_nanfunctions.py b/dpnp/dpnp_iface_nanfunctions.py new file mode 100644 index 00000000000..966a2c9a578 --- /dev/null +++ b/dpnp/dpnp_iface_nanfunctions.py @@ -0,0 +1,418 @@ +# -*- coding: utf-8 -*- +# ***************************************************************************** +# Copyright (c) 2016-2023, Intel Corporation +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# - Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# - Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +# THE POSSIBILITY OF SUCH DAMAGE. 
+# ***************************************************************************** + +""" +Interface of the nan functions of the DPNP + +Notes +----- +This module is a face or public interface file for the library +it contains: + - Interface functions + - documentation for the functions + - The functions parameters check + +""" + +import numpy + +import dpnp + +from .dpnp_algo import * +from .dpnp_utils import * + +__all__ = [ + "nancumprod", + "nancumsum", + "nanprod", + "nansum", + "nanvar", +] + + +def _replace_nan(a, val): + """ + Replace NaNs in array `a` with `val`. + + If `a` is of inexact type, make a copy of `a`, replace NaNs with + the `val` value, and return the copy together with a boolean mask + marking the locations where NaNs were present. If `a` is not of + inexact type, do nothing and return `a` together with a mask of None. + + Parameters + ---------- + a : {dpnp_array, usm_ndarray} + Input array. + val : float + NaN values are set to `val` before doing the operation. + + Returns + ------- + out : {dpnp_array} + If `a` is of inexact type, return a copy of `a` with the NaNs + replaced by the fill value, otherwise return `a`. + mask: {bool, None} + If `a` is of inexact type, return a boolean mask marking locations of + NaNs, otherwise return ``None``. + + """ + + dpnp.check_supported_arrays_type(a) + if issubclass(a.dtype.type, dpnp.inexact): + mask = dpnp.isnan(a) + if not dpnp.any(mask): + mask = None + else: + a = dpnp.array(a, copy=True) + dpnp.copyto(a, val, where=mask) + else: + mask = None + + return a, mask + + +def nancumprod(x1, **kwargs): + """ + Return the cumulative product of array elements over a given axis treating Not a Numbers (NaNs) as one. + + For full documentation refer to :obj:`numpy.nancumprod`. + + Limitations + ----------- + Parameter `x` is supported as :class:`dpnp.ndarray`. + Keyword argument `kwargs` is currently unsupported. + Otherwise the function will be executed sequentially on CPU. + Input array data types are limited by supported DPNP :ref:`Data types`. + + .. seealso:: :obj:`dpnp.cumprod` : Return the cumulative product of elements along a given axis. + + Examples + -------- + >>> import dpnp as np + >>> a = np.array([1., np.nan]) + >>> result = np.nancumprod(a) + >>> [x for x in result] + [1.0, 1.0] + >>> b = np.array([[1., 2., np.nan], [4., np.nan, 6.]]) + >>> result = np.nancumprod(b) + >>> [x for x in result] + [1.0, 2.0, 2.0, 8.0, 8.0, 48.0] + + """ + + x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) + if x1_desc and not kwargs: + return dpnp_nancumprod(x1_desc).get_pyobj() + + return call_origin(numpy.nancumprod, x1, **kwargs) + + +def nancumsum(x1, **kwargs): + """ + Return the cumulative sum of the elements along a given axis. + + For full documentation refer to :obj:`numpy.nancumsum`. + + Limitations + ----------- + Parameter `x` is supported as :class:`dpnp.ndarray`. + Keyword argument `kwargs` is currently unsupported. + Otherwise the function will be executed sequentially on CPU. + Input array data types are limited by supported DPNP :ref:`Data types`. + + See Also + -------- + :obj:`dpnp.cumsum` : Return the cumulative sum of the elements along a given axis. 
+ + Examples + -------- + >>> import dpnp as np + >>> a = np.array([1., np.nan]) + >>> result = np.nancumsum(a) + >>> [x for x in result] + [1.0, 1.0] + >>> b = np.array([[1., 2., np.nan], [4., np.nan, 6.]]) + >>> result = np.nancumsum(b) + >>> [x for x in result] + [1.0, 3.0, 3.0, 7.0, 7.0, 13.0] + + """ + + x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) + if x1_desc and not kwargs: + return dpnp_nancumsum(x1_desc).get_pyobj() + + return call_origin(numpy.nancumsum, x1, **kwargs) + + +def nansum(x1, **kwargs): + """ + Calculate sum() function treating 'Not a Numbers' (NaN) as zero. + + For full documentation refer to :obj:`numpy.nansum`. + + Limitations + ----------- + Parameter `x1` is supported as :class:`dpnp.ndarray`. + Keyword argument `kwargs` is currently unsupported. + Otherwise the function will be executed sequentially on CPU. + Input array data types are limited by supported DPNP :ref:`Data types`. + + Examples + -------- + >>> import dpnp as np + >>> np.nansum(np.array([1, 2])) + 3 + >>> np.nansum(np.array([[1, 2], [3, 4]])) + 10 + + """ + + x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) + if x1_desc and not kwargs: + result_obj = dpnp_nansum(x1_desc).get_pyobj() + result = dpnp.convert_single_elem_array_to_scalar(result_obj) + return result + + return call_origin(numpy.nansum, x1, **kwargs) + + +def nanprod( + a, + axis=None, + dtype=None, + out=None, + keepdims=False, + initial=None, + where=True, +): + """ + Return the product of array elements over a given axis treating Not a Numbers (NaNs) as ones. + + For full documentation refer to :obj:`numpy.nanprod`. + + Returns + ------- + out : dpnp.ndarray + A new array holding the result is returned unless `out` is specified, in which case it is returned. + + See Also + -------- + :obj:`dpnp.prod` : Returns product across array propagating NaNs. + :obj:`dpnp.isnan` : Test element-wise for NaN and return result as a boolean array. + + Limitations + ----------- + Input array is only supported as either :class:`dpnp.ndarray` or :class:`dpctl.tensor.usm_ndarray`. + Parameters `initial`, and `where` are only supported with their default values. + Otherwise the function will be executed sequentially on CPU. + Input array data types are limited by supported DPNP :ref:`Data types`. + + Examples + -------- + >>> import dpnp as np + >>> np.nanprod(np.array(1)) + array(1) + >>> np.nanprod(np.array([1])) + array(1) + >>> np.nanprod(np.array([1, np.nan])) + array(1.0) + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanprod(a) + array(6.0) + >>> np.nanprod(a, axis=0) + array([3., 2.]) + + """ + + a, mask = _replace_nan(a, 1) + + return dpnp.prod( + a, + axis=axis, + dtype=dtype, + out=out, + keepdims=keepdims, + initial=initial, + where=where, + ) + + +def nanvar( + a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True +): + """ + Compute the variance along the specified axis, while ignoring NaNs. + + For full documentation refer to :obj:`numpy.nanvar`. + + Parameters + ---------- + a : {dpnp_array, usm_ndarray}: + Input array. + axis : int or tuple of ints, optional + Axis or axes along which the variances must be computed. If a tuple + of unique integers is given, the variances are computed over multiple axes. + If ``None``, the variance is computed over the entire array. + Default: `None`. + dtype : dtype, optional + Type to use in computing the variance.
For arrays of + integer type the default real-valued floating-point data type is used, + for arrays of float types it is the same as the array type. + out : {dpnp_array, usm_ndarray}, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type (of the calculated + values) will be cast if necessary. + ddof : {int, float}, optional + Means Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` corresponds to the total + number of elements over which the variance is calculated. + Default: `0.0`. + keepdims : bool, optional + If ``True``, the reduced axes (dimensions) are included in the result + as singleton dimensions, so that the returned array remains + compatible with the input array according to Array Broadcasting + rules. Otherwise, if ``False``, the reduced axes are not included in + the returned array. Default: ``False``. + + Returns + ------- + out : dpnp.ndarray + an array containing the variances. If the variance was computed + over the entire array, a zero-dimensional array is returned. + + If `a` has a real-valued floating-point data type, the returned + array will have the same data type as `a`. + If `a` has a boolean or integral data type, the returned array + will have the default floating point data type for the device + where input array `a` is allocated. + + Limitations + ----------- + Parameters `where` is only supported with its default value. + Otherwise ``NotImplementedError`` exception will be raised. + Input array data types are limited by real valued data types. + + See Also + -------- + :obj:`dpnp.var` : Compute the variance along the specified axis. + :obj:`dpnp.std` : Compute the standard deviation along the specified axis. + :obj:`dpnp.nanmean` : Compute the arithmetic mean along the specified axis, + ignoring NaNs. + :obj:`dpnp.nanstd` : Compute the standard deviation along + the specified axis, while ignoring NaNs. + + Examples + -------- + >>> import dpnp as np + >>> a = np.array([[1, np.nan], [3, 4]]) + >>> np.nanvar(a) + array(1.5555555555555554) + >>> np.nanvar(a, axis=0) + array([1., 0.]) + >>> np.nanvar(a, axis=1) + array([0., 0.25]) # may vary + + """ + + dpnp.check_supported_arrays_type(a) + if where is not True: + raise NotImplementedError( + "where keyword argument is only supported with its default value." + ) + elif not isinstance(ddof, (int, float)): + raise TypeError( + "An integer or float is required, but got {}".format(type(ddof)) + ) + else: + arr, mask = _replace_nan(a, 0) + if mask is None: + return dpnp.var( + arr, + axis=axis, + dtype=dtype, + out=out, + ddof=ddof, + keepdims=keepdims, + where=where, + ) + + if dtype is not None: + dtype = dpnp.dtype(dtype) + if not issubclass(dtype.type, dpnp.inexact): + raise TypeError( + "If input is inexact, then dtype must be inexact." + ) + if out is not None: + dpnp.check_supported_arrays_type(out) + if not dpnp.issubdtype(out.dtype, dpnp.inexact): + raise TypeError( + "If input is inexact, then out must be inexact." + ) + + # Compute mean + var_dtype = a.real.dtype if dtype is None else dtype + cnt = dpnp.sum( + ~mask, axis=axis, dtype=var_dtype, keepdims=True, where=where + ) + avg = dpnp.sum( + arr, axis=axis, dtype=var_dtype, keepdims=True, where=where + ) + avg = dpnp.divide(avg, cnt, out=avg) + + # Compute squared deviation from mean. 
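+ # `avg` holds the mean of the non-NaN elements along `axis`; the code below
+ # squares the deviations (zeroing the positions that were NaN), sums them and
+ # divides by (cnt - ddof), replacing the result with NaN wherever that count
+ # is not positive.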
+ if arr.dtype == avg.dtype: + arr = dpnp.subtract(arr, avg, out=arr) + else: + arr = dpnp.subtract(arr, avg) + dpnp.copyto(arr, 0.0, where=mask) + if dpnp.issubdtype(arr.dtype, dpnp.complexfloating): + sqr = dpnp.multiply(arr, arr.conj(), out=arr).real + else: + sqr = dpnp.multiply(arr, arr, out=arr) + + # Compute variance + var = dpnp.sum( + sqr, + axis=axis, + dtype=var_dtype, + out=out, + keepdims=keepdims, + where=where, + ) + + if var.ndim < cnt.ndim: + cnt = cnt.squeeze(axis) + cnt -= ddof + dpnp.divide(var, cnt, out=var) + + isbad = cnt <= 0 + if dpnp.any(isbad): + # NaN, inf, or negative numbers are all possible bad + # values, so explicitly replace them with NaN. + dpnp.copyto(var, dpnp.nan, where=isbad) + + return var diff --git a/dpnp/dpnp_iface_statistics.py b/dpnp/dpnp_iface_statistics.py index 07cad8e4f30..5d00154659c 100644 --- a/dpnp/dpnp_iface_statistics.py +++ b/dpnp/dpnp_iface_statistics.py @@ -40,6 +40,7 @@ import dpctl.tensor as dpt import numpy +from numpy.core.numeric import normalize_axis_index import dpnp from dpnp.dpnp_algo import * @@ -60,12 +61,52 @@ "median", "min", "ptp", - "nanvar", "std", "var", ] +def _count_reduce_items(arr, axis, where=True): + """ + Calculates the number of items used in a reduction operation along the specified axis or axes. + + Parameters + ---------- + arr : {dpnp_array, usm_ndarray} + Input array. + axis : int or tuple of ints, optional + Axis or axes along which the number of items used in a reduction operation must be counted. + If a tuple of unique integers is given, the items are counted over multiple axes. + If ``None``, the items are counted over the entire array. + Default: `None`. + + Returns + ------- + out : int + The number of items used in the reduction operation. + + Limitations + ----------- + Parameter `where` is only supported with its default value. + + """ + if where is True: + # no boolean mask given, calculate items according to axis + if axis is None: + axis = tuple(range(arr.ndim)) + elif not isinstance(axis, tuple): + axis = (axis,) + items = 1 + for ax in axis: + items *= arr.shape[normalize_axis_index(ax, arr.ndim)] + items = dpnp.intp(items) + else: + raise NotImplementedError( + "where keyword argument is only supported with its default value." + ) + return items + + def amax(a, axis=None, out=None, keepdims=False, initial=None, where=True): """ Return the maximum of an array or maximum along an axis. @@ -342,7 +383,7 @@ def max(a, axis=None, out=None, keepdims=False, initial=None, where=True): ----------- Input and output arrays are only supported as either :class:`dpnp.ndarray` or :class:`dpctl.tensor.usm_ndarray`. - Parameters `where`, and `initial` are supported only with their default values. + Parameters `where`, and `initial` are only supported with their default values. Otherwise ``NotImplementedError`` exception will be raised. Input array data types are limited by supported DPNP :ref:`Data types`. @@ -378,11 +419,11 @@ def max(a, axis=None, out=None, keepdims=False, initial=None, where=True): if initial is not None: raise NotImplementedError( - "initial keyword argument is only supported by its default value." + "initial keyword argument is only supported with its default value." ) elif where is not True: raise NotImplementedError( - "where keyword argument is only supported by its default value." + "where keyword argument is only supported with its default value."
) else: dpt_array = dpnp.get_usm_ndarray(a) @@ -409,7 +450,7 @@ def mean(a, /, axis=None, dtype=None, out=None, keepdims=False, *, where=True): ----------- Parameters `a` is supported as either :class:`dpnp.ndarray` or :class:`dpctl.tensor.usm_ndarray`. - Parameter `where` is supported only with their default values. + Parameter `where` is only supported with its default value. Otherwise ``NotImplementedError`` exception will be raised. Input array data types are limited by supported DPNP :ref:`Data types`. @@ -440,7 +481,7 @@ def mean(a, /, axis=None, dtype=None, out=None, keepdims=False, *, where=True): if where is not True: raise NotImplementedError( - "where keyword argument is only supported by its default value." + "where keyword argument is only supported with its default value." ) else: dpt_array = dpnp.get_usm_ndarray(a) @@ -517,7 +558,7 @@ def min(a, axis=None, out=None, keepdims=False, initial=None, where=True): ----------- Input and output arrays are only supported as either :class:`dpnp.ndarray` or :class:`dpctl.tensor.usm_ndarray`. - Parameters `where`, and `initial` are supported only with their default values. + Parameters `where`, and `initial` are only supported with their default values. Otherwise ``NotImplementedError`` exception will be raised. Input array data types are limited by supported DPNP :ref:`Data types`. @@ -553,11 +594,11 @@ def min(a, axis=None, out=None, keepdims=False, initial=None, where=True): if initial is not None: raise NotImplementedError( - "initial keyword argument is only supported by its default value." + "initial keyword argument is only supported with its default value." ) elif where is not True: raise NotImplementedError( - "where keyword argument is only supported by its default value." + "where keyword argument is only supported with its default value." ) else: dpt_array = dpnp.get_usm_ndarray(a) @@ -611,70 +652,70 @@ ) -def nanvar(x1, axis=None, dtype=None, out=None, ddof=0, keepdims=False): - """ - Compute the variance along the specified axis, while ignoring NaNs. - - For full documentation refer to :obj:`numpy.nanvar`. - - Limitations - ----------- - Input array is supported as :obj:`dpnp.ndarray`. - Parameter `axis` is supported only with default value ``None``. - Parameter `dtype` is supported only with default value ``None``. - Parameter `out` is supported only with default value ``None``. - Parameter `keepdims` is supported only with default value ``False``. - Otherwise the function will be executed sequentially on CPU. +def std( + a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True +): """ + Compute the standard deviation along the specified axis. - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc: - if x1.size == 0: - pass - elif axis is not None: - pass - elif dtype is not None: - pass - elif out is not None: - pass - elif keepdims: - pass - else: - result_obj = dpnp_nanvar(x1_desc, ddof).get_pyobj() - result = dpnp.convert_single_elem_array_to_scalar(result_obj) - - return result - - return call_origin( - numpy.nanvar, - x1, - axis=axis, - dtype=dtype, - out=out, - ddof=ddof, - keepdims=keepdims, - ) + For full documentation refer to :obj:`numpy.std`. + Parameters + ---------- + a : {dpnp_array, usm_ndarray}: + Input array. + axis : int or tuple of ints, optional + Axis or axes along which the variances must be computed. If a tuple + of unique integers is given, the variances are computed over multiple axes.
+ If ``None``, the variance is computed over the entire array. + Default: `None`. + dtype : dtype, optional + Type to use in computing the standard deviation. For arrays of + integer type the default real-valued floating-point data type is used, + for arrays of float types it is the same as the array type. + out : {dpnp_array, usm_ndarray}, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type (of the calculated + values) will be cast if necessary. + ddof : {int, float}, optional + Means Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` corresponds to the total + number of elements over which the variance is calculated. + Default: `0.0`. + keepdims : bool, optional + If ``True``, the reduced axes (dimensions) are included in the result + as singleton dimensions, so that the returned array remains + compatible with the input array according to Array Broadcasting + rules. Otherwise, if ``False``, the reduced axes are not included in + the returned array. Default: ``False``. -def std(x1, axis=None, dtype=None, out=None, ddof=0, keepdims=False): - """ - Compute the standard deviation along the specified axis. + Returns + ------- + out : dpnp.ndarray + an array containing the standard deviations. If the standard + deviation was computed over the entire array, a zero-dimensional + array is returned. - For full documentation refer to :obj:`numpy.std`. + If `a` has a real-valued floating-point data type, the returned + array will have the same data type as `a`. + If `a` has a boolean or integral data type, the returned array + will have the default floating point data type for the device + where input array `a` is allocated. Limitations ----------- - Input array is supported as :obj:`dpnp.ndarray`. - Size of input array is limited by ``a.size > 0``. - Parameter `axis` is supported only with default value ``None``. - Parameter `dtype` is supported only with default value ``None``. - Parameter `out` is supported only with default value ``None``. - Parameter `keepdims` is supported only with default value ``False``. - Otherwise the function will be executed sequentially on CPU. + Parameters `where` is only supported with its default value. + Otherwise ``NotImplementedError`` exception will be raised. Input array data types are limited by supported DPNP :ref:`Data types`. + Notes + ----- + Note that, for complex numbers, the absolute value is taken before squaring, + so that the result is always real and nonnegative. + See Also -------- + :obj:`dpnp.ndarray.std` : corresponding function for ndarrays. :obj:`dpnp.var` : Compute the variance along the specified axis. :obj:`dpnp.mean` : Compute the arithmetic mean along the specified axis. 
:obj:`dpnp.nanmean` : Compute the arithmetic mean along the specified axis, @@ -689,50 +730,113 @@ def std(x1, axis=None, dtype=None, out=None, ddof=0, keepdims=False): >>> import dpnp as np >>> a = np.array([[1, 2], [3, 4]]) >>> np.std(a) - 1.118033988749895 + array(1.118033988749895) + >>> np.std(a, axis=0) + array([1., 1.]) + >>> np.std(a, axis=1) + array([0.5, 0.5]) """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc: - if x1_desc.size == 0: - pass - elif axis is not None: - pass - elif dtype is not None: - pass - elif out is not None: - pass - elif keepdims: - pass - else: - result_obj = dpnp_std(x1_desc, ddof).get_pyobj() - result = dpnp.convert_single_elem_array_to_scalar(result_obj) + dpnp.check_supported_arrays_type(a) - return result + if where is not True: + raise NotImplementedError( + "where keyword argument is only supported with its default value." + ) + elif not isinstance(ddof, (int, float)): + raise TypeError( + "An integer or float is required, but got {}".format(type(ddof)) + ) + else: + if dpnp.issubdtype(a.dtype, dpnp.complexfloating): + result = dpnp.var( + a, + axis=axis, + dtype=None, + out=out, + ddof=ddof, + keepdims=keepdims, + where=where, + ) + dpnp.sqrt(result, out=result) + else: + dpt_array = dpnp.get_usm_ndarray(a) + result = dpnp_array._create_from_usm_ndarray( + dpt.std( + dpt_array, axis=axis, correction=ddof, keepdims=keepdims + ) + ) + result = dpnp.get_result_array(result, out) - return call_origin(numpy.std, x1, axis, dtype, out, ddof, keepdims) + if dtype is not None and out is None: + result = result.astype(dtype, casting="same_kind") + return result -def var(x1, axis=None, dtype=None, out=None, ddof=0, keepdims=False): +def var( + a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True +): """ Compute the variance along the specified axis. For full documentation refer to :obj:`numpy.var`. + Parameters + ---------- + a : {dpnp_array, usm_ndarray}: + Input array. + axis : int or tuple of ints, optional + axis or axes along which the variances must be computed. If a tuple + of unique integers is given, the variances are computed over multiple axes. + If ``None``, the variance is computed over the entire array. + Default: `None`. + dtype : dtype, optional + Type to use in computing the variance. For arrays of integer type + the default real-valued floating-point data type is used, + for arrays of float types it is the same as the array type. + out : {dpnp_array, usm_ndarray}, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type (of the calculated + values) will be cast if necessary. + ddof : {int, float}, optional + Means Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` corresponds to the total + number of elements over which the variance is calculated. + Default: `0.0`. + keepdims : bool, optional + If ``True``, the reduced axes (dimensions) are included in the result + as singleton dimensions, so that the returned array remains + compatible with the input array according to Array Broadcasting + rules. Otherwise, if ``False``, the reduced axes are not included in + the returned array. Default: ``False``. + + Returns + ------- + out : dpnp.ndarray + an array containing the variances. If the variance was computed + over the entire array, a zero-dimensional array is returned. 
+ + If `a` has a real-valued floating-point data type, the returned + array will have the same data type as `a`. + If `a` has a boolean or integral data type, the returned array + will have the default floating point data type for the device + where input array `a` is allocated. + Limitations ----------- - Input array is supported as :obj:`dpnp.ndarray`. - Size of input array is limited by ``a.size > 0``. - Parameter `axis` is supported only with default value ``None``. - Parameter `dtype` is supported only with default value ``None``. - Parameter `out` is supported only with default value ``None``. - Parameter `keepdims` is supported only with default value ``False``. - Otherwise the function will be executed sequentially on CPU. + Parameters `where` is only supported with its default value. + Otherwise ``NotImplementedError`` exception will be raised. Input array data types are limited by supported DPNP :ref:`Data types`. + Notes + ----- + Note that, for complex numbers, the absolute value is taken before squaring, + so that the result is always real and nonnegative. + See Also -------- + :obj:`dpnp.ndarray.var` : corresponding function for ndarrays. :obj:`dpnp.std` : Compute the standard deviation along the specified axis. :obj:`dpnp.mean` : Compute the arithmetic mean along the specified axis. :obj:`dpnp.nanmean` : Compute the arithmetic mean along the specified axis, @@ -747,26 +851,57 @@ def var(x1, axis=None, dtype=None, out=None, ddof=0, keepdims=False): >>> import dpnp as np >>> a = np.array([[1, 2], [3, 4]]) >>> np.var(a) - 1.25 + array(1.25) + >>> np.var(a, axis=0) + array([1., 1.]) + >>> np.var(a, axis=1) + array([0.25, 0.25]) """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc: - if x1_desc.size == 0: - pass - elif axis is not None: - pass - elif dtype is not None: - pass - elif out is not None: - pass - elif keepdims: - pass + dpnp.check_supported_arrays_type(a) + if where is not True: + raise NotImplementedError( + "where keyword argument is only supported with its default value." + ) + elif not isinstance(ddof, (int, float)): + raise TypeError( + "An integer or float is required, but got {}".format(type(ddof)) + ) + else: + if dpnp.issubdtype(a.dtype, dpnp.complexfloating): + # Note that if dtype is not of inexact type then arrmean will not be either. 
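+ # For complex input the variance is computed explicitly: the squared
+ # absolute deviations |a - arrmean|**2 are summed along `axis` and then
+ # divided by the adjusted element count (N - ddof).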
+ arrmean = dpnp.mean( + a, axis=axis, dtype=dtype, keepdims=True, where=where + ) + x = dpnp.subtract(a, arrmean) + x = dpnp.multiply(x, x.conj(), out=x).real + result = dpnp.sum( + x, + axis=axis, + dtype=a.real.dtype, + out=out, + keepdims=keepdims, + where=where, + ) + + cnt = _count_reduce_items(a, axis, where) + cnt = numpy.max(cnt - ddof, 0).astype( + result.dtype, casting="same_kind" + ) + if not cnt: + cnt = dpnp.nan + + dpnp.divide(result, cnt, out=result) else: - result_obj = dpnp_var(x1_desc, ddof).get_pyobj() - result = dpnp.convert_single_elem_array_to_scalar(result_obj) - - return result - - return call_origin(numpy.var, x1, axis, dtype, out, ddof, keepdims) + dpt_array = dpnp.get_usm_ndarray(a) + result = dpnp_array._create_from_usm_ndarray( + dpt.var( + dpt_array, axis=axis, correction=ddof, keepdims=keepdims + ) + ) + result = dpnp.get_result_array(result, out) + + if out is None and dtype is not None: + result = result.astype(dtype, casting="same_kind") + return result diff --git a/tests/conftest.py b/tests/conftest.py index 0213f52e09e..231af2e34fa 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -26,6 +26,7 @@ import os import sys +import warnings import dpctl import numpy @@ -122,6 +123,20 @@ def suppress_invalid_numpy_warnings(): numpy.seterr(**old_settings) # reset to default +@pytest.fixture +def suppress_dof_numpy_warnings(): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", r"Degrees of freedom <= 0 for slice") + yield + + +@pytest.fixture +def suppress_mean_empty_slice_numpy_warnings(): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", r"Mean of empty slice") + yield + + @pytest.fixture def suppress_divide_invalid_numpy_warnings( suppress_divide_numpy_warnings, suppress_invalid_numpy_warnings diff --git a/tests/skipped_tests.tbl b/tests/skipped_tests.tbl index 87a29e2cb0c..cca2992e656 100644 --- a/tests/skipped_tests.tbl +++ b/tests/skipped_tests.tbl @@ -555,8 +555,6 @@ tests/third_party/cupy/math_tests/test_misc.py::TestConvolve::test_convolve_diff tests/third_party/cupy/math_tests/test_misc.py::TestConvolve::test_convolve_diff_types[full] tests/third_party/cupy/math_tests/test_rounding.py::TestRounding::test_fix -tests/third_party/cupy/math_tests/test_sumprod.py::TestSumprod::test_sum_out -tests/third_party/cupy/math_tests/test_sumprod.py::TestSumprod::test_sum_out_wrong_shape tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_ndarray_cumprod_2dim_with_axis tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_arraylike @@ -1026,57 +1024,30 @@ tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_9_{ax tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanstd_float16 tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanstd_huge tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanstd_out -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanvar_float16 -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanvar_huge -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanvar_out tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_0_{axis=None, ddof=0, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_0_{axis=None, ddof=0, keepdims=True, 
shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_10_{axis=0, ddof=0, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_10_{axis=0, ddof=0, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_11_{axis=0, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_11_{axis=0, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_12_{axis=0, ddof=1, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_12_{axis=0, ddof=1, keepdims=True, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_13_{axis=0, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_13_{axis=0, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_14_{axis=0, ddof=1, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_14_{axis=0, ddof=1, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_15_{axis=0, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_15_{axis=0, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_16_{axis=1, ddof=0, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_16_{axis=1, ddof=0, keepdims=True, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_17_{axis=1, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_17_{axis=1, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_18_{axis=1, ddof=0, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_18_{axis=1, ddof=0, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_19_{axis=1, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_19_{axis=1, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_1_{axis=None, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_1_{axis=None, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_20_{axis=1, ddof=1, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_20_{axis=1, ddof=1, keepdims=True, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_21_{axis=1, ddof=1, keepdims=True, shape=(4, 3, 
5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_21_{axis=1, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_22_{axis=1, ddof=1, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_22_{axis=1, ddof=1, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_23_{axis=1, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_23_{axis=1, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_2_{axis=None, ddof=0, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_2_{axis=None, ddof=0, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_3_{axis=None, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_3_{axis=None, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_4_{axis=None, ddof=1, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_4_{axis=None, ddof=1, keepdims=True, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_5_{axis=None, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_5_{axis=None, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_6_{axis=None, ddof=1, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_6_{axis=None, ddof=1, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_7_{axis=None, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_7_{axis=None, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_8_{axis=0, ddof=0, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_8_{axis=0, ddof=0, keepdims=True, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_9_{axis=0, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_9_{axis=0, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_all tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_all_nan tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_axis0 diff --git a/tests/skipped_tests_gpu.tbl b/tests/skipped_tests_gpu.tbl index b6f6ceb4591..b8df4b5179d 100644 --- a/tests/skipped_tests_gpu.tbl +++ b/tests/skipped_tests_gpu.tbl @@ -652,8 +652,6 @@ tests/third_party/cupy/math_tests/test_misc.py::TestConvolve::test_convolve_diff 
tests/third_party/cupy/math_tests/test_misc.py::TestConvolve::test_convolve_diff_types[full] tests/third_party/cupy/math_tests/test_rounding.py::TestRounding::test_fix -tests/third_party/cupy/math_tests/test_sumprod.py::TestSumprod::test_sum_out -tests/third_party/cupy/math_tests/test_sumprod.py::TestSumprod::test_sum_out_wrong_shape tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_ndarray_cumprod_2dim_with_axis tests/third_party/cupy/math_tests/test_sumprod.py::TestCumprod::test_cumprod_arraylike @@ -1087,57 +1085,30 @@ tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_9_{ax tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanstd_float16 tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanstd_huge tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanstd_out -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanvar_float16 -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanvar_huge -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanvar_out tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_0_{axis=None, ddof=0, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_0_{axis=None, ddof=0, keepdims=True, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_10_{axis=0, ddof=0, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_10_{axis=0, ddof=0, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_11_{axis=0, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_11_{axis=0, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_12_{axis=0, ddof=1, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_12_{axis=0, ddof=1, keepdims=True, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_13_{axis=0, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_13_{axis=0, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_14_{axis=0, ddof=1, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_14_{axis=0, ddof=1, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_15_{axis=0, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_15_{axis=0, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_16_{axis=1, ddof=0, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_16_{axis=1, ddof=0, keepdims=True, shape=(3, 4)}::test_nanvar 
tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_17_{axis=1, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_17_{axis=1, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_18_{axis=1, ddof=0, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_18_{axis=1, ddof=0, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_19_{axis=1, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_19_{axis=1, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_1_{axis=None, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_1_{axis=None, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_20_{axis=1, ddof=1, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_20_{axis=1, ddof=1, keepdims=True, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_21_{axis=1, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_21_{axis=1, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_22_{axis=1, ddof=1, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_22_{axis=1, ddof=1, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_23_{axis=1, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_23_{axis=1, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_2_{axis=None, ddof=0, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_2_{axis=None, ddof=0, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_3_{axis=None, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_3_{axis=None, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_4_{axis=None, ddof=1, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_4_{axis=None, ddof=1, keepdims=True, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_5_{axis=None, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_5_{axis=None, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_6_{axis=None, ddof=1, keepdims=False, shape=(3, 
4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_6_{axis=None, ddof=1, keepdims=False, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_7_{axis=None, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_7_{axis=None, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_8_{axis=0, ddof=0, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_8_{axis=0, ddof=0, keepdims=True, shape=(3, 4)}::test_nanvar tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_9_{axis=0, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_9_{axis=0, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanvar tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_all tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_all_nan tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_nanmax_axis0 diff --git a/tests/skipped_tests_gpu_no_fp64.tbl b/tests/skipped_tests_gpu_no_fp64.tbl index 0e043ee7452..26e11a70062 100644 --- a/tests/skipped_tests_gpu_no_fp64.tbl +++ b/tests/skipped_tests_gpu_no_fp64.tbl @@ -285,15 +285,3 @@ tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeib tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_2_{a_shape=(3, 2), shape=(4, 3, 2)}::test_weibull_for_inf_a tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_3_{a_shape=(3, 2), shape=(3, 2)}::test_weibull tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_3_{a_shape=(3, 2), shape=(3, 2)}::test_weibull_for_inf_a - - -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestMeanVar::test_external_std_all -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestMeanVar::test_external_std_all_ddof -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestMeanVar::test_external_var_all -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestMeanVar::test_external_var_all_ddof -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestMeanVar::test_std_all -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestMeanVar::test_std_all_ddof -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestMeanVar::test_var_all -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestMeanVar::test_var_all_ddof -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestProductZeroLength_param_6_{func='std', params=((), None)}::test_external_mean_zero_len -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestProductZeroLength_param_12_{func='var', params=((), None)}::test_external_mean_zero_len diff --git a/tests/test_mathematical.py b/tests/test_mathematical.py index 7484a66bfb5..51a8de6a392 100644 --- a/tests/test_mathematical.py +++ b/tests/test_mathematical.py @@ -2082,11 +2082,10 @@ def test_sum_empty(dtype, axis): assert_array_equal(numpy_res, dpnp_res.asnumpy()) -@pytest.mark.usefixtures("allow_fall_back_on_numpy") -@pytest.mark.parametrize("dtype", get_all_dtypes(no_complex=True, no_bool=True)) +@pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) def test_sum_empty_out(dtype): a = 
dpnp.empty((1, 2, 0, 4), dtype=dtype) - out = dpnp.ones(()) + out = dpnp.ones((), dtype=dtype) res = a.sum(out=out) assert_array_equal(out.asnumpy(), res.asnumpy()) assert_array_equal(out.asnumpy(), numpy.array(0, dtype=dtype)) diff --git a/tests/test_statistics.py b/tests/test_statistics.py index 50a9ae5aa36..3caaaf9c805 100644 --- a/tests/test_statistics.py +++ b/tests/test_statistics.py @@ -8,7 +8,12 @@ import dpnp -from .helper import assert_dtype_allclose, get_all_dtypes +from .helper import ( + assert_dtype_allclose, + get_all_dtypes, + get_float_complex_dtypes, + has_support_aspect64, +) @pytest.mark.parametrize( @@ -61,29 +66,35 @@ def test_max_min_out(func): ia = dpnp.array(a) np_res = getattr(numpy, func)(a, axis=0) + # output is dpnp array dpnp_res = dpnp.array(numpy.empty_like(np_res)) getattr(dpnp, func)(ia, axis=0, out=dpnp_res) assert_allclose(dpnp_res, np_res) + # output is usm array dpnp_res = dpt.asarray(numpy.empty_like(np_res)) getattr(dpnp, func)(ia, axis=0, out=dpnp_res) assert_allclose(dpnp_res, np_res) + # output is numpy array -> Error dpnp_res = numpy.empty_like(np_res) with pytest.raises(TypeError): getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + # output has incorrect shape -> Error dpnp_res = dpnp.array(numpy.empty((2, 3))) with pytest.raises(ValueError): getattr(dpnp, func)(ia, axis=0, out=dpnp_res) @pytest.mark.parametrize("func", ["max", "min"]) -def test_max_min_NotImplemented(func): +def test_max_min_error(func): ia = dpnp.arange(5) - + # where is not supported with pytest.raises(NotImplementedError): getattr(dpnp, func)(ia, where=False) + + # initial is not supported with pytest.raises(NotImplementedError): getattr(dpnp, func)(ia, initial=6) @@ -118,7 +129,10 @@ def test_mean_dtype(self, dtype): result = dpnp.mean(dp_array, dtype=dtype) assert_allclose(expected, result) - @pytest.mark.usefixtures("suppress_invalid_numpy_warnings") + @pytest.mark.usefixtures( + "suppress_invalid_numpy_warnings", + "suppress_mean_empty_slice_numpy_warnings", + ) @pytest.mark.parametrize("axis", [0, 1, (0, 1)]) @pytest.mark.parametrize("shape", [(2, 3), (2, 0), (0, 3)]) def test_mean_empty(self, axis, shape): @@ -149,63 +163,308 @@ def test_mean_scalar(self): expected = np_array.mean() assert_allclose(expected, result) - def test_mean_NotImplemented(func): + def test_mean_NotImplemented(self): ia = dpnp.arange(5) with pytest.raises(NotImplementedError): dpnp.mean(ia, where=False) -@pytest.mark.usefixtures("allow_fall_back_on_numpy") -@pytest.mark.parametrize( - "array", - [ - [2, 0, 6, 2], - [2, 0, 6, 2, 5, 6, 7, 8], - [], - [2, 1, numpy.nan, 5, 3], - [-1, numpy.nan, 1, numpy.inf], - [3, 6, 0, 1], - [3, 6, 0, 1, 8], - [3, 2, 9, 6, numpy.nan], - [numpy.nan, numpy.nan, numpy.inf, numpy.nan], - [[2, 0], [6, 2]], - [[2, 0, 6, 2], [5, 6, 7, 8]], - [[[2, 0], [6, 2]], [[5, 6], [7, 8]]], - [[-1, numpy.nan], [1, numpy.inf]], - [[numpy.nan, numpy.nan], [numpy.inf, numpy.nan]], - ], - ids=[ - "[2, 0, 6, 2]", - "[2, 0, 6, 2, 5, 6, 7, 8]", - "[]", - "[2, 1, np.nan, 5, 3]", - "[-1, np.nan, 1, np.inf]", - "[3, 6, 0, 1]", - "[3, 6, 0, 1, 8]", - "[3, 2, 9, 6, np.nan]", - "[np.nan, np.nan, np.inf, np.nan]", - "[[2, 0], [6, 2]]", - "[[2, 0, 6, 2], [5, 6, 7, 8]]", - "[[[2, 0], [6, 2]], [[5, 6], [7, 8]]]", - "[[-1, np.nan], [1, np.inf]]", - "[[np.nan, np.nan], [np.inf, np.nan]]", - ], -) -@pytest.mark.parametrize( - "dtype", get_all_dtypes(no_none=True, no_bool=True, no_complex=True) -) -def test_nanvar(array, dtype): - dtype = dpnp.default_float_type() - a = numpy.array(array, dtype=dtype) - ia 
= dpnp.array(a) - for ddof in range(a.ndim): - expected = numpy.nanvar(a, ddof=ddof) - result = dpnp.nanvar(ia, ddof=ddof) - assert_allclose(expected, result, rtol=1e-06) - - expected = numpy.nanvar(a, axis=None, ddof=0) - result = dpnp.nanvar(ia, axis=None, ddof=0) - assert_allclose(expected, result, rtol=1e-06) +class TestVar: + @pytest.mark.usefixtures( + "suppress_divide_invalid_numpy_warnings", "suppress_dof_numpy_warnings" + ) + @pytest.mark.parametrize("dtype", get_all_dtypes()) + @pytest.mark.parametrize("axis", [None, 0, 1, (0, 1)]) + @pytest.mark.parametrize("keepdims", [True, False]) + @pytest.mark.parametrize("ddof", [0, 0.5, 1, 1.5, 2]) + def test_var(self, dtype, axis, keepdims, ddof): + dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype) + np_array = dpnp.asnumpy(dp_array) + + expected = numpy.var(np_array, axis=axis, keepdims=keepdims, ddof=ddof) + result = dpnp.var(dp_array, axis=axis, keepdims=keepdims, ddof=ddof) + + if axis == 0 and ddof == 2: + assert dpnp.all(dpnp.isnan(result)) + else: + assert_dtype_allclose(result, expected) + + @pytest.mark.usefixtures( + "suppress_divide_invalid_numpy_warnings", "suppress_dof_numpy_warnings" + ) + @pytest.mark.parametrize("dtype", get_all_dtypes()) + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("ddof", [0, 1]) + def test_var_out(self, dtype, axis, ddof): + dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype) + np_array = dpnp.asnumpy(dp_array) + + expected = numpy.var(np_array, axis=axis, ddof=ddof) + if has_support_aspect64(): + res_dtype = expected.dtype + else: + res_dtype = dpnp.default_float_type(dp_array.device) + result = dpnp.empty(expected.shape, dtype=res_dtype) + dpnp.var(dp_array, axis=axis, out=result, ddof=ddof) + assert_dtype_allclose(result, expected) + + @pytest.mark.usefixtures( + "suppress_invalid_numpy_warnings", "suppress_dof_numpy_warnings" + ) + @pytest.mark.parametrize("axis", [0, 1, (0, 1)]) + @pytest.mark.parametrize("shape", [(2, 3), (2, 0), (0, 3)]) + def test_var_empty(self, axis, shape): + dp_array = dpnp.empty(shape, dtype=dpnp.int64) + np_array = dpnp.asnumpy(dp_array) + + result = dpnp.var(dp_array, axis=axis) + expected = numpy.var(np_array, axis=axis) + assert_dtype_allclose(result, expected) + + @pytest.mark.parametrize("dtype", get_all_dtypes()) + def test_var_strided(self, dtype): + dp_array = dpnp.array([-2, -1, 0, 1, 0, 2], dtype=dtype) + np_array = dpnp.asnumpy(dp_array) + + result = dpnp.var(dp_array[::-1]) + expected = numpy.var(np_array[::-1]) + assert_dtype_allclose(result, expected) + + result = dpnp.var(dp_array[::2]) + expected = numpy.var(np_array[::2]) + assert_dtype_allclose(result, expected) + + @pytest.mark.usefixtures("suppress_complex_warning") + @pytest.mark.parametrize("dt_in", get_all_dtypes(no_bool=True)) + @pytest.mark.parametrize("dt_out", get_float_complex_dtypes()) + def test_var_dtype(self, dt_in, dt_out): + dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dt_in) + np_array = dpnp.asnumpy(dp_array) + + expected = numpy.var(np_array, dtype=dt_out) + result = dpnp.var(dp_array, dtype=dt_out) + assert expected.dtype == result.dtype + assert_allclose(result, expected, rtol=1e-06) + + def test_var_scalar(self): + dp_array = dpnp.array(5) + np_array = dpnp.asnumpy(dp_array) + + result = dp_array.var() + expected = np_array.var() + assert_allclose(expected, result) + + def test_var_error(self): + ia = dpnp.arange(5) + # where keyword is not implemented + with pytest.raises(NotImplementedError): + dpnp.var(ia, where=False) + + # 
ddof should be an integer or float + with pytest.raises(TypeError): + dpnp.var(ia, ddof="1") + + +class TestStd: + @pytest.mark.usefixtures( + "suppress_divide_invalid_numpy_warnings", "suppress_dof_numpy_warnings" + ) + @pytest.mark.parametrize("dtype", get_all_dtypes()) + @pytest.mark.parametrize("axis", [0, 1, (0, 1)]) + @pytest.mark.parametrize("keepdims", [True, False]) + @pytest.mark.parametrize("ddof", [0, 0.5, 1, 1.5, 2]) + def test_std(self, dtype, axis, keepdims, ddof): + dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype) + np_array = dpnp.asnumpy(dp_array) + + expected = numpy.std(np_array, axis=axis, keepdims=keepdims, ddof=ddof) + result = dpnp.std(dp_array, axis=axis, keepdims=keepdims, ddof=ddof) + if axis == 0 and ddof == 2: + assert dpnp.all(dpnp.isnan(result)) + else: + assert_dtype_allclose(result, expected) + + @pytest.mark.usefixtures( + "suppress_divide_invalid_numpy_warnings", "suppress_dof_numpy_warnings" + ) + @pytest.mark.parametrize("dtype", get_all_dtypes()) + @pytest.mark.parametrize("axis", [0, 1]) + @pytest.mark.parametrize("ddof", [0, 1]) + def test_std_out(self, dtype, axis, ddof): + dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype) + np_array = dpnp.asnumpy(dp_array) + + expected = numpy.std(np_array, axis=axis, ddof=ddof) + if has_support_aspect64(): + res_dtype = expected.dtype + else: + res_dtype = dpnp.default_float_type(dp_array.device) + result = dpnp.empty(expected.shape, dtype=res_dtype) + dpnp.std(dp_array, axis=axis, out=result, ddof=ddof) + assert_dtype_allclose(result, expected) + + @pytest.mark.usefixtures( + "suppress_invalid_numpy_warnings", "suppress_dof_numpy_warnings" + ) + @pytest.mark.parametrize("axis", [None, 0, 1, (0, 1)]) + @pytest.mark.parametrize("shape", [(2, 3), (2, 0), (0, 3)]) + def test_std_empty(self, axis, shape): + dp_array = dpnp.empty(shape, dtype=dpnp.int64) + np_array = dpnp.asnumpy(dp_array) + + result = dpnp.std(dp_array, axis=axis) + expected = numpy.std(np_array, axis=axis) + assert_dtype_allclose(result, expected) + + @pytest.mark.parametrize("dtype", get_all_dtypes()) + def test_std_strided(self, dtype): + dp_array = dpnp.array([-2, -1, 0, 1, 0, 2], dtype=dtype) + np_array = dpnp.asnumpy(dp_array) + + result = dpnp.std(dp_array[::-1]) + expected = numpy.std(np_array[::-1]) + assert_dtype_allclose(result, expected) + + result = dpnp.std(dp_array[::2]) + expected = numpy.std(np_array[::2]) + assert_dtype_allclose(result, expected) + + @pytest.mark.usefixtures("suppress_complex_warning") + @pytest.mark.parametrize("dt_in", get_all_dtypes(no_bool=True)) + @pytest.mark.parametrize("dt_out", get_float_complex_dtypes()) + def test_std_dtype(self, dt_in, dt_out): + dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dt_in) + np_array = dpnp.asnumpy(dp_array) + + expected = numpy.std(np_array, dtype=dt_out) + result = dpnp.std(dp_array, dtype=dt_out) + assert expected.dtype == result.dtype + assert_allclose(result, expected, rtol=1e-6) + + def test_std_scalar(self): + dp_array = dpnp.array(5) + np_array = dpnp.asnumpy(dp_array) + + result = dp_array.std() + expected = np_array.std() + assert_dtype_allclose(result, expected) + + def test_std_error(self): + ia = dpnp.arange(5) + # where keyword is not implemented + with pytest.raises(NotImplementedError): + dpnp.std(ia, where=False) + + # ddof should be an integer or float + with pytest.raises(TypeError): + dpnp.std(ia, ddof="1") + + +class TestNanVar: + @pytest.mark.parametrize( + "array", + [ + [2, 0, 6, 2], + [2, 0, 6, 2, 5, 6, 7, 8], + [], + [2, 1, 
numpy.nan, 5, 3], + [-1, numpy.nan, 1, numpy.inf], + [3, 6, 0, 1], + [3, 6, 0, 1, 8], + [3, 2, 9, 6, numpy.nan], + [numpy.nan, numpy.nan, numpy.inf, numpy.nan], + [[2, 0], [6, 2]], + [[2, 0, 6, 2], [5, 6, 7, 8]], + [[[2, 0], [6, 2]], [[5, 6], [7, 8]]], + [[-1, numpy.nan], [1, numpy.inf]], + [[numpy.nan, numpy.nan], [numpy.inf, numpy.nan]], + ], + ids=[ + "[2, 0, 6, 2]", + "[2, 0, 6, 2, 5, 6, 7, 8]", + "[]", + "[2, 1, np.nan, 5, 3]", + "[-1, np.nan, 1, np.inf]", + "[3, 6, 0, 1]", + "[3, 6, 0, 1, 8]", + "[3, 2, 9, 6, np.nan]", + "[np.nan, np.nan, np.inf, np.nan]", + "[[2, 0], [6, 2]]", + "[[2, 0, 6, 2], [5, 6, 7, 8]]", + "[[[2, 0], [6, 2]], [[5, 6], [7, 8]]]", + "[[-1, np.nan], [1, np.inf]]", + "[[np.nan, np.nan], [np.inf, np.nan]]", + ], + ) + @pytest.mark.usefixtures( + "suppress_invalid_numpy_warnings", "suppress_dof_numpy_warnings" + ) + @pytest.mark.parametrize( + "dtype", get_all_dtypes(no_none=True, no_bool=True) + ) + def test_nanvar(self, array, dtype): + try: + a = numpy.array(array, dtype=dtype) + except: + pytest.skip("floating datat type is needed to store NaN") + ia = dpnp.array(a) + for ddof in range(a.ndim): + expected = numpy.nanvar(a, ddof=ddof) + result = dpnp.nanvar(ia, ddof=ddof) + assert_dtype_allclose(result, expected) + + @pytest.mark.usefixtures("suppress_dof_numpy_warnings") + @pytest.mark.parametrize("dtype", get_float_complex_dtypes()) + @pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 1), (1, 2)]) + @pytest.mark.parametrize("keepdims", [True, False]) + @pytest.mark.parametrize("ddof", [0, 0.5, 1, 1.5, 2, 3]) + def test_nanvar_out(self, dtype, axis, keepdims, ddof): + a = numpy.arange(4 * 3 * 5, dtype=dtype) + a[::2] = numpy.nan + a = a.reshape(4, 3, 5) + ia = dpnp.array(a) + + expected = numpy.nanvar(a, axis=axis, ddof=ddof, keepdims=keepdims) + if has_support_aspect64(): + res_dtype = expected.dtype + else: + res_dtype = dpnp.default_float_type(ia.device) + result = dpnp.empty(expected.shape, dtype=res_dtype) + dpnp.nanvar(ia, out=result, axis=axis, ddof=ddof, keepdims=keepdims) + assert_dtype_allclose(result, expected) + + @pytest.mark.usefixtures("suppress_complex_warning") + @pytest.mark.parametrize("dt_in", get_float_complex_dtypes()) + @pytest.mark.parametrize("dt_out", get_float_complex_dtypes()) + def test_nanvar_dtype(self, dt_in, dt_out): + a = numpy.arange(4 * 3 * 5, dtype=dt_in) + a[::2] = numpy.nan + a = a.reshape(4, 3, 5) + ia = dpnp.array(a) + + expected = numpy.nanvar(a, dtype=dt_out) + result = dpnp.nanvar(ia, dtype=dt_out) + assert_dtype_allclose(result, expected) + + def test_nanvar_error(self): + ia = dpnp.arange(5, dtype=dpnp.float32) + ia[0] = dpnp.nan + # where keyword is not implemented + with pytest.raises(NotImplementedError): + dpnp.nanvar(ia, where=False) + + # dtype should be floating + with pytest.raises(TypeError): + dpnp.nanvar(ia, dtype=dpnp.int32) + + # out dtype should be inexact + res = dpnp.empty((1,), dtype=dpnp.int32) + with pytest.raises(TypeError): + dpnp.nanvar(ia, out=res) + + # ddof should be an integer or float + with pytest.raises(TypeError): + dpnp.nanvar(ia, ddof="1") @pytest.mark.usefixtures("allow_fall_back_on_numpy") diff --git a/tests/test_sum.py b/tests/test_sum.py index 4104b33a624..25c294d051e 100644 --- a/tests/test_sum.py +++ b/tests/test_sum.py @@ -7,6 +7,7 @@ import dpnp from tests.helper import ( assert_dtype_allclose, + get_all_dtypes, get_float_dtypes, has_support_aspect64, ) @@ -65,3 +66,24 @@ def test_sum_axis(): else: expected = numpy.sum(a, axis=1) assert_array_equal(expected, result) + + 
+@pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) +@pytest.mark.parametrize("axis", [0, 1, (0, 1)]) +def test_sum_out(dtype, axis): + a = dpnp.arange(2 * 4, dtype=dtype).reshape(2, 4) + a_np = dpnp.asnumpy(a) + + expected = numpy.sum(a_np, axis=axis) + res = dpnp.empty(expected.shape, dtype=dtype) + a.sum(axis=axis, out=res) + assert_array_equal(expected, res.asnumpy()) + + +def test_sum_NotImplemented(): + ia = dpnp.arange(5) + with pytest.raises(NotImplementedError): + dpnp.sum(ia, where=False) + + with pytest.raises(NotImplementedError): + dpnp.sum(ia, initial=1) diff --git a/tests/test_sycl_queue.py b/tests/test_sycl_queue.py index 19104d7bd43..b1175703b01 100644 --- a/tests/test_sycl_queue.py +++ b/tests/test_sycl_queue.py @@ -373,6 +373,7 @@ def test_meshgrid(device_x, device_y): pytest.param("nancumsum", [1.0, dpnp.nan]), pytest.param("nanprod", [1.0, dpnp.nan]), pytest.param("nansum", [1.0, dpnp.nan]), + pytest.param("nanvar", [1.0, 2.0, 4.0, dpnp.nan]), pytest.param("negative", [1.0, 0.0, -1.0]), pytest.param("positive", [1.0, 0.0, -1.0]), pytest.param("prod", [1.0, 2.0]), @@ -387,6 +388,7 @@ def test_meshgrid(device_x, device_y): ), pytest.param("sinh", [-5.0, -3.5, 0.0, 3.5, 5.0]), pytest.param("sqrt", [1.0, 3.0, 9.0]), + pytest.param("std", [1.0, 2.0, 4.0, 7.0]), pytest.param("sum", [1.0, 2.0]), pytest.param( "tan", [-dpnp.pi / 2, -dpnp.pi / 4, 0.0, dpnp.pi / 4, dpnp.pi / 2] @@ -394,6 +396,7 @@ def test_meshgrid(device_x, device_y): pytest.param("tanh", [-5.0, -3.5, 0.0, 3.5, 5.0]), pytest.param("trapz", [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]), pytest.param("trunc", [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]), + pytest.param("var", [1.0, 2.0, 4.0, 7.0]), ], ) @pytest.mark.parametrize( diff --git a/tests/test_usm_type.py b/tests/test_usm_type.py index fa2931cc505..4da04c2d675 100644 --- a/tests/test_usm_type.py +++ b/tests/test_usm_type.py @@ -395,6 +395,7 @@ def test_meshgrid(usm_type_x, usm_type_y): pytest.param("log1p", [1.0e-10, 1.0, 2.0, 4.0, 7.0]), pytest.param("log2", [1.0, 2.0, 4.0, 7.0]), pytest.param("nanprod", [1.0, 2.0, dp.nan]), + pytest.param("nanvar", [1.0, 2.0, 4.0, dp.nan]), pytest.param("max", [1.0, 2.0, 4.0, 7.0]), pytest.param("mean", [1.0, 2.0, 4.0, 7.0]), pytest.param("min", [1.0, 2.0, 4.0, 7.0]), @@ -414,11 +415,13 @@ def test_meshgrid(usm_type_x, usm_type_y): ), pytest.param("sinh", [-5.0, -3.5, 0.0, 3.5, 5.0]), pytest.param("sqrt", [1.0, 3.0, 9.0]), + pytest.param("std", [1.0, 2.0, 4.0, 7.0]), pytest.param( "tan", [-dp.pi / 2, -dp.pi / 4, 0.0, dp.pi / 4, dp.pi / 2] ), pytest.param("tanh", [-5.0, -3.5, 0.0, 3.5, 5.0]), pytest.param("trunc", [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]), + pytest.param("var", [1.0, 2.0, 4.0, 7.0]), ], ) @pytest.mark.parametrize("usm_type", list_of_usm_types, ids=list_of_usm_types) diff --git a/tests/third_party/cupy/math_tests/test_sumprod.py b/tests/third_party/cupy/math_tests/test_sumprod.py index 0728382a5b4..fc94b329665 100644 --- a/tests/third_party/cupy/math_tests/test_sumprod.py +++ b/tests/third_party/cupy/math_tests/test_sumprod.py @@ -8,7 +8,6 @@ from tests.third_party.cupy import testing -@testing.gpu class TestSumprod(unittest.TestCase): def tearDown(self): # Free huge memory for slow test @@ -19,75 +18,52 @@ def tearDown(self): # Note: numpy.sum() always upcast integers to (u)int64 and float32 to # float64 for dtype=None. 
`np.sum` does that too for integers, but not for # float32, so we need to special-case it for these tests + def _get_dtype_kwargs(self, xp, dtype): + if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): + return {"dtype": numpy.float64} + return {} + @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_sum_all(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(dtype=dtype) - else: - return a.sum() + return a.sum(**self._get_dtype_kwargs(xp, dtype)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_sum_all_keepdims(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(dtype=dtype, keepdims=True) - else: - return a.sum(keepdims=True) + return a.sum(**self._get_dtype_kwargs(xp, dtype), keepdims=True) @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_external_sum_all(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return xp.sum(a, dtype=dtype) - else: - return xp.sum(a) + return xp.sum(a, **self._get_dtype_kwargs(xp, dtype)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(rtol=1e-06) def test_sum_all2(self, xp, dtype): a = testing.shaped_arange((20, 30, 40), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(dtype=dtype) - else: - return a.sum() + return a.sum(**self._get_dtype_kwargs(xp, dtype)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=False) def test_sum_all_transposed(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype).transpose(2, 0, 1) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(dtype=dtype) - else: - return a.sum() + return a.sum(**self._get_dtype_kwargs(xp, dtype)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(rtol=1e-06) def test_sum_all_transposed2(self, xp, dtype): a = testing.shaped_arange((20, 30, 40), xp, dtype).transpose(2, 0, 1) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(dtype=dtype) - else: - return a.sum() + return a.sum(**self._get_dtype_kwargs(xp, dtype)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_sum_axis(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(dtype=dtype, axis=1) - else: - return a.sum(axis=1) + return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=1) @testing.slow @testing.numpy_cupy_allclose() @@ -99,11 +75,7 @@ def test_sum_axis_huge(self, xp): @testing.numpy_cupy_allclose() def test_external_sum_axis(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return xp.sum(a, axis=1, dtype=dtype) - else: - return xp.sum(a, axis=1) + return xp.sum(a, **self._get_dtype_kwargs(xp, dtype), axis=1) # float16 is omitted, since NumPy's sum on float16 arrays has more error # than CuPy's. 
@@ -111,71 +83,49 @@ def test_external_sum_axis(self, xp, dtype): @testing.numpy_cupy_allclose() def test_sum_axis2(self, xp, dtype): a = testing.shaped_arange((20, 30, 40), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(axis=1, dtype=dtype) - else: - return a.sum(axis=1) + return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=1) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(contiguous_check=False) def test_sum_axis_transposed(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype).transpose(2, 0, 1) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(axis=1, dtype=dtype) - else: - return a.sum(axis=1) + return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=1) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(contiguous_check=False) def test_sum_axis_transposed2(self, xp, dtype): a = testing.shaped_arange((20, 30, 40), xp, dtype).transpose(2, 0, 1) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(axis=1, dtype=dtype) - else: - return a.sum(axis=1) + return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=1) @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_sum_axes(self, xp, dtype): a = testing.shaped_arange((2, 3, 4, 5), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(axis=(1, 3), dtype=dtype) - else: - return a.sum(axis=(1, 3)) + return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=(1, 3)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(rtol=1e-4) def test_sum_axes2(self, xp, dtype): a = testing.shaped_arange((20, 30, 40, 50), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(axis=(1, 3), dtype=dtype) - else: - return a.sum(axis=(1, 3)) + return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=(1, 3)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(rtol=1e-6) def test_sum_axes3(self, xp, dtype): a = testing.shaped_arange((2, 3, 4, 5), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(axis=(0, 2, 3), dtype=dtype) - else: - return a.sum(axis=(0, 2, 3)) + return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=(0, 2, 3)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(rtol=1e-6) def test_sum_axes4(self, xp, dtype): a = testing.shaped_arange((20, 30, 40, 50), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(axis=(0, 2, 3), dtype=dtype) - else: - return a.sum(axis=(0, 2, 3)) + return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=(0, 2, 3)) + + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose() + def test_sum_empty_axis(self, xp, dtype): + a = testing.shaped_arange((2, 3, 4, 5), xp, dtype) + return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=()) @testing.for_all_dtypes_combination(names=["src_dtype", "dst_dtype"]) @testing.numpy_cupy_allclose() @@ -193,11 +143,9 @@ def test_sum_keepdims_and_dtype(self, xp, src_dtype, dst_dtype): @testing.numpy_cupy_allclose() def test_sum_keepdims_multiple_axes(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - dtype = numpy.float64 - return a.sum(axis=(1, 2), keepdims=True, dtype=dtype) - else: - return a.sum(axis=(1, 
2), keepdims=True) + return a.sum( + **self._get_dtype_kwargs(xp, dtype), axis=(1, 2), keepdims=True + ) @testing.for_all_dtypes() @testing.numpy_cupy_allclose() diff --git a/tests/third_party/cupy/statistics_tests/test_meanvar.py b/tests/third_party/cupy/statistics_tests/test_meanvar.py index 738057a99f4..de2eb22604f 100644 --- a/tests/third_party/cupy/statistics_tests/test_meanvar.py +++ b/tests/third_party/cupy/statistics_tests/test_meanvar.py @@ -4,7 +4,7 @@ import pytest import dpnp as cupy -from tests.helper import has_support_aspect64 +from tests.helper import has_support_aspect16, has_support_aspect64 from tests.third_party.cupy import testing ignore_runtime_warnings = pytest.mark.filterwarnings( @@ -152,7 +152,6 @@ def test_returned(self, dtype): self.check_returned(a, axis=1, weights=w) -@testing.gpu class TestMeanVar(unittest.TestCase): @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) @@ -197,51 +196,47 @@ def test_mean_all_complex_dtype(self, xp, dtype): return xp.mean(a, dtype=numpy.complex64) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_var_all(self, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) return a.var() @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_external_var_all(self, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) return xp.var(a) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_var_all_ddof(self, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) return a.var(ddof=1) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_external_var_all_ddof(self, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) return xp.var(a, ddof=1) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_var_axis(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) return a.var(axis=1) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_external_var_axis(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) return xp.var(a, axis=1) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_var_axis_ddof(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) return a.var(axis=1, ddof=1) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_external_var_axis_ddof(self, xp, dtype): @@ -249,51 +244,47 @@ def test_external_var_axis_ddof(self, xp, dtype): return xp.var(a, axis=1, ddof=1) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_std_all(self, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) return a.std() @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_external_std_all(self, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) return xp.std(a) @testing.for_all_dtypes() - 
@testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_std_all_ddof(self, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) return a.std(ddof=1) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose() + @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_external_std_all_ddof(self, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) return xp.std(a, ddof=1) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_std_axis(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) return a.std(axis=1) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_external_std_axis(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) return xp.std(a, axis=1) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_std_axis_ddof(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) return a.std(axis=1, ddof=1) - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_external_std_axis_ddof(self, xp, dtype): @@ -382,11 +373,10 @@ def test_nanmean_all_nan(self, xp): } ) ) -@testing.gpu class TestNanVarStd(unittest.TestCase): - @ignore_runtime_warnings - @testing.for_all_dtypes(no_float16=True, no_complex=True) - @testing.numpy_cupy_allclose(rtol=1e-6) + @pytest.mark.usefixtures("suppress_dof_numpy_warnings") + @testing.for_all_dtypes(no_float16=True) + @testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64()) def test_nanvar(self, xp, dtype): a = testing.shaped_random(self.shape, xp, dtype=dtype) if a.dtype.kind not in "biu": @@ -395,8 +385,7 @@ def test_nanvar(self, xp, dtype): a, axis=self.axis, ddof=self.ddof, keepdims=self.keepdims ) - @ignore_runtime_warnings - @testing.for_all_dtypes(no_float16=True, no_complex=True) + @testing.for_all_dtypes(no_float16=True) @testing.numpy_cupy_allclose(rtol=1e-6) def test_nanstd(self, xp, dtype): a = testing.shaped_random(self.shape, xp, dtype=dtype) @@ -407,11 +396,10 @@ def test_nanstd(self, xp, dtype): ) -@testing.gpu class TestNanVarStdAdditional(unittest.TestCase): - @ignore_runtime_warnings - @testing.for_all_dtypes(no_float16=True, no_complex=True) - @testing.numpy_cupy_allclose(rtol=1e-6) + @pytest.mark.usefixtures("suppress_dof_numpy_warnings") + @testing.for_all_dtypes(no_float16=True) + @testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64()) def test_nanvar_out(self, xp, dtype): a = testing.shaped_random((10, 20, 30), xp, dtype) z = xp.zeros((20, 30)) @@ -424,8 +412,8 @@ def test_nanvar_out(self, xp, dtype): return z @testing.slow - @testing.for_all_dtypes(no_float16=True, no_complex=True) - @testing.numpy_cupy_allclose(rtol=1e-6) + @testing.for_all_dtypes(no_float16=True) + @testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64()) def test_nanvar_huge(self, xp, dtype): a = testing.shaped_random((1024, 512), xp, dtype) @@ -434,14 +422,16 @@ def test_nanvar_huge(self, xp, dtype): return xp.nanvar(a, axis=1) - @testing.numpy_cupy_allclose(rtol=1e-4) + @pytest.mark.skipif( + not has_support_aspect16(), reason="No fp16 support by device" + ) + @testing.numpy_cupy_allclose(rtol=1e-3) def 
test_nanvar_float16(self, xp): a = testing.shaped_arange((4, 5), xp, numpy.float16) a[0][0] = xp.nan return xp.nanvar(a, axis=0) - @ignore_runtime_warnings - @testing.for_all_dtypes(no_float16=True, no_complex=True) + @testing.for_all_dtypes(no_float16=True) @testing.numpy_cupy_allclose(rtol=1e-6) def test_nanstd_out(self, xp, dtype): a = testing.shaped_random((10, 20, 30), xp, dtype) @@ -455,7 +445,7 @@ def test_nanstd_out(self, xp, dtype): return z @testing.slow - @testing.for_all_dtypes(no_float16=True, no_complex=True) + @testing.for_all_dtypes(no_float16=True) @testing.numpy_cupy_allclose(rtol=1e-6) def test_nanstd_huge(self, xp, dtype): a = testing.shaped_random((1024, 512), xp, dtype) @@ -487,8 +477,11 @@ def test_nanstd_float16(self, xp): } ) ) -@pytest.mark.usefixtures("allow_fall_back_on_numpy") -@testing.gpu +@pytest.mark.usefixtures( + "suppress_invalid_numpy_warnings", + "suppress_dof_numpy_warnings", + "suppress_mean_empty_slice_numpy_warnings", +) class TestProductZeroLength(unittest.TestCase): @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(type_check=has_support_aspect64())
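
For orientation, the fallback branch of `dpnp.var` shown at the top of this patch is a two-pass computation: take the mean with `keepdims=True` so it broadcasts against the input, form squared deviations, sum them, then divide by the element count less `ddof`, clamped at zero. (The other branch dispatches to `dpctl.tensor`'s `var` with `correction=ddof` and applies the `out`/`dtype` handling afterwards.) Below is a rough standalone sketch of that flow using only the public dpnp calls that appear in the patch; `var_sketch` is a made-up name, the count handling is simplified (no `where`, no tuple axes, no `_count_reduce_items`), and the sample values are arbitrary.

```python
import dpnp

def var_sketch(a, axis=None, ddof=0):
    # Two-pass variance: mean -> squared deviations -> sum -> divide.
    arrmean = dpnp.mean(a, axis=axis, keepdims=True)
    x = dpnp.subtract(a, arrmean)
    sq = dpnp.multiply(x, x.conj()).real   # .real handles complex input
    result = dpnp.sum(sq, axis=axis)

    # Simplified element count; the patch uses a _count_reduce_items helper
    # that also honours `where` and tuple axes.
    cnt = a.size if axis is None else a.shape[axis]
    cnt = max(cnt - ddof, 0)
    return dpnp.divide(result, cnt) if cnt else result * dpnp.nan

ia = dpnp.array([[0.0, 1.0, 2.0], [3.0, 4.0, 0.0]])
print(var_sketch(ia, ddof=1))  # ~2.6667, matches numpy.var(..., ddof=1)
```
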