diff --git a/benchmark/python/einsum/benchmark_einsum.py b/benchmark/python/einsum/benchmark_einsum.py index 6de8223287da..3d1a708d86a3 100644 --- a/benchmark/python/einsum/benchmark_einsum.py +++ b/benchmark/python/einsum/benchmark_einsum.py @@ -83,5 +83,5 @@ def test_np_einsum(): if __name__ == "__main__": - npx.set_np() + npx.set_np(dtype=False) test_np_einsum() diff --git a/benchmark/python/ffi/benchmark_ffi.py b/benchmark/python/ffi/benchmark_ffi.py index f0760f4a1bca..1d6349fb837e 100644 --- a/benchmark/python/ffi/benchmark_ffi.py +++ b/benchmark/python/ffi/benchmark_ffi.py @@ -51,6 +51,9 @@ def generate_workloads(): def prepare_workloads(): pool = generate_workloads() OpArgMngr.add_workload("zeros", (2, 2)) + OpArgMngr.add_workload("full", (2, 2), 10) + OpArgMngr.add_workload("identity", 3) + OpArgMngr.add_workload("ones", (2, 2)) OpArgMngr.add_workload("einsum", "ii", pool['2x2'], optimize=False) OpArgMngr.add_workload("unique", pool['1'], return_index=True, return_inverse=True, return_counts=True, axis=-1) OpArgMngr.add_workload("dstack", (pool['2x1'], pool['2x1'], pool['2x1'], pool['2x1'])) @@ -244,7 +247,7 @@ def show_results(results): import numpy as onp from mxnet import np as dnp - mx.npx.set_np() + mx.npx.set_np(dtype=False) packages = { "onp": { "module": onp, diff --git a/include/mxnet/c_api.h b/include/mxnet/c_api.h index d3f0cc96625c..524dd78e53e6 100644 --- a/include/mxnet/c_api.h +++ b/include/mxnet/c_api.h @@ -1305,6 +1305,20 @@ MXNET_DLL int MXIsNumpyShape(int* curr); * \return 0 when success, -1 when failure happens */ MXNET_DLL int MXSetIsNumpyShape(int is_np_shape, int* prev); +/*! + * \brief get numpy default data type + * \param curr returns the current status + * \return 0 when success, -1 when failure happens + */ +MXNET_DLL int MXIsNumpyDefaultDtype(bool* curr); +/*! + * \brief set numpy default data type + * \param dtype_flag false when default dtype is float32, + * true when default dtype is float64. + * \param prev returns the previous status before this set + * \return 0 when success, -1 when failure happens + */ +MXNET_DLL int MXSetIsNumpyDefaultDtype(bool dtype_flag, bool* prev); /*! * \brief mark NDArrays as variables to compute gradient for autograd * \param num_var number of variable NDArrays diff --git a/include/mxnet/imperative.h b/include/mxnet/imperative.h index ca6f9353f9bd..c3e2697b3d8d 100644 --- a/include/mxnet/imperative.h +++ b/include/mxnet/imperative.h @@ -46,6 +46,7 @@ namespace mxnet { * turn off numpy shape flag globally. * */ enum NumpyShape{Off, ThreadLocalOn, GlobalOn}; + typedef NumpyShape NumpyDefaultDtype; /*! \brief runtime functions for NDArray */ class Imperative { public: @@ -189,9 +190,11 @@ class Imperative { * */ int is_np_shape() const { if (is_np_shape_global_) { - return 2; + return NumpyShape::GlobalOn; } - return is_np_shape_thread_local_ ? 1 : 0; + return is_np_shape_thread_local_ ? + NumpyShape::ThreadLocalOn : + NumpyShape::Off; } /*! \brief specify numpy compatibility off, thread local on or global on. */ bool set_is_np_shape(int is_np_shape) { @@ -212,6 +215,24 @@ class Imperative { } return old; } + /*! \brief return current numpy default dtype compatibility status. + * */ + bool is_np_default_dtype() const { + if (is_np_default_dtype_global_) { + return true; + } + return false; + } + /*! \brief specify numpy default dtype off or global on. 
*/ + bool set_is_np_default_dtype(bool is_np_default_dtype) { + bool old = this->is_np_default_dtype(); + if (is_np_default_dtype) { + is_np_default_dtype_global_ = true; + } else { + is_np_default_dtype_global_ = false; + } + return old; + } /*! \brief to record operator, return corresponding node. */ void RecordOp(nnvm::NodeAttrs&& attrs, const std::vector& inputs, @@ -301,6 +322,7 @@ class Imperative { static MX_THREAD_LOCAL bool is_np_shape_thread_local_; #endif bool is_np_shape_global_{false}; + bool is_np_default_dtype_global_{false}; /*! \brief node count used for naming */ std::atomic node_count_{0}; /*! \brief variable count used for naming */ diff --git a/python/mxnet/__init__.py b/python/mxnet/__init__.py index 49f10aace531..3700512f6d2b 100644 --- a/python/mxnet/__init__.py +++ b/python/mxnet/__init__.py @@ -25,6 +25,7 @@ from .base import MXNetError from .util import is_np_shape, set_np_shape, np_shape, use_np_shape from .util import is_np_array, np_array, use_np_array, use_np +from .util import is_np_default_dtype, np_default_dtype, use_np_default_dtype from . import base # version info diff --git a/python/mxnet/gluon/data/dataloader.py b/python/mxnet/gluon/data/dataloader.py index d34148417355..710303702d98 100644 --- a/python/mxnet/gluon/data/dataloader.py +++ b/python/mxnet/gluon/data/dataloader.py @@ -407,7 +407,7 @@ def __len__(self): def _thread_worker_initializer(active_shape, active_array): """Initializer for ThreadPool.""" - set_np(shape=active_shape, array=active_array) + set_np(shape=active_shape, array=active_array, dtype=False) _worker_dataset = None @@ -418,7 +418,7 @@ def _worker_initializer(dataset, active_shape, active_array): # can be passed as argument global _worker_dataset _worker_dataset = dataset - set_np(shape=active_shape, array=active_array) + set_np(shape=active_shape, array=active_array, dtype=False) def _worker_fn(samples, batchify_fn, dataset=None): """Function for processing data in worker process.""" diff --git a/python/mxnet/ndarray/numpy/_op.py b/python/mxnet/ndarray/numpy/_op.py index 872caab3feae..16274cad094b 100644 --- a/python/mxnet/ndarray/numpy/_op.py +++ b/python/mxnet/ndarray/numpy/_op.py @@ -23,6 +23,7 @@ from ...base import numeric_types, integer_types from ...util import _sanity_check_params, set_module from ...util import wrap_np_unary_func, wrap_np_binary_func +from ...util import is_np_default_dtype from ...context import current_context from . import _internal as _npi from . import _api_internal @@ -97,10 +98,12 @@ def zeros(shape, dtype=None, order='C', ctx=None): # pylint: disable=redefined- shape : int or tuple of int The shape of the empty array. dtype : str or numpy.dtype, optional - An optional value type. Default is `numpy.float32`. Note that this - behavior is different from NumPy's `zeros` function where `float64` - is the default value, because `float32` is considered as the default - data type in deep learning. + An optional value type. + - When npx.is_np_default_dtype() returns False, default dtype is float32; + - When npx.is_np_default_dtype() returns True, default dtype is float64. + Note that this behavior is different from NumPy's `zeros` function where `float64` + is the default value, here you can set 'float32' or 'float64' as your default dtype, + because `float32` is considered as the default data type in deep learning. order : {'C'}, optional, default: 'C' How to store multi-dimensional data in memory, currently only row-major (C-style) is supported. 
@@ -126,7 +129,7 @@ def zeros(shape, dtype=None, order='C', ctx=None): # pylint: disable=redefined- @set_module('mxnet.ndarray.numpy') -def ones(shape, dtype=_np.float32, order='C', ctx=None): # pylint: disable=redefined-outer-name +def ones(shape, dtype=None, order='C', ctx=None): # pylint: disable=redefined-outer-name """Return a new array of given shape and type, filled with ones. This function currently only supports storing multi-dimensional data in row-major (C-style). @@ -136,10 +139,11 @@ def ones(shape, dtype=_np.float32, order='C', ctx=None): # pylint: disable=rede shape : int or tuple of int The shape of the empty array. dtype : str or numpy.dtype, optional - An optional value type. Default is `numpy.float32`. Note that this - behavior is different from NumPy's `ones` function where `float64` - is the default value, because `float32` is considered as the default - data type in deep learning. + An optional value type. + - When npx.is_np_default_dtype() returns False, default dtype is float32; + - When npx.is_np_default_dtype() returns True, default dtype is float64. + Note that this behavior is different from NumPy's `ones` function where + `float64` is the default value. order : {'C'}, optional, default: 'C' How to store multi-dimensional data in memory, currently only row-major (C-style) is supported. @@ -154,9 +158,12 @@ def ones(shape, dtype=_np.float32, order='C', ctx=None): # pylint: disable=rede if order != 'C': raise NotImplementedError if ctx is None: - ctx = current_context() - dtype = _np.float32 if dtype is None else dtype - return _npi.ones(shape=shape, ctx=ctx, dtype=dtype) + ctx = str(current_context()) + else: + ctx = str(ctx) + if dtype is not None and not isinstance(dtype, str): + dtype = _np.dtype(dtype).name + return _api_internal.ones(shape, dtype, ctx) # pylint: disable=too-many-arguments, redefined-outer-name @@ -318,6 +325,8 @@ def full(shape, fill_value, dtype=None, order='C', ctx=None, out=None): # pylin dtype : data-type, optional The desired data-type for the array. The default, `None`, means `np.array(fill_value).dtype`. + - When npx.is_np_default_dtype() returns False, default dtype is float32; + - When npx.is_np_default_dtype() returns True, default dtype is float64. order : {'C'}, optional Whether to store multidimensional data in C- or Fortran-contiguous (row- or column-wise) order in memory. Currently only supports C order. @@ -361,19 +370,22 @@ def full(shape, fill_value, dtype=None, order='C', ctx=None, out=None): # pylin """ if order != 'C': raise NotImplementedError - if ctx is None: - ctx = current_context() if isinstance(fill_value, NDArray): if dtype is None: ret = broadcast_to(fill_value, shape) else: ret = broadcast_to(fill_value, shape).astype(dtype) return ret + if ctx is None: + ctx = str(current_context()) + else: + ctx = str(ctx) if isinstance(fill_value, bool): fill_value = int(fill_value) dtype = _np.bool if dtype is None else dtype - dtype = _np.float32 if dtype is None else dtype - return _npi.full(shape=shape, value=fill_value, ctx=ctx, dtype=dtype, out=out) + if dtype is not None and not isinstance(dtype, str): + dtype = _np.dtype(dtype).name + return _api_internal.full(shape, dtype, fill_value, ctx, out) # pylint: enable=too-many-arguments, redefined-outer-name @@ -537,7 +549,9 @@ def arange(start, stop=None, step=1, dtype=None, ctx=None): step size is 1. If `step` is specified as a position argument, `start` must also be given. dtype : dtype - The type of the output array. The default is `float32`. 
+ The type of the output array. + - When npx.is_np_default_dtype() returns False, default dtype is float32; + - When npx.is_np_default_dtype() returns True, default dtype is float64. Returns ------- @@ -549,9 +563,7 @@ def arange(start, stop=None, step=1, dtype=None, ctx=None): this rule may result in the last element of `out` being greater than `stop`. """ - if dtype is None: - dtype = 'float32' - if dtype is not isinstance(dtype, str): + if dtype is not None and not isinstance(dtype, str): dtype = _np.dtype(dtype).name if ctx is None: ctx = str(current_context()) @@ -582,7 +594,9 @@ def identity(n, dtype=None, ctx=None): n : int Number of rows (and columns) in `n` x `n` output. dtype : data-type, optional - Data-type of the output. Defaults to ``numpy.float32``. + Data-type of the output. + - When npx.is_np_default_dtype() returns False, default dtype is float32; + - When npx.is_np_default_dtype() returns True, default dtype is float64. ctx : Context, optional An optional device context (default is the current default context). @@ -595,7 +609,6 @@ def identity(n, dtype=None, ctx=None): Examples -------- >>> np.identity(3) - >>> np.identity(3) array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) @@ -605,9 +618,13 @@ def identity(n, dtype=None, ctx=None): if n < 0: raise ValueError("Input 'n' cannot be negative") if ctx is None: - ctx = current_context() - dtype = _np.float32 if dtype is None else dtype - return _npi.identity(shape=(n, n), ctx=ctx, dtype=dtype) + ctx = str(current_context()) + else: + ctx = str(ctx) + shape = (n, n) # pylint: disable=redefined-outer-name + if dtype is not None and not isinstance(dtype, str): + dtype = _np.dtype(dtype).name + return _api_internal.identity(shape, dtype, ctx) # pylint: disable=redefined-outer-name @@ -1098,7 +1115,9 @@ def divide(x1, x2, out=None, **kwargs): according to the following rules: * If both inputs are of floating number types, the output is the more precise type. * If only one of the inputs is floating number type, the result is that type. - * If both inputs are of integer types (including boolean), the output is of float32 type. + * If both inputs are of integer types (including boolean), the output is of default dtype. + - When npx.is_np_default_dtype() returns False, default dtype is float32; + - When npx.is_np_default_dtype() returns True, default dtype is float64. """ if isinstance(x1, numeric_types) and isinstance(x2, numeric_types): return _np.divide(x1, x2, out=out) @@ -1137,7 +1156,9 @@ def true_divide(x1, x2, out=None): according to the following rules: * If both inputs are of floating number types, the output is the more precise type. * If only one of the inputs is floating number type, the result is that type. - * If both inputs are of integer types (including boolean), the output is of float32 type. + * If both inputs are of integer types (including boolean), the output is of default dtype. + - When npx.is_np_default_dtype() returns False, default dtype is float32; + - When npx.is_np_default_dtype() returns True, default dtype is float64. """ if isinstance(x1, numeric_types) and isinstance(x2, numeric_types): return _np.true_divide(x1, x2, out=out) @@ -1751,7 +1772,7 @@ def histogram(a, bins=10, range=None, normed=None, weights=None, density=None): @set_module('mxnet.ndarray.numpy') -def eye(N, M=None, k=0, dtype=_np.float32, **kwargs): +def eye(N, M=None, k=0, dtype=float, **kwargs): """ Return a 2-D array with ones on the diagonal and zeros elsewhere. 
@@ -1767,6 +1788,8 @@ def eye(N, M=None, k=0, dtype=_np.float32, **kwargs): and a negative value to a lower diagonal. dtype : data-type, optional Data-type of the returned array. + - When npx.is_np_default_dtype() returns False, default dtype is float32; + - When npx.is_np_default_dtype() returns True, default dtype is float64. Returns ------- @@ -1780,6 +1803,8 @@ def eye(N, M=None, k=0, dtype=_np.float32, **kwargs): ctx = str(current_context()) else: ctx = str(ctx) + if dtype is None or dtype is float: + dtype = _np.float64 if is_np_default_dtype() else _np.float32 if dtype is not None and not isinstance(dtype, str): dtype = _np.dtype(dtype).name return _api_internal.eye(N, M, k, ctx, dtype) @@ -4668,11 +4693,11 @@ def clip(a, a_min, a_max, out=None): -------- >>> a = np.arange(10) >>> np.clip(a, 1, 8) - array([1., 1., 2., 3., 4., 5., 6., 7., 8., 8.], dtype=float32) + array([1., 1., 2., 3., 4., 5., 6., 7., 8., 8.]) >>> a - array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.], dtype=float32) + array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]) >>> np.clip(a, 3, 6, out=a) - array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.], dtype=float32) + array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.]) """ if a_min is None and a_max is None: raise ValueError('array_clip: must set either max or min') @@ -4939,7 +4964,9 @@ def average(a, axis=None, weights=None, returned=False, out=None): Return the average along the specified axis. When returned is True, return a tuple with the average as the first element and the sum of the weights as the second element. sum_of_weights is of the same type as retval. - If a is integral, the result dtype will be float32, otherwise it will be the same as dtype of a. + If a is integral, the result dtype will be the current default dtype, otherwise it will be the same + as dtype of a. (i.e. When npx.is_np_default_dtype() returns False, default dtype is float32; When + npx.is_np_default_dtype() returns True, default dtype is float64.) Raises -------- @@ -4962,7 +4989,9 @@ def average(a, axis=None, weights=None, returned=False, out=None): - Does not guarantee the same behavior with numpy when given float16 dtype and overflow happens - Does not support complex dtype - The dtypes of a and weights must be the same - - Integral a results in float32 returned dtype, not float64 + - Integral a results in default dtype. + i.e. When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. Examples -------- @@ -5002,8 +5031,10 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable If this is a tuple of ints, a mean is performed over multiple axes, instead of a single axis or all the axes as before. dtype : data-type, optional - Type to use in computing the mean. For integer inputs, the default is float32; - for floating point inputs, it is the same as the input dtype. + Type to use in computing the mean. + For integer inputs, the default is your current default dtype (i.e. When npx.is_np_default_dtype() returns + False, default dtype is float32; When npx.is_np_default_dtype() returns True, default dtype is float64.); + For floating point inputs, it is the same as the input dtype. out : ndarray, optional Alternate output array in which to place the result. 
The default is None; if provided, it must have the same shape and type as the expected output @@ -5025,7 +5056,9 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable `_ in the following way(s): - only ndarray is accepted as valid input, python iterables or scalar is not supported - - default data type for integer input is float32 + - default data type for integer input is float32 or float64, which depends on your current default dtype. + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. Examples -------- >>> a = np.array([[1, 2], [3, 4]]) @@ -5131,9 +5164,11 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: If this is a tuple of ints, a variance is performed over multiple axes, instead of a single axis or all the axes as before. dtype : data-type, optional - Type to use in computing the variance. For arrays of integer type - the default is `float32`; for arrays of float types it is the same as - the array type. + Type to use in computing the variance. + For arrays of integer type the default is `float32` or `float64`, + When npx.is_np_default_dtype() returns False, default dtype is float32, + When npx.is_np_default_dtype() returns True, default dtype is float64; + For arrays of float types it is the same as the array type. out : ndarray, optional Alternate output array in which to place the result. It must have the same shape as the expected output, but the type is cast if @@ -5183,7 +5218,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: # pylint: disable=redefined-outer-name @set_module('mxnet.ndarray.numpy') -def indices(dimensions, dtype=_np.int32, ctx=None): +def indices(dimensions, dtype=None, ctx=None): """Return an array representing the indices of a grid. Compute an array where the subarrays contain index values 0,1,... @@ -5194,7 +5229,7 @@ def indices(dimensions, dtype=_np.int32, ctx=None): dimensions : sequence of ints The shape of the grid. dtype : data-type, optional - The desired data-type for the array. Default is `float32`. + The desired data-type for the array. Default is `int64`. ctx : device context, optional Device context on which the memory is allocated. Default is `mxnet.context.current_context()`. @@ -5224,10 +5259,10 @@ def indices(dimensions, dtype=_np.int32, ctx=None): (2, 2, 3) >>> grid[0] # row indices array([[0, 0, 0], - [1, 1, 1]]) + [1, 1, 1]], dtype=int64) >>> grid[1] # column indices array([[0, 0, 0], - [1, 1, 1]], dtype=int32) + [1, 1, 1]], dtype=int64) The indices can be used as an index into an array. @@ -5245,6 +5280,8 @@ def indices(dimensions, dtype=_np.int32, ctx=None): ctx = str(current_context()) else: ctx = str(ctx) + if dtype is not None and not isinstance(dtype, str): + dtype = _np.dtype(dtype).name return _api_internal.indices(dimensions, dtype, ctx) else: raise ValueError("The dimensions must be sequence of ints") @@ -5472,7 +5509,7 @@ def diag_indices_from(arr): @set_module('mxnet.ndarray.numpy') -def hanning(M, dtype=_np.float32, ctx=None): +def hanning(M, dtype=None, ctx=None): r"""Return the Hanning window. The Hanning window is a taper formed by using a weighted cosine. @@ -5482,9 +5519,6 @@ def hanning(M, dtype=_np.float32, ctx=None): M : int Number of points in the output window. If zero or less, an empty array is returned. - dtype : str or numpy.dtype, optional - An optional value type. Default is `float32`. 
Note that you need - select numpy.float32 or float64 in this operator. ctx : Context, optional An optional device context (default is the current default context). @@ -5493,6 +5527,9 @@ def hanning(M, dtype=_np.float32, ctx=None): out : ndarray, shape(M,) The window, with the maximum value normalized to one (the value one appears only if `M` is odd). + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. + Note that you need to select numpy.float32 or float64 in this operator. See Also -------- @@ -5558,7 +5595,7 @@ def hanning(M, dtype=_np.float32, ctx=None): @set_module('mxnet.ndarray.numpy') -def hamming(M, dtype=_np.float32, ctx=None): +def hamming(M, dtype=None, ctx=None): r"""Return the hamming window. The hamming window is a taper formed by using a weighted cosine. @@ -5568,9 +5605,6 @@ def hamming(M, dtype=_np.float32, ctx=None): M : int Number of points in the output window. If zero or less, an empty array is returned. - dtype : str or numpy.dtype, optional - An optional value type. Default is `float32`. Note that you need - select numpy.float32 or float64 in this operator. ctx : Context, optional An optional device context (default is the current default context). @@ -5579,6 +5613,9 @@ def hamming(M, dtype=_np.float32, ctx=None): out : ndarray, shape(M,) The window, with the maximum value normalized to one (the value one appears only if `M` is odd). + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. + Note that you need to select numpy.float32 or float64 in this operator. See Also -------- @@ -5642,7 +5679,7 @@ def hamming(M, dtype=_np.float32, ctx=None): @set_module('mxnet.ndarray.numpy') -def blackman(M, dtype=_np.float32, ctx=None): +def blackman(M, dtype=None, ctx=None): r"""Return the Blackman window. The Blackman window is a taper formed by using the first three @@ -5655,9 +5692,6 @@ def blackman(M, dtype=_np.float32, ctx=None): M : int Number of points in the output window. If zero or less, an empty array is returned. - dtype : str or numpy.dtype, optional - An optional value type. Default is `float32`. Note that you need - select numpy.float32 or float64 in this operator. ctx : Context, optional An optional device context (default is the current default context). @@ -5666,6 +5700,9 @@ def blackman(M, dtype=_np.float32, ctx=None): out : ndarray The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. + Note that you need to select numpy.float32 or float64 in this operator. See Also -------- diff --git a/python/mxnet/ndarray/numpy/random.py b/python/mxnet/ndarray/numpy/random.py index 4f5f024f9236..8f98f2df4428 100644 --- a/python/mxnet/ndarray/numpy/random.py +++ b/python/mxnet/ndarray/numpy/random.py @@ -17,6 +17,7 @@ """Namespace for operators used in Gluon dispatched by F=ndarray.""" import numpy as np +from ...util import is_np_default_dtype from ...context import current_context from . import _internal as _npi from . 
import _api_internal @@ -76,7 +77,7 @@ def randint(low, high=None, size=None, dtype=None, ctx=None, out=None): >>> np.random.randint(5, size=(2, 4)) array([[4, 0, 2, 1], - [3, 2, 2, 0]]) + [3, 2, 2, 0]]) """ if dtype is None: dtype = 'int' @@ -112,7 +113,9 @@ def uniform(low=0.0, high=1.0, size=None, dtype=None, ctx=None, out=None): a scalar tensor containing a single value is returned if ``low`` and ``high`` are both scalars. dtype : {'float16', 'float32', 'float64'}, optional - Data type of output samples. Default is 'float32' + Data type of output samples. + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. ctx : Context, optional Device context of output. Default is current context. out : ``ndarray``, optional @@ -123,8 +126,6 @@ def uniform(low=0.0, high=1.0, size=None, dtype=None, ctx=None, out=None): out : ndarray Drawn samples from the parameterized uniform distribution. """ - if dtype is None: - dtype = 'float32' if ctx is None: ctx = str(current_context()) else: @@ -154,7 +155,9 @@ def normal(loc=0.0, scale=1.0, size=None, dtype=None, ctx=None, out=None): samples are drawn. If size is `None` (default), a scalar tensor containing a single value is returned if loc and scale are both scalars. dtype : {'float16', 'float32', 'float64'}, optional - Data type of output samples. Default is 'float32' + Data type of output samples. + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. ctx : Context, optional Device context of output. Default is current context. out : ``ndarray``, optional @@ -165,8 +168,6 @@ def normal(loc=0.0, scale=1.0, size=None, dtype=None, ctx=None, out=None): out : ndarray Drawn samples from the parameterized normal distribution. """ - if dtype is None: - dtype = 'float32' if ctx is None: ctx = str(current_context()) else: @@ -706,7 +707,9 @@ def gamma(shape, scale=1.0, size=None, dtype=None, ctx=None, out=None): a single value is returned if ``shape`` and ``scale`` are both scalars. Otherwise, ``np.broadcast(shape, scale).size`` samples are drawn. dtype : {'float16', 'float32', 'float64'}, optional - Data type of output samples. Default is 'float32'. + Data type of output samples. + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. ctx : Context, optional Device context of output. Default is current context. @@ -719,8 +722,6 @@ def gamma(shape, scale=1.0, size=None, dtype=None, ctx=None, out=None): electronic components, and arises naturally in processes for which the waiting times between Poisson distributed events are relevant. """ - if dtype is None: - dtype = 'float32' if out is not None: size = out.shape if size == (): @@ -763,7 +764,9 @@ def beta(a, b, size=None, dtype=None, ctx=None): a single value is returned if ``a`` and ``b`` are both scalars. Otherwise, ``np.broadcast(a, b).size`` samples are drawn. dtype : {'float16', 'float32', 'float64'}, optional - Data type of output samples. Default is 'float32'. + Data type of output samples. + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. ctx : Context, optional Device context of output. Default is current context. @@ -777,7 +780,7 @@ def beta(a, b, size=None, dtype=None, ctx=None): Drawn samples from the parameterized beta distribution. 
""" if dtype is None: - dtype = 'float32' + dtype = np.float64 if is_np_default_dtype() else np.float32 if ctx is None: ctx = current_context() if size == (): @@ -874,7 +877,9 @@ def chisquare(df, size=None, dtype=None, ctx=None): a single value is returned if ``df`` is a scalar. Otherwise, ``np.array(df).size`` samples are drawn. dtype : {'float16', 'float32', 'float64'}, optional - Data type of output samples. Default is 'float32'. + Data type of output samples. + When npx.is_np_default_dtype() returns Flase, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. Dtype 'float32' or 'float64' is strongly recommended, since lower precision might lead to out of range issue. ctx : Context, optional @@ -922,7 +927,7 @@ def chisquare(df, size=None, dtype=None, ctx=None): array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272]) # random """ if dtype is None: - dtype = 'float32' + dtype = np.float64 if is_np_default_dtype() else np.float32 if ctx is None: ctx = current_context() if size == (): diff --git a/python/mxnet/numpy/multiarray.py b/python/mxnet/numpy/multiarray.py index bef75eac6f7b..2823d7914aee 100644 --- a/python/mxnet/numpy/multiarray.py +++ b/python/mxnet/numpy/multiarray.py @@ -42,7 +42,8 @@ from ..base import check_call, _LIB, NDArrayHandle, c_array from ..base import mx_real_t, c_array_buf, mx_uint, numeric_types, integer_types from ..context import Context -from ..util import set_module, wrap_np_unary_func, wrap_np_binary_func +from ..util import set_module, wrap_np_unary_func, wrap_np_binary_func,\ + is_np_default_dtype from ..context import current_context from ..ndarray import numpy as _mx_nd_np from ..ndarray.numpy import _internal as _npi @@ -1122,9 +1123,9 @@ def as_np_ndarray(self): def __repr__(self): """ - Returns a string representation of the array. The dtype of the ndarray will not - be appended to the string if it is `float32`. The context of the ndarray will - be appended for devices other than CPU. + Returns a string representation of the array. + The dtype of the ndarray will be appended if it's inconsistent with current dtype. + The context of the ndarray will be appended for devices other than CPU. 
Examples -------- @@ -1137,7 +1138,15 @@ def __repr__(self): [[0.5488135 0.5928446 0.71518934] [0.84426576 0.60276335 0.8579456 ]] >>> a.dtype - + dtype('float32') + >>> npx.set_np_float64() + >>> a + array([[0.5488135 , 0.5928446 , 0.71518934], + [0.84426576, 0.60276335, 0.8579456 ]], dtype=float32) + >>> npx.set_np_float64(default_float64=False) + >>> a + array([[0.5488135 , 0.5928446 , 0.71518934], + [0.84426576, 0.60276335, 0.8579456 ]]) >>> b = a.astype(np.float64) >>> b array([[0.54881352, 0.59284461, 0.71518934], @@ -1146,7 +1155,7 @@ def __repr__(self): [[0.54881352 0.59284461 0.71518934] [0.84426576 0.60276335 0.85794562]] >>> b.dtype - + dtype('float64') >>> c = a.copyto(npx.gpu(0)) >>> c array([[0.5488135 , 0.5928446 , 0.71518934], @@ -1161,13 +1170,15 @@ def __repr__(self): >>> print(d) [[0.54881352 0.59284461 0.71518934] [0.84426576 0.60276335 0.85794562]] @gpu(0) + """ array_str = self.asnumpy().__repr__() dtype = self.dtype + default_dtype = _np.float64 if is_np_default_dtype() else _np.float32 if 'dtype=' in array_str: - if dtype == _np.float32: + if dtype == default_dtype: array_str = array_str[:array_str.rindex(',')] + ')' - elif dtype not in (_np.float32, _np.bool_): + elif dtype not in (default_dtype, _np.bool_): array_str = array_str[:-1] + ', dtype={})'.format(dtype) context = self.ctx @@ -2205,17 +2216,19 @@ def tostype(self, stype): @set_module('mxnet.numpy') -def empty(shape, dtype=_np.float32, order='C', ctx=None): # pylint: disable=redefined-outer-name +def empty(shape, dtype=float, order='C', ctx=None): # pylint: disable=redefined-outer-name """Return a new array of given shape and type, without initializing entries. Parameters ---------- shape : int or tuple of int Shape of the empty array, e.g., ``(2, 3)`` or ``2``. dtype : data-type, optional - Desired output data-type for the array, e.g, `numpy.int8`. Default is - `numpy.float32`. Note that this behavior is different from NumPy's `empty` - function where `float64` is the default value, because `float32` is - considered as the default data type in deep learning. + Desired output data-type for the array, e.g, `numpy.int8`. + Note that this behavior is different from NumPy's `empty` function where `float64` + is the default value, here you can set your default dtype as 'float32' or 'float64' + because `float32` is considered as the default data type in deep learning. + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. order : {'C'}, optional, default: 'C' How to store multi-dimensional data in memory, currently only row-major (C-style) is supported. @@ -2243,8 +2256,8 @@ def empty(shape, dtype=_np.float32, order='C', ctx=None): # pylint: disable=red .format(str(order))) if ctx is None: ctx = current_context() - if dtype is None: - dtype = _np.float32 + if dtype is None or dtype is float: + dtype = _np.float64 if is_np_default_dtype() else _np.float32 if isinstance(shape, int): shape = (shape,) return ndarray(handle=_new_alloc_handle(shape, ctx, False, dtype)) @@ -2262,7 +2275,9 @@ def array(object, dtype=None, ctx=None): An array, any object exposing the array interface, an object whose __array__ method returns an array, or any (nested) sequence. dtype : data-type, optional - The desired data-type for the array. Default is `float32`. + The desired data-type for the array. + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. 
ctx : device context, optional Device context on which the memory is allocated. Default is `mxnet.context.current_context()`. @@ -2294,7 +2309,8 @@ def array(object, dtype=None, ctx=None): "from mx.nd.NDArray, please use the zero-copy as_np_ndarray function.") else: if dtype is None: - dtype = object.dtype if hasattr(object, "dtype") else _np.float32 + default_dtype = _np.float64 if is_np_default_dtype() else _np.float32 + dtype = object.dtype if hasattr(object, "dtype") else default_dtype try: object = _np.array(object, dtype=dtype) except Exception as e: @@ -2355,10 +2371,12 @@ def zeros(shape, dtype=None, order='C', ctx=None): # pylint: disable=redefined- shape : int or tuple of int The shape of the empty array. dtype : str or numpy.dtype, optional - An optional value type (default is `numpy.float32`). Note that this - behavior is different from NumPy's `zeros` function where `float64` - is the default value, because `float32` is considered as the default - data type in deep learning. + An optional value type, + When npx.is_np_default_dtype() returns False, default dtype is float32, + When npx.is_np_default_dtype() returns True, default dtype is float64. + Note that this behavior is different from NumPy's `zeros` function where `float64` + is the default value, here you can set 'float32' or 'float64' as your default dtype, + because `float32` is considered as the default data type in deep learning. order : {'C'}, optional, default: 'C' How to store multi-dimensional data in memory, currently only row-major (C-style) is supported. @@ -2386,7 +2404,7 @@ def zeros(shape, dtype=None, order='C', ctx=None): # pylint: disable=redefined- @set_module('mxnet.numpy') -def ones(shape, dtype=_np.float32, order='C', ctx=None): # pylint: disable=redefined-outer-name +def ones(shape, dtype=None, order='C', ctx=None): # pylint: disable=redefined-outer-name """Return a new array of given shape and type, filled with ones. This function currently only supports storing multi-dimensional data in row-major (C-style). @@ -2396,10 +2414,11 @@ def ones(shape, dtype=_np.float32, order='C', ctx=None): # pylint: disable=rede shape : int or tuple of int The shape of the empty array. dtype : str or numpy.dtype, optional - An optional value type. Default is `numpy.float32`. Note that this - behavior is different from NumPy's `ones` function where `float64` - is the default value, because `float32` is considered as the default - data type in deep learning. + An optional value type. Default depends on your current default dtype. + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. + Note that this behavior is different from NumPy's `ones` function where + `float64` is the default value. order : {'C'}, optional, default: 'C' How to store multi-dimensional data in memory, currently only row-major (C-style) is supported. @@ -2698,7 +2717,9 @@ def identity(n, dtype=None, ctx=None): n : int Number of rows (and columns) in `n` x `n` output. dtype : data-type, optional - Data-type of the output. Defaults to ``numpy.float32``. + Data-type of the output. + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. ctx : Context, optional An optional device context (default is the current default context). 
@@ -3069,7 +3090,10 @@ def divide(x1, x2, out=None, **kwargs): according to the following rules: * If both inputs are of floating number types, the output is the more precise type. * If only one of the inputs is floating number type, the result is that type. - * If both inputs are of integer types (including boolean), the output is of float32 type. + * If both inputs are of integer types (including boolean), the output is of float32 or + float64 type, which depends on your current default dtype. + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. Examples -------- @@ -3111,7 +3135,10 @@ def true_divide(x1, x2, out=None): according to the following rules: * If both inputs are of floating number types, the output is the more precise type. * If only one of the inputs is floating number type, the result is that type. - * If both inputs are of integer types (including boolean), the output is of float32 type. + * If both inputs are of integer types (including boolean), the output is of float32 or + float64 type, which depends on your current default dtype. + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. Examples -------- @@ -5171,7 +5198,7 @@ def histogram(a, bins=10, range=None, normed=None, weights=None, density=None): # pylint: disable=redefined-outer-name @set_module('mxnet.numpy') -def eye(N, M=None, k=0, dtype=_np.float32, **kwargs): +def eye(N, M=None, k=0, dtype=float, **kwargs): """ Return a 2-D array with ones on the diagonal and zeros elsewhere. @@ -5187,6 +5214,8 @@ def eye(N, M=None, k=0, dtype=_np.float32, **kwargs): and a negative value to a lower diagonal. dtype : data-type, optional Data-type of the returned array. + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. Returns ------- @@ -5750,7 +5779,8 @@ def arange(start, stop=None, step=1, dtype=None, ctx=None): step size is 1. If `step` is specified as a position argument, `start` must also be given. dtype : dtype - The type of the output array. The default is `float32`. + The type of the output array. The default is `float32` or `float64`, + which depends on your current default dtype. Returns ------- @@ -6590,11 +6620,11 @@ def clip(a, a_min, a_max, out=None): -------- >>> a = np.arange(10) >>> np.clip(a, 1, 8) - array([1., 1., 2., 3., 4., 5., 6., 7., 8., 8.], dtype=float32) + array([1., 1., 2., 3., 4., 5., 6., 7., 8., 8.]) >>> a - array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.], dtype=float32) + array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]) >>> np.clip(a, 3, 6, out=a) - array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.], dtype=float32) + array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.]) """ from numbers import Number if isinstance(a, Number): @@ -6781,7 +6811,10 @@ def average(a, axis=None, weights=None, returned=False, out=None): Return the average along the specified axis. When returned is True, return a tuple with the average as the first element and the sum of the weights as the second element. sum_of_weights is of the same type as retval. - If a is integral, the result dtype will be float32, otherwise it will be the same as dtype of a. 
+ If a is integral, the result dtype will be the current default dtype, + When npx.is_np_default_dtype() returns False, default dtype is float32, + When npx.is_np_default_dtype() returns True, default dtype is float64; + otherwise it will be the same as dtype of a. Raises -------- @@ -6804,7 +6837,7 @@ def average(a, axis=None, weights=None, returned=False, out=None): - Does not guarantee the same behavior with numpy when given float16 dtype and overflow happens - Does not support complex dtype - The dtypes of a and weights must be the same - - Integral a results in float32 returned dtype, not float64 + - Integral a results in float32 or float64 returned dtype, which depends on your current default dtype Examples -------- @@ -6845,8 +6878,11 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable If this is a tuple of ints, a mean is performed over multiple axes, instead of a single axis or all the axes as before. dtype : data-type, optional - Type to use in computing the mean. For integer inputs, the default is float32; - for floating point inputs, it is the same as the input dtype. + Type to use in computing the mean. + For integer inputs, the default is your current default dtype, + When npx.is_np_default_dtype() returns False, default dtype is float32, + When npx.is_np_default_dtype() returns True, default dtype is float64; + For floating point inputs, it is the same as the input dtype. out : ndarray, optional Alternate output array in which to place the result. The default is None; if provided, it must have the same shape and type as the expected output. @@ -6870,7 +6906,7 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable `_ in the following way(s): - only ndarray is accepted as valid input, python iterables or scalar is not supported - - default data type for integer input is float32 + - default data type for integer input is float32 or float64, which depends on your current default dtype Examples -------- >>> a = np.array([[1, 2], [3, 4]]) >>> np.mean(a) array(2.5) >>> a = np.zeros((2, 512*512), dtype=np.float32) >>> a[0,:] = 1.0 >>> a[1,:] = 0.1 >>> np.mean(a) array(0.55) >>> np.mean(a, dtype=np.float64) - array(0.55) + array(0.55, dtype=float64) """ return _npi.mean(a, axis=axis, dtype=dtype, keepdims=keepdims, out=out) # pylint: enable=redefined-outer-name @@ -7028,9 +7064,11 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: If this is a tuple of ints, a variance is performed over multiple axes, instead of a single axis or all the axes as before. dtype : data-type, optional - Type to use in computing the variance. For arrays of integer type - the default is `float32`; for arrays of float types it is the same as - the array type. + Type to use in computing the variance. + For arrays of integer type, the default is your current default dtype, + When npx.is_np_default_dtype() returns False, default dtype is float32, + When npx.is_np_default_dtype() returns True, default dtype is float64. + For arrays of float types it is the same as the array type. out : ndarray, optional Alternate output array in which to place the result. It must have the same shape as the expected output, but the type is cast if @@ -7080,7 +7118,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: # pylint: disable=redefined-outer-name @set_module('mxnet.numpy') -def indices(dimensions, dtype=_np.int32, ctx=None): +def indices(dimensions, dtype=None, ctx=None): """Return an array representing the indices of a grid. 
Compute an array where the subarrays contain index values 0,1,... @@ -7091,7 +7129,7 @@ def indices(dimensions, dtype=_np.int32, ctx=None): dimensions : sequence of ints The shape of the grid. dtype : data-type, optional - The desired data-type for the array. Default is `float32`. + The desired data-type for the array. Default is `int64`. ctx : device context, optional Device context on which the memory is allocated. Default is `mxnet.context.current_context()`. @@ -7121,10 +7159,10 @@ def indices(dimensions, dtype=_np.int32, ctx=None): (2, 2, 3) >>> grid[0] # row indices array([[0, 0, 0], - [1, 1, 1]]) + [1, 1, 1]], dtype=int64) >>> grid[1] # column indices array([[0, 0, 0], - [1, 1, 1]], dtype=int32) + [1, 1, 1]], dtype=int64) The indices can be used as an index into an array. @@ -7351,7 +7389,7 @@ def diag_indices_from(arr): # pylint: disable=redefined-outer-name @set_module('mxnet.numpy') -def hanning(M, dtype=_np.float32, ctx=None): +def hanning(M, dtype=None, ctx=None): r"""Return the Hanning window. The Hanning window is a taper formed by using a weighted cosine. @@ -7361,9 +7399,6 @@ def hanning(M, dtype=_np.float32, ctx=None): M : int Number of points in the output window. If zero or less, an empty array is returned. - dtype : str or numpy.dtype, optional - An optional value type. Default is `float32`. Note that you need - select numpy.float32 or float64 in this operator. ctx : Context, optional An optional device context (default is the current default context). @@ -7372,6 +7407,9 @@ def hanning(M, dtype=_np.float32, ctx=None): out : ndarray, shape(M,) The window, with the maximum value normalized to one (the value one appears only if `M` is odd). + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. + Note that you need to select numpy.float32 or float64 in this operator. See Also -------- @@ -7428,12 +7466,11 @@ def hanning(M, dtype=_np.float32, ctx=None): >>> plt.show() """ return _mx_nd_np.hanning(M, dtype=dtype, ctx=ctx) -# pylint: enable=redefined-outer-name # pylint: disable=redefined-outer-name @set_module('mxnet.numpy') -def hamming(M, dtype=_np.float32, ctx=None): +def hamming(M, dtype=None, ctx=None): r"""Return the hamming window. The hamming window is a taper formed by using a weighted cosine. @@ -7443,9 +7480,6 @@ def hamming(M, dtype=_np.float32, ctx=None): M : int Number of points in the output window. If zero or less, an empty array is returned. - dtype : str or numpy.dtype, optional - An optional value type. Default is `float32`. Note that you need - select numpy.float32 or float64 in this operator. ctx : Context, optional An optional device context (default is the current default context). @@ -7454,6 +7488,9 @@ def hamming(M, dtype=_np.float32, ctx=None): out : ndarray, shape(M,) The window, with the maximum value normalized to one (the value one appears only if `M` is odd). + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. + Note that you need to select numpy.float32 or float64 in this operator. See Also -------- @@ -7508,12 +7545,11 @@ def hamming(M, dtype=_np.float32, ctx=None): >>> plt.show() """ return _mx_nd_np.hamming(M, dtype=dtype, ctx=ctx) -# pylint: enable=redefined-outer-name # pylint: disable=redefined-outer-name @set_module('mxnet.numpy') -def blackman(M, dtype=_np.float32, ctx=None): +def blackman(M, dtype=None, ctx=None): r"""Return the Blackman window. 
The Blackman window is a taper formed by using the first three @@ -7526,9 +7562,6 @@ def blackman(M, dtype=_np.float32, ctx=None): M : int Number of points in the output window. If zero or less, an empty array is returned. - dtype : str or numpy.dtype, optional - An optional value type. Default is `float32`. Note that you need - select numpy.float32 or float64 in this operator. ctx : Context, optional An optional device context (default is the current default context). @@ -7537,6 +7570,9 @@ def blackman(M, dtype=_np.float32, ctx=None): out : ndarray The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. + Note that you need to select numpy.float32 or float64 in this operator. See Also -------- @@ -7586,7 +7622,6 @@ def blackman(M, dtype=_np.float32, ctx=None): >>> plt.show() """ return _mx_nd_np.blackman(M, dtype=dtype, ctx=ctx) -# pylint: enable=redefined-outer-name @set_module('mxnet.numpy') diff --git a/python/mxnet/numpy/random.py b/python/mxnet/numpy/random.py index 6d46b2d314aa..aed7af61ff9c 100644 --- a/python/mxnet/numpy/random.py +++ b/python/mxnet/numpy/random.py @@ -102,7 +102,9 @@ def uniform(low=0.0, high=1.0, size=None, dtype=None, ctx=None, out=None): ``low`` and ``high`` are both scalars. Otherwise, ``np.broadcast(low, high).size`` samples are drawn. dtype : {'float16', 'float32', 'float64'}, optional - Data type of output samples. Default is 'float32' + Data type of output samples. + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. ctx : Context, optional Device context of output. Default is current context. @@ -154,7 +156,9 @@ def normal(loc=0.0, scale=1.0, size=None, dtype=None, ctx=None, out=None): a single value is returned if loc and scale are both scalars. Otherwise, ``np.broadcast(low, high).size`` samples are drawn. dtype : {'float16', 'float32', 'float64'}, optional - Data type of output samples. Default is 'float32' + Data type of output samples. + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. ctx : Context, optional Device context of output, default is current context. out : ``ndarray``, optional diff --git a/python/mxnet/numpy_extension/__init__.py b/python/mxnet/numpy_extension/__init__.py index a9e34b1b460b..a3666c48bd9d 100644 --- a/python/mxnet/numpy_extension/__init__.py +++ b/python/mxnet/numpy_extension/__init__.py @@ -25,7 +25,8 @@ from . 
import _register from ._op import * # pylint: disable=wildcard-import from ..context import * # pylint: disable=wildcard-import -from ..util import is_np_shape, is_np_array, set_np, reset_np, get_cuda_compute_capability +from ..util import is_np_shape, is_np_array, set_np, reset_np, get_cuda_compute_capability,\ + is_np_default_dtype, set_np_default_dtype from ..ndarray import waitall from .utils import * # pylint: disable=wildcard-import diff --git a/python/mxnet/symbol/numpy/_symbol.py b/python/mxnet/symbol/numpy/_symbol.py index 69a22fdc0c21..dc18bc8b7283 100644 --- a/python/mxnet/symbol/numpy/_symbol.py +++ b/python/mxnet/symbol/numpy/_symbol.py @@ -26,6 +26,7 @@ from ...base import py_str from ...util import check_call, set_module, _sanity_check_params from ...util import wrap_np_unary_func, wrap_np_binary_func +from ...util import is_np_default_dtype from ...context import current_context from ..symbol import Symbol, Group from .._internal import _set_np_symbol_class @@ -1051,7 +1052,7 @@ def broadcast_like(self, *args, **kwargs): @set_module('mxnet.symbol.numpy') -def zeros(shape, dtype=_np.float32, order='C', ctx=None): +def zeros(shape, dtype=float, order='C', ctx=None): """Return a new array of given shape and type, filled with zeros. This function currently only supports storing multi-dimensional data in row-major (C-style). @@ -1061,10 +1062,12 @@ def zeros(shape, dtype=_np.float32, order='C', ctx=None): shape : int or tuple of int The shape of the empty array. dtype : str or numpy.dtype, optional - An optional value type. Default is `numpy.float32`. Note that this - behavior is different from NumPy's `zeros` function where `float64` - is the default value, because `float32` is considered as the default - data type in deep learning. + An optional value type. + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. + Note that this behavior is different from NumPy's `zeros` function where `float64` + is the default value, here you can set 'float32' or 'float64' as your default dtype, + because `float32` is considered as the default data type in deep learning. order : {'C'}, optional, default: 'C' How to store multi-dimensional data in memory, currently only row-major (C-style) is supported. @@ -1080,12 +1083,13 @@ def zeros(shape, dtype=_np.float32, order='C', ctx=None): raise NotImplementedError if ctx is None: ctx = current_context() - dtype = _np.float32 if dtype is None else dtype + if dtype is None or dtype is float: + dtype = _np.float64 if is_np_default_dtype() else _np.float32 return _npi.zeros(shape=shape, ctx=ctx, dtype=dtype) @set_module('mxnet.symbol.numpy') -def ones(shape, dtype=_np.float32, order='C', ctx=None): +def ones(shape, dtype=None, order='C', ctx=None): """Return a new array of given shape and type, filled with ones. This function currently only supports storing multi-dimensional data in row-major (C-style). @@ -1095,10 +1099,11 @@ def ones(shape, dtype=_np.float32, order='C', ctx=None): shape : int or tuple of int The shape of the empty array. dtype : str or numpy.dtype, optional - An optional value type. Default is `numpy.float32`. Note that this - behavior is different from NumPy's `ones` function where `float64` - is the default value, because `float32` is considered as the default - data type in deep learning. + An optional value type. 
+ When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. + Note that this behavior is different from NumPy's `ones` function where + `float64` is the default value. order : {'C'}, optional, default: 'C' How to store multi-dimensional data in memory, currently only row-major (C-style) is supported. @@ -1114,7 +1119,6 @@ def ones(shape, dtype=_np.float32, order='C', ctx=None): raise NotImplementedError if ctx is None: ctx = current_context() - dtype = _np.float32 if dtype is None else dtype return _npi.ones(shape=shape, ctx=ctx, dtype=dtype) @@ -1251,6 +1255,8 @@ def full(shape, fill_value, dtype=None, order='C', ctx=None, out=None): # pylin fill_value : scalar or _Symbol Fill value. dtype : data-type, optional + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. The desired data-type for the array. The default, `None`, means `np.array(fill_value).dtype`. order : {'C'}, optional @@ -1300,7 +1306,6 @@ def full(shape, fill_value, dtype=None, order='C', ctx=None, out=None): # pylin if isinstance(fill_value, bool): fill_value = int(fill_value) dtype = _np.bool if dtype is None else dtype - dtype = _np.float32 if dtype is None else dtype return _npi.full(shape=shape, value=fill_value, ctx=ctx, dtype=dtype, out=out) @@ -1448,7 +1453,9 @@ def identity(n, dtype=None, ctx=None): n : int Number of rows (and columns) in `n` x `n` output. dtype : data-type, optional - Data-type of the output. Defaults to ``numpy.float32``. + Data-type of the output. + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. ctx : Context, optional An optional device context (default is the current default context). @@ -1464,7 +1471,6 @@ def identity(n, dtype=None, ctx=None): raise ValueError("Input 'n' cannot be negative") if ctx is None: ctx = current_context() - dtype = _np.float32 if dtype is None else dtype return _npi.identity(shape=(n, n), ctx=ctx, dtype=dtype) @@ -1903,7 +1909,7 @@ def histogram(a, bins=10, range=None, normed=None, weights=None, density=None): @set_module('mxnet.symbol.numpy') -def eye(N, M=None, k=0, dtype=_np.float32, **kwargs): +def eye(N, M=None, k=0, dtype=float, **kwargs): """ Return a 2-D array with ones on the diagonal and zeros elsewhere. @@ -1919,6 +1925,8 @@ def eye(N, M=None, k=0, dtype=_np.float32, **kwargs): and a negative value to a lower diagonal. dtype : data-type, optional Data-type of the returned array. + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. Returns ------- @@ -1930,6 +1938,8 @@ def eye(N, M=None, k=0, dtype=_np.float32, **kwargs): ctx = kwargs.pop('ctx', current_context()) if ctx is None: ctx = current_context() + if dtype is None or dtype is float: + dtype = _np.float64 if is_np_default_dtype() else _np.float32 return _npi.eye(N, M, k, ctx, dtype) @@ -3661,7 +3671,9 @@ def arange(start, stop=None, step=1, dtype=None, ctx=None): step size is 1. If `step` is specified as a position argument, `start` must also be given. dtype : dtype - The type of the output array. The default is `float32`. + The type of the output array. + When npx.is_np_default_dtype() returns False, default dtype is float32; + When npx.is_np_default_dtype() returns True, default dtype is float64. 
Returns ------- @@ -3673,8 +3685,6 @@ def arange(start, stop=None, step=1, dtype=None, ctx=None): this rule may result in the last element of `out` being greater than `stop`. """ - if dtype is None: - dtype = 'float32' if ctx is None: ctx = current_context() if stop is None: @@ -4130,7 +4140,6 @@ def get_list(arrays): if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'): raise ValueError("expected iterable for arrays but got {}".format(type(arrays))) return [arr for arr in arrays] - arrays = get_list(arrays) return _npi.stack(*arrays, axis=axis, out=out) @@ -4163,7 +4172,6 @@ def get_list(arrays): if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'): raise ValueError("expected iterable for arrays but got {}".format(type(arrays))) return [arr for arr in arrays] - arrays = get_list(arrays) return _npi.vstack(*arrays) @@ -4572,7 +4580,10 @@ def average(a, axis=None, weights=None, returned=False, out=None): Return the average along the specified axis. When returned is True, return a tuple with the average as the first element and the sum of the weights as the second element. sum_of_weights is of the same type as retval. - If a is integral, the result dtype will be float32, otherwise it will be the same as dtype of a. + If a is integral, the result dtype will be your current default dtype, + When npx.is_np_default_dtype() returns False, default dtype is float32, + When npx.is_np_default_dtype() returns True, default dtype is float64; + otherwise it will be the same as dtype of a. Raises -------- @@ -4595,7 +4606,8 @@ def average(a, axis=None, weights=None, returned=False, out=None): - Does not guarantee the same behavior with numpy when given float16 dtype and overflow happens - Does not support complex dtype - The dtypes of a and weights must be the same - - Integral a results in float32 returned dtype, not float64 + - Integral a results in float32 or float64 returned dtype, which depends on your current default dtype + Examples -------- @@ -4640,7 +4652,9 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable If this is a tuple of ints, a mean is performed over multiple axes, instead of a single axis or all the axes as before. dtype : data-type, optional - Type to use in computing the mean. For integer inputs, the default is float32; + Type to use in computing the mean. + For integer inputs, When npx.is_np_default_dtype() returns False, default dtype is float32, + When npx.is_np_default_dtype() returns True, default dtype is float64; for floating point inputs, it is the same as the input dtype. out : _Symbol, optional Dummy parameter to keep the consistency with the ndarray counterpart. @@ -4665,7 +4679,7 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable the following way(s): - only _Symbol is accepted as valid input, python iterables or scalar is not supported - - default data type for integer input is float32 + - default data type for integer input is float32 or float64, which depends on your current default dtype Examples -------- @@ -4752,8 +4766,11 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: If this is a tuple of ints, computation is performed over multiple axes, instead of a single axis or all the axes as before. dtype : data-type, optional - Type to use in computing the variance. For integer inputs, the default is float32; - for floating point inputs, it is the same as the input dtype. + Type to use in computing the variance. 
+        For arrays of integer type,
+        when npx.is_np_default_dtype() returns False, the default is float32,
+        and when npx.is_np_default_dtype() returns True, the default is float64;
+        for arrays of float types it is the same as the array type.
     out : _Symbol, optional
         Dummy parameter to keep the consistency with the ndarray counterpart.
     keepdims : bool, optional
@@ -4785,7 +4802,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):  # pylint:

 # pylint: disable=redefined-outer-name
 @set_module('mxnet.symbol.numpy')
-def indices(dimensions, dtype=_np.int32, ctx=None):
+def indices(dimensions, dtype=None, ctx=None):
     """Return an array representing the indices of a grid.

     Compute an array where the subarrays contain index values 0,1,...
@@ -4796,7 +4813,7 @@ def indices(dimensions, dtype=None, ctx=None):
     dimensions : sequence of ints
         The shape of the grid.
     dtype : data-type, optional
-        The desired data-type for the array. Default is `float32`.
+        The desired data-type for the array. Default is `int64`.
     ctx : device context, optional
         Device context on which the memory is allocated. Default is
         `mxnet.context.current_context()`.
@@ -4826,10 +4843,10 @@ def indices(dimensions, dtype=None, ctx=None):
     (2, 2, 3)
     >>> grid[0]        # row indices
     array([[0, 0, 0],
-           [1, 1, 1]])
+           [1, 1, 1]], dtype=int64)
     >>> grid[1]        # column indices
     array([[0, 0, 0],
-           [1, 1, 1]], dtype=int32)
+           [1, 1, 1]], dtype=int64)

     The indices can be used as an index into an array.
@@ -5022,7 +5039,7 @@ def diag_indices_from(arr):

 @set_module('mxnet.symbol.numpy')
-def hanning(M, dtype=_np.float32, ctx=None):
+def hanning(M, dtype=None, ctx=None):
     r"""Return the Hanning window.

     The Hanning window is a taper formed by using a weighted cosine.
@@ -5032,9 +5049,6 @@
     M : int
         Number of points in the output window. If zero or less, an
         empty array is returned.
-    dtype : str or numpy.dtype, optional
-        An optional value type. Default is `float32`. Note that you need
-        select numpy.float32 or float64 in this operator.
     ctx : Context, optional
         An optional device context (default is the current default context).
@@ -5043,6 +5057,9 @@
     Returns
     -------
     out : _Symbol, shape(M,)
         The window, with the maximum value normalized to one (the value
         one appears only if `M` is odd).
+        When npx.is_np_default_dtype() returns False, default dtype is float32;
+        When npx.is_np_default_dtype() returns True, default dtype is float64.
+        Note that you need to select numpy.float32 or float64 in this operator.

     See Also
     --------
@@ -5104,7 +5121,7 @@

 @set_module('mxnet.symbol.numpy')
-def hamming(M, dtype=_np.float32, ctx=None):
+def hamming(M, dtype=None, ctx=None):
     r"""Return the hamming window.

     The hamming window is a taper formed by using a weighted cosine.
@@ -5114,9 +5131,6 @@
     M : int
         Number of points in the output window. If zero or less, an
         empty array is returned.
-    dtype : str or numpy.dtype, optional
-        An optional value type. Default is `float32`. Note that you need
-        select numpy.float32 or float64 in this operator.
     ctx : Context, optional
         An optional device context (default is the current default context).
@@ -5125,6 +5139,9 @@
     out : _Symbol, shape(M,)
         The window, with the maximum value normalized to one (the value
         one appears only if `M` is odd).
+        When npx.is_np_default_dtype() returns False, default dtype is float32;
+        When npx.is_np_default_dtype() returns True, default dtype is float64.
+        Note that you need to select numpy.float32 or float64 in this operator.

     See Also
     --------
@@ -5184,7 +5201,7 @@

 @set_module('mxnet.symbol.numpy')
-def blackman(M, dtype=_np.float32, ctx=None):
+def blackman(M, dtype=None, ctx=None):
     r"""Return the Blackman window.

     The Blackman window is a taper formed by using the first three
@@ -5197,9 +5214,6 @@
     M : int
         Number of points in the output window. If zero or less, an
         empty array is returned.
-    dtype : str or numpy.dtype, optional
-        An optional value type. Default is `float32`. Note that you need
-        select numpy.float32 or float64 in this operator.
     ctx : Context, optional
         An optional device context (default is the current default context).
@@ -5208,6 +5222,9 @@
     out : _Symbol
         The window, with the maximum value normalized to one (the value one
         appears only if the number of samples is odd).
+        When npx.is_np_default_dtype() returns False, default dtype is float32;
+        When npx.is_np_default_dtype() returns True, default dtype is float64.
+        Note that you need to select numpy.float32 or float64 in this operator.

     See Also
     --------
diff --git a/python/mxnet/symbol/numpy/random.py b/python/mxnet/symbol/numpy/random.py
index db3338494a43..fa4658f98850 100644
--- a/python/mxnet/symbol/numpy/random.py
+++ b/python/mxnet/symbol/numpy/random.py
@@ -17,7 +17,9 @@
 """Namespace for operators used in Gluon dispatched by F=symbol."""

+import numpy as np
 from ...context import current_context
+from ...util import is_np_default_dtype
 from . import _internal as _npi

@@ -137,7 +139,9 @@ def uniform(low=0.0, high=1.0, size=None, dtype=None, ctx=None, out=None):
         a scalar tensor containing a single value is returned if
         ``low`` and ``high`` are both scalars.
     dtype : {'float16', 'float32', 'float64'}, optional
-        Data type of output samples. Default is 'float32'
+        Data type of output samples.
+        When npx.is_np_default_dtype() returns False, default dtype is float32;
+        When npx.is_np_default_dtype() returns True, default dtype is float64.
     ctx : Context, optional
         Device context of output. Default is current context.

@@ -148,8 +152,6 @@ def uniform(low=0.0, high=1.0, size=None, dtype=None, ctx=None, out=None):
     """
     from ._symbol import _Symbol as np_symbol
     input_type = (isinstance(low, np_symbol), isinstance(high, np_symbol))
-    if dtype is None:
-        dtype = 'float32'
     if ctx is None:
         ctx = current_context()
     if out is not None:
@@ -188,7 +190,9 @@ def normal(loc=0.0, scale=1.0, size=None, dtype=None, ctx=None, out=None):
         samples are drawn. If size is `None` (default), a scalar tensor containing
         a single value is returned if loc and scale are both scalars.
     dtype : {'float16', 'float32', 'float64'}, optional
-        Data type of output samples. Default is 'float32'.
+        Data type of output samples.
+        When npx.is_np_default_dtype() returns False, default dtype is float32;
+        When npx.is_np_default_dtype() returns True, default dtype is float64.
     ctx : Context, optional
         Device context of output. Default is current context.
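With the hard-coded `dtype = 'float32'` fallbacks being removed from this module, a `dtype=None` argument now flows through to the backend, which resolves it against the global default-dtype flag. A minimal usage sketch of the intended behavior, assuming a build that includes this patch:

    import mxnet as mx
    from mxnet import np, npx

    npx.set_np()                      # after this patch, also turns dtype semantics on
    assert np.random.normal(size=(2, 2)).dtype == 'float64'

    npx.set_np(dtype=False)           # keep np shape/array semantics, legacy float32 default
    assert np.random.uniform(size=(2, 2)).dtype == 'float32'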
@@ -199,8 +203,6 @@ def normal(loc=0.0, scale=1.0, size=None, dtype=None, ctx=None, out=None):
     """
     from ._symbol import _Symbol as np_symbol
     input_type = (isinstance(loc, np_symbol), isinstance(scale, np_symbol))
-    if dtype is None:
-        dtype = 'float32'
     if ctx is None:
         ctx = current_context()
     if size == ():
@@ -486,7 +488,9 @@ def gamma(shape, scale=1.0, size=None, dtype=None, ctx=None, out=None):
         a single value is returned if ``shape`` and ``scale`` are both scalars.
         Otherwise, ``np.broadcast(shape, scale).size`` samples are drawn.
     dtype : {'float16', 'float32', 'float64'}, optional
-        Data type of output samples. Default is 'float32'.
+        Data type of output samples.
+        When npx.is_np_default_dtype() returns False, default dtype is float32;
+        When npx.is_np_default_dtype() returns True, default dtype is float64.
     ctx : Context, optional
         Device context of output. Default is current context.

@@ -501,8 +505,6 @@ def gamma(shape, scale=1.0, size=None, dtype=None, ctx=None, out=None):
     """
     from ._symbol import _Symbol as np_symbol
     input_type = (isinstance(shape, np_symbol), isinstance(scale, np_symbol))
-    if dtype is None:
-        dtype = 'float32'
     if ctx is None:
         ctx = current_context()
     if out is not None:
@@ -587,7 +589,9 @@ def beta(a, b, size=None, dtype=None, ctx=None):
         a single value is returned if ``a`` and ``b`` are both scalars.
         Otherwise, ``np.broadcast(a, b).size`` samples are drawn.
     dtype : {'float16', 'float32', 'float64'}, optional
-        Data type of output samples. Default is 'float32'.
+        Data type of output samples.
+        When npx.is_np_default_dtype() returns False, default dtype is float32;
+        When npx.is_np_default_dtype() returns True, default dtype is float64.
         Dtype 'float32' or 'float64' is strongly recommended, since lower
         precision might lead to out of range issue.
     ctx : Context, optional
@@ -603,7 +607,7 @@ def beta(a, b, size=None, dtype=None, ctx=None):
         Drawn samples from the parameterized beta distribution.
     """
     if dtype is None:
-        dtype = 'float32'
+        dtype = np.float64 if is_np_default_dtype() else np.float32
     if ctx is None:
         ctx = current_context()
     if size == ():
@@ -673,7 +677,9 @@ def chisquare(df, size=None, dtype=None, ctx=None):
         a single value is returned if ``df`` is a scalar. Otherwise,
         ``np.array(df).size`` samples are drawn.
     dtype : {'float16', 'float32', 'float64'}, optional
-        Data type of output samples. Default is 'float32'.
+        Data type of output samples.
+        When npx.is_np_default_dtype() returns False, default dtype is float32;
+        When npx.is_np_default_dtype() returns True, default dtype is float64.
     ctx : Context, optional
         Device context of output. Default is current context.

@@ -715,7 +721,7 @@ def chisquare(df, size=None, dtype=None, ctx=None):
     """
     if dtype is None:
-        dtype = 'float32'
+        dtype = np.float64 if is_np_default_dtype() else np.float32
     if ctx is None:
         ctx = current_context()
     if size == ():
diff --git a/python/mxnet/symbol/numpy_extension/random.py b/python/mxnet/symbol/numpy_extension/random.py
index bad6a74d139f..51e25ca8ed76 100644
--- a/python/mxnet/symbol/numpy_extension/random.py
+++ b/python/mxnet/symbol/numpy_extension/random.py
@@ -79,8 +79,6 @@ def bernoulli(prob=None, logit=None, size=None, dtype=None, ctx=None, out=None):
         raise ValueError(
             "Either `prob` or `logit` must be specified, but not both. " +
             "Received prob={}, logit={}".format(prob, logit))
-    if dtype is None:
-        dtype = 'float32'
     if ctx is None:
         ctx = current_context()
     if size == ():
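Two dtype-resolution styles coexist after this change: `uniform`, `normal`, `gamma` and `bernoulli` forward `dtype=None` to the C++ backend, while `beta` and `chisquare` resolve the default eagerly in Python via `is_np_default_dtype()`, presumably because their implementations post-process samples on the Python side. A sketch of the eager pattern (`_resolve_dtype` is a hypothetical helper name, not part of this patch):

    import numpy as np
    from mxnet.util import is_np_default_dtype

    def _resolve_dtype(dtype):
        # Hypothetical helper mirroring what beta/chisquare do inline:
        # keep an explicit dtype, otherwise consult the global flag.
        if dtype is None:
            return np.float64 if is_np_default_dtype() else np.float32
        return dtype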
" + "Received prob={}, logit={}".format(prob, logit)) - if dtype is None: - dtype = 'float32' if ctx is None: ctx = current_context() if size == (): diff --git a/python/mxnet/test_utils.py b/python/mxnet/test_utils.py index 6129d690d700..60c4c814a2b5 100644 --- a/python/mxnet/test_utils.py +++ b/python/mxnet/test_utils.py @@ -49,7 +49,7 @@ from .ndarray import array from .symbol import Symbol from .symbol.numpy import _Symbol as np_symbol -from .util import use_np # pylint: disable=unused-import +from .util import use_np, use_np_default_dtype # pylint: disable=unused-import from .runtime import Features from .numpy_extension import get_cuda_compute_capability diff --git a/python/mxnet/util.py b/python/mxnet/util.py index c685cf32bb12..ed0af26bff9f 100644 --- a/python/mxnet/util.py +++ b/python/mxnet/util.py @@ -34,6 +34,7 @@ _set_np_shape_logged = False _set_np_array_logged = False +_set_np_default_dtype_logged = False def get_gpu_count(): @@ -760,7 +761,7 @@ def _set_np_array(active): return cur_state -def set_np(shape=True, array=True): +def set_np(shape=True, array=True, dtype=True): """Setting NumPy shape and array semantics at the same time. It is required to keep NumPy shape semantics active while activating NumPy array semantics. Deactivating NumPy shape semantics while NumPy array semantics is still active is not allowed. @@ -778,7 +779,10 @@ def set_np(shape=True, array=True): When this flag is set to `True`, it enables Gluon code flow to use or generate `mxnet.numpy.ndarray`s instead of `mxnet.ndarray.NDArray`. For example, a `Block` would create parameters of type `mxnet.numpy.ndarray`. - + dtype : bool + A boolean value indicating whether the NumPy-dtype semantics should be turned on or off. + When this flag is set to `True`, default dtype is float64. + When this flag is set to `False`, default dtype is float32. Examples -------- >>> import mxnet as mx @@ -819,6 +823,8 @@ def set_np(shape=True, array=True): array(1.) >>> np.ones(shape=(2, 0, 3)) array([], shape=(2, 0, 3)) + >>> np.ones(shape=()).dtype + dtype('float64') When the `array` flag is `True`, Gluon layers would create parameters and outputs of type `mx.np.ndarray`. @@ -837,11 +843,12 @@ def set_np(shape=True, array=True): raise ValueError('NumPy Shape semantics is required in using NumPy array semantics.') _set_np_array(array) set_np_shape(shape) + set_np_default_dtype(dtype) def reset_np(): - """Deactivate NumPy shape and array semantics at the same time.""" - set_np(shape=False, array=False) + """Deactivate NumPy shape and array and deafult dtype semantics at the same time.""" + set_np(shape=False, array=False, dtype=False) _CUDA_SUCCESS = 0 @@ -903,3 +910,205 @@ def get_cuda_compute_capability(ctx): raise RuntimeError('cuDeviceComputeCapability failed with error code {}: {}' .format(ret, error_str.value.decode())) return cc_major.value * 10 + cc_minor.value + + +class _NumpyDefaultDtypeScope(object): + """Scope for managing NumPy default dtype semantics. + In NumPy default dtype semantics, default dtype is 'float64', + i.e. np.array([1, 2, 3]).dtype = np.float64 + Original default dtype without this semantic is 'float32'. + + Do not use this class directly. Use `np_shape(active)` instead. 
@@ -903,3 +910,205 @@ def get_cuda_compute_capability(ctx):
         raise RuntimeError('cuDeviceComputeCapability failed with error code {}: {}'
                            .format(ret, error_str.value.decode()))
     return cc_major.value * 10 + cc_minor.value
+
+
+class _NumpyDefaultDtypeScope(object):
+    """Scope for managing NumPy default dtype semantics.
+    In NumPy default dtype semantics, default dtype is 'float64',
+    i.e. np.array([1, 2, 3]).dtype = np.float64.
+    The original default dtype without these semantics is 'float32'.
+
+    Do not use this class directly. Use `np_default_dtype(active)` instead.
+
+    Example::
+
+        with _NumpyDefaultDtypeScope(True):
+            y = model(x)
+            backward([y])
+
+    """
+    def __init__(self, is_np_default_dtype):  # pylint: disable=redefined-outer-name
+        self._enter_is_np_default_dtype = is_np_default_dtype
+        self._prev_is_np_default_dtype = None
+
+    def __enter__(self):
+        if self._enter_is_np_default_dtype is not None:
+            self._prev_is_np_default_dtype = set_np_default_dtype(self._enter_is_np_default_dtype)
+
+    def __exit__(self, ptype, value, trace):
+        if self._enter_is_np_default_dtype is not None and\
+           self._prev_is_np_default_dtype != self._enter_is_np_default_dtype:
+            set_np_default_dtype(self._prev_is_np_default_dtype)
+
+def np_default_dtype(active=True):
+    """Returns an activated/deactivated NumPy-default_dtype scope to be used in 'with' statement
+    and captures code that needs the NumPy default dtype semantics, i.e. default dtype is float64.
+
+    Please note that this is designed as an infrastructure for the incoming
+    MXNet-NumPy operators. Legacy operators registered in the modules
+    `mx.nd` and `mx.sym` are not guaranteed to behave like their counterparts
+    in NumPy even within this scope.
+
+    Parameters
+    ----------
+    active : bool
+        Indicates whether to activate NumPy default dtype semantics.
+
+    Returns
+    -------
+    _NumpyDefaultDtypeScope
+        A scope object for wrapping the code w/ or w/o NumPy-default_dtype semantics.
+
+    Example::
+
+        with mx.np_default_dtype(active=True):
+            # Default dtype is 'float64', consistent with official NumPy behavior.
+            arr = mx.np.array([1, 2, 3])
+            assert arr.dtype == 'float64'
+
+        with mx.np_default_dtype(active=False):
+            # Default dtype is 'float32' in the legacy default dtype definition.
+            arr = mx.np.array([1, 2, 3])
+            assert arr.dtype == 'float32'
+
+    """
+    return _NumpyDefaultDtypeScope(active)
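Because `__exit__` restores the previous flag only when it actually changed, these scopes nest as expected. A small sketch of the intended semantics, assuming this patch and that the flag starts out off:

    import mxnet as mx

    with mx.np_default_dtype(True):
        assert mx.is_np_default_dtype()
        with mx.np_default_dtype(False):      # temporarily back to float32 defaults
            assert not mx.is_np_default_dtype()
        assert mx.is_np_default_dtype()       # inner scope restored the outer state
    assert not mx.is_np_default_dtype()       # back to the initial state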
+ """ + if inspect.isclass(func): + for name, method in inspect.getmembers( + func, + predicate= + lambda f: inspect.isfunction(f) or inspect.ismethod(f) or isinstance(f, property)): + if isinstance(method, property): + setattr(func, name, property(use_np_default_dtype(method.__get__), + method.__set__, + method.__delattr__, + method.__doc__)) + else: + setattr(func, name, use_np_default_dtype(method)) + return func + elif callable(func): + @functools.wraps(func) + def _with_np_default_dtype(*args, **kwargs): + with np_default_dtype(active=True): + return func(*args, **kwargs) + return _with_np_default_dtype + else: + raise TypeError('use_np_default_dtype can only decorate classes and callable objects, ' + 'while received a {}'.format(str(type(func)))) + +def is_np_default_dtype(): + """Checks whether the NumPy default dtype semantics is currently turned on. + In NumPy default dtype semantics, default dtype is float64. + + Please note that this is designed as an infrastructure for the incoming + MXNet-NumPy operators. Legacy operators registered in the modules + `mx.nd` and `mx.sym` are not guaranteed to behave like their counterparts + in NumPy even within this scope. + + Returns + ------- + A bool value indicating whether the NumPy default dtype semantics is currently on. + + Example + ------- + >>> import mxnet as mx + >>> from mxnet import npx + >>> prev_state = npx.set_np_default_dtype(True) + >>> print(prev_state) + False + >>> print(npx.is_np_default_dtype()) + True + """ + curr = ctypes.c_bool() + check_call(_LIB.MXIsNumpyDefaultDtype(ctypes.byref(curr))) + return curr.value + +def set_np_default_dtype(is_np_default_dtype=True): # pylint: disable=redefined-outer-name + """Turns on/off NumPy default dtype semantics, because mxnet.numpy.ndarray use + 32 bit data storage as default (e.g. float32 and int 32) while offical NumPy use + 64 bit data storage as default (e.g. float64 and int64). + This is turned off by default for keeping backward compatibility. + + Please note that this is designed as an infrastructure for the incoming + MXNet-NumPy operators. Legacy operators registered in the modules + `mx.nd` and `mx.sym` are not guaranteed to behave like their counterparts + in NumPy within this semantics. + + Parameters + ---------- + active : bool + Indicates whether to turn on/off NumPy default dtype semantics. + + Returns + ------- + A bool value indicating the previous state of NumPy default dtype semantics. 
+
+def set_np_default_dtype(is_np_default_dtype=True):  # pylint: disable=redefined-outer-name
+    """Turns on/off NumPy default dtype semantics. mxnet.numpy.ndarray uses
+    32-bit data storage as its default (e.g. float32 and int32), while official
+    NumPy uses 64-bit data storage as its default (e.g. float64 and int64).
+    This is turned off by default to keep backward compatibility.
+
+    Please note that this is designed as an infrastructure for the incoming
+    MXNet-NumPy operators. Legacy operators registered in the modules
+    `mx.nd` and `mx.sym` are not guaranteed to behave like their counterparts
+    in NumPy within these semantics.
+
+    Parameters
+    ----------
+    is_np_default_dtype : bool
+        Indicates whether to turn on/off NumPy default dtype semantics.
+
+    Returns
+    -------
+    A bool value indicating the previous state of NumPy default dtype semantics.
+
+    Example
+    -------
+    >>> import mxnet as mx
+    >>> from mxnet import npx
+    >>> prev_state = npx.set_np_default_dtype(True)
+    >>> print(prev_state)
+    False
+    >>> print(npx.is_np_default_dtype())
+    True
+    """
+    global _set_np_default_dtype_logged
+    if is_np_default_dtype:
+        if not _set_np_default_dtype_logged:
+            import logging
+            logging.info('NumPy array default dtype has been changed from float32 to float64 in your code.')
+            _set_np_default_dtype_logged = True
+    prev = ctypes.c_bool()
+    check_call(_LIB.MXSetIsNumpyDefaultDtype(ctypes.c_bool(is_np_default_dtype), ctypes.byref(prev)))
+    return prev.value
diff --git a/src/api/operator/numpy/np_broadcast_reduce_op_value.cc b/src/api/operator/numpy/np_broadcast_reduce_op_value.cc
index 4cd2e485d987..caa43ac57150 100644
--- a/src/api/operator/numpy/np_broadcast_reduce_op_value.cc
+++ b/src/api/operator/numpy/np_broadcast_reduce_op_value.cc
@@ -123,7 +123,7 @@ MXNET_REGISTER_API("_npi.mean")
     param.axis = mxnet::Tuple<int>(args[1].operator ObjectRef());
   }
   if (args[2].type_code() == kNull) {
-    param.dtype = dmlc::optional<int>();
+    param.dtype = mxnet::common::GetDefaultDtype();
   } else {
     param.dtype = String2MXNetTypeWithBool(args[2].operator std::string());
   }
diff --git a/src/api/operator/numpy/np_init_op.cc b/src/api/operator/numpy/np_init_op.cc
index b61914917d7f..ba9a9f169d09 100644
--- a/src/api/operator/numpy/np_init_op.cc
+++ b/src/api/operator/numpy/np_init_op.cc
@@ -27,6 +27,7 @@
 #include "../utils.h"
 #include "../../../operator/tensor/init_op.h"
 #include "../../../operator/numpy/np_init_op.h"
+#include "../../../common/utils.h"

 namespace mxnet {

@@ -42,7 +43,7 @@ MXNET_REGISTER_API("_npi.zeros")
     param.shape = TShape(args[0].operator ObjectRef());
   }
   if (args[1].type_code() == kNull) {
-    param.dtype = mshadow::kFloat32;
+    param.dtype = mxnet::common::GetDefaultDtype();
   } else {
     param.dtype = String2MXNetTypeWithBool(args[1].operator std::string());
   }
@@ -102,7 +103,7 @@ MXNET_REGISTER_API("_npi.indices")
   }
   // param.dtype
   if (args[1].type_code() == kNull) {
-    param.dtype = mshadow::kInt32;
+    param.dtype = -1;
   } else {
     param.dtype = String2MXNetTypeWithBool(args[1].operator std::string());
   }
@@ -216,7 +217,7 @@ MXNET_REGISTER_API("_npi.arange")
   param.repeat = 1;
   param.infer_range = false;
   if (args[3].type_code() == kNull) {
-    param.dtype = mshadow::kFloat32;
+    param.dtype = mxnet::common::GetDefaultDtype();
   } else {
     param.dtype = String2MXNetTypeWithBool(args[3].operator std::string());
   }
@@ -245,7 +246,7 @@ MXNET_REGISTER_API("_npi.eye")
   }
   param.k = args[2].operator nnvm::dim_t();
   if (args[4].type_code() == kNull) {
-    param.dtype = mshadow::kFloat32;
+    param.dtype = mxnet::common::GetDefaultDtype();
   } else {
     param.dtype = String2MXNetTypeWithBool(args[4].operator std::string());
   }
@@ -275,7 +276,7 @@ MXNET_REGISTER_API("_npi.linspace")
     param.endpoint = args[3].operator bool();
   }
   if (args[5].type_code() == kNull) {
-    param.dtype = mshadow::kFloat32;
+    param.dtype = mxnet::common::GetDefaultDtype();
   } else {
     param.dtype = String2MXNetTypeWithBool(args[5].operator std::string());
   }
@@ -310,7 +311,7 @@ MXNET_REGISTER_API("_npi.logspace")
     param.base = args[4].operator double();
   }
   if (args[6].type_code() == kNull) {
-    param.dtype = mshadow::kFloat32;
+    param.dtype = mxnet::common::GetDefaultDtype();
   } else {
     param.dtype = String2MXNetTypeWithBool(args[6].operator std::string());
   }
@@ -325,4 +326,88 @@ MXNET_REGISTER_API("_npi.logspace")
   *ret = ndoutputs[0];
 });

+MXNET_REGISTER_API("_npi.ones")
+.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue*
ret) { + using namespace runtime; + const nnvm::Op* op = Op::Get("_npi_ones"); + nnvm::NodeAttrs attrs; + op::InitOpParam param; + if (args[0].type_code() == kDLInt) { + param.shape = TShape(1, args[0].operator int64_t()); + } else { + param.shape = TShape(args[0].operator ObjectRef()); + } + if (args[1].type_code() == kNull) { + param.dtype = mxnet::common::GetDefaultDtype(); + } else { + param.dtype = String2MXNetTypeWithBool(args[1].operator std::string()); + } + attrs.parsed = std::move(param); + attrs.op = op; + if (args[2].type_code() != kNull) { + attrs.dict["ctx"] = args[2].operator std::string(); + } + int num_outputs = 0; + SetAttrDict(&attrs); + auto ndoutputs = Invoke(op, &attrs, 0, nullptr, &num_outputs, nullptr); + *ret = ndoutputs[0]; +}); + +MXNET_REGISTER_API("_npi.full") +.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { + using namespace runtime; + const nnvm::Op* op = Op::Get("_npi_full"); + nnvm::NodeAttrs attrs; + op::InitOpWithScalarParam param; + if (args[0].type_code() == kDLInt) { + param.shape = TShape(1, args[0].operator int64_t()); + } else { + param.shape = TShape(args[0].operator ObjectRef()); + } + if (args[1].type_code() == kNull) { + param.dtype = mxnet::common::GetDefaultDtype(); + } else { + param.dtype = String2MXNetTypeWithBool(args[1].operator std::string()); + } + param.value = args[2].operator double(); + attrs.parsed = std::move(param); + attrs.op = op; + if (args[3].type_code() != kNull) { + attrs.dict["ctx"] = args[3].operator std::string(); + } + SetAttrDict(&attrs); + NDArray* out = args[4].operator mxnet::NDArray*(); + NDArray** outputs = out == nullptr ? nullptr : &out; + int num_outputs = out != nullptr; + auto ndoutputs = Invoke(op, &attrs, 0, nullptr, &num_outputs, outputs); + if (out) { + *ret = PythonArg(4); + } else { + *ret = ndoutputs[0]; + } +}); + +MXNET_REGISTER_API("_npi.identity") +.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { + using namespace runtime; + const nnvm::Op* op = Op::Get("_npi_identity"); + nnvm::NodeAttrs attrs; + op::InitOpParam param; + param.shape = TShape(args[0].operator ObjectRef()); + if (args[1].type_code() == kNull) { + param.dtype = mxnet::common::GetDefaultDtype(); + } else { + param.dtype = String2MXNetTypeWithBool(args[1].operator std::string()); + } + attrs.parsed = std::move(param); + attrs.op = op; + if (args[2].type_code() != kNull) { + attrs.dict["ctx"] = args[2].operator std::string(); + } + int num_outputs = 0; + SetAttrDict(&attrs); + auto ndoutputs = Invoke(op, &attrs, 0, nullptr, &num_outputs, nullptr); + *ret = ndoutputs[0]; +}); + } // namespace mxnet diff --git a/src/api/operator/numpy/np_window_op.cc b/src/api/operator/numpy/np_window_op.cc index 8800b5cf2d01..6b99c09cc75a 100644 --- a/src/api/operator/numpy/np_window_op.cc +++ b/src/api/operator/numpy/np_window_op.cc @@ -25,6 +25,7 @@ #include #include "../utils.h" #include "../../../operator/numpy/np_window_op.h" +#include "../../../common/utils.h" namespace mxnet { @@ -40,7 +41,7 @@ inline static void SetNumpyWindowsParam(runtime::MXNetArgs args, param.M = args[0].operator nnvm::dim_t(); } if (args[1].type_code() == kNull) { - param.dtype = mshadow::kFloat32; + param.dtype = mxnet::common::GetDefaultDtype(); } else { param.dtype = String2MXNetTypeWithBool(args[1].operator std::string()); } diff --git a/src/api/operator/random/np_gamma_op.cc b/src/api/operator/random/np_gamma_op.cc index ec574273e6a3..44aeb44c44f8 100644 --- a/src/api/operator/random/np_gamma_op.cc +++ 
b/src/api/operator/random/np_gamma_op.cc @@ -84,7 +84,7 @@ MXNET_REGISTER_API("_npi.gamma") param.size = Tuple(args[2].operator ObjectRef()); } if (args[4].type_code() == kNull) { - param.dtype = mshadow::kFloat32; + param.dtype = mxnet::common::GetDefaultDtype(); } else { param.dtype = String2MXNetTypeWithBool(args[4].operator std::string()); } diff --git a/src/api/operator/random/np_normal_op.cc b/src/api/operator/random/np_normal_op.cc index a45936d21333..6c8046b3cc32 100644 --- a/src/api/operator/random/np_normal_op.cc +++ b/src/api/operator/random/np_normal_op.cc @@ -72,7 +72,7 @@ MXNET_REGISTER_API("_npi.normal") param.size = Tuple(args[2].operator ObjectRef()); } if (args[4].type_code() == kNull) { - param.dtype = mshadow::kFloat32; + param.dtype = mxnet::common::GetDefaultDtype(); } else { param.dtype = String2MXNetTypeWithBool(args[4].operator std::string()); } diff --git a/src/api/operator/random/np_uniform_op.cc b/src/api/operator/random/np_uniform_op.cc index d93991f63777..4cbc599cfe4c 100644 --- a/src/api/operator/random/np_uniform_op.cc +++ b/src/api/operator/random/np_uniform_op.cc @@ -72,7 +72,7 @@ MXNET_REGISTER_API("_npi.uniform") param.size = Tuple(args[2].operator ObjectRef()); } if (args[4].type_code() == kNull) { - param.dtype = mshadow::kFloat32; + param.dtype = mxnet::common::GetDefaultDtype(); } else { param.dtype = String2MXNetTypeWithBool(args[4].operator std::string()); } diff --git a/src/c_api/c_api_ndarray.cc b/src/c_api/c_api_ndarray.cc index 45cb71ac54a1..2a47afd31788 100644 --- a/src/c_api/c_api_ndarray.cc +++ b/src/c_api/c_api_ndarray.cc @@ -332,6 +332,18 @@ int MXSetIsNumpyShape(int is_np_shape, int* prev) { API_END(); } +int MXIsNumpyDefaultDtype(bool* curr) { + API_BEGIN(); + *curr = Imperative::Get()->is_np_default_dtype(); + API_END(); +} + +int MXSetIsNumpyDefaultDtype(bool default_dtype, bool* prev) { + API_BEGIN(); + *prev = Imperative::Get()->set_is_np_default_dtype(default_dtype); + API_END(); +} + int MXAutogradMarkVariables(uint32_t num_var, NDArrayHandle *var_handles, uint32_t *reqs_array, diff --git a/src/common/utils.h b/src/common/utils.h index 95133d8b53e1..9ea3329f2c24 100644 --- a/src/common/utils.h +++ b/src/common/utils.h @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -936,6 +937,19 @@ NodeAttrsGetProfilerScope(const nnvm::NodeAttrs& attrs) { return profiler_scope; } +inline int GetDefaultDtype() { + return Imperative::Get()->is_np_default_dtype() ? + mshadow::kFloat64 : + mshadow::kFloat32; +} + +inline int GetDefaultDtype(int dtype) { + if (dtype != -1) return dtype; + return Imperative::Get()->is_np_default_dtype() ? 
+ mshadow::kFloat64 : + mshadow::kFloat32; +} + } // namespace common } // namespace mxnet #endif // MXNET_COMMON_UTILS_H_ diff --git a/src/operator/numpy/linalg/np_gesvd.cc b/src/operator/numpy/linalg/np_gesvd.cc index 8c8c9ec8680b..6d48c1ecabac 100644 --- a/src/operator/numpy/linalg/np_gesvd.cc +++ b/src/operator/numpy/linalg/np_gesvd.cc @@ -27,6 +27,7 @@ #include "./np_gesvd-inl.h" #include "../../mshadow_op.h" #include "../../mxnet_op.h" +#include "../../tensor/init_op.h" #include "../../operator_common.h" #include "../../elemwise_op_common.h" diff --git a/src/operator/numpy/np_broadcast_reduce_op.h b/src/operator/numpy/np_broadcast_reduce_op.h index 53b7de1744e3..235841cd50d4 100644 --- a/src/operator/numpy/np_broadcast_reduce_op.h +++ b/src/operator/numpy/np_broadcast_reduce_op.h @@ -28,6 +28,7 @@ #include #include #include +#include "../../common/utils.h" #include "../nn/moments-inl.h" #include "../tensor/broadcast_reduce_op.h" #include "../tensor/elemwise_binary_broadcast_op.h" diff --git a/src/operator/numpy/np_broadcast_reduce_op_value.cc b/src/operator/numpy/np_broadcast_reduce_op_value.cc index 33418667dfb7..2263f2cd4e12 100644 --- a/src/operator/numpy/np_broadcast_reduce_op_value.cc +++ b/src/operator/numpy/np_broadcast_reduce_op_value.cc @@ -272,7 +272,7 @@ inline bool NumpyMeanType(const nnvm::NodeAttrs& attrs, TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0)); TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0)); } else { - TYPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::kFloat32); + TYPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::common::GetDefaultDtype()); } } diff --git a/src/operator/numpy/np_init_op.cc b/src/operator/numpy/np_init_op.cc index cffca8f10ba4..7719f26bed51 100644 --- a/src/operator/numpy/np_init_op.cc +++ b/src/operator/numpy/np_init_op.cc @@ -57,6 +57,16 @@ inline bool NumpyIndicesShape(const nnvm::NodeAttrs& attrs, return shape_is_known(out_shapes->at(0)); } +inline bool NumpyIndicesType(const nnvm::NodeAttrs& attrs, + std::vector* in_attrs, + std::vector* out_attrs) { + const IndicesOpParam& param = nnvm::get(attrs.parsed); + CHECK_EQ(in_attrs->size(), 0U); + CHECK_EQ(out_attrs->size(), 1U); + TYPE_ASSIGN_CHECK(*out_attrs, 0, param.dtype == -1 ? 
mshadow::kInt64 : param.dtype); + return true; +} + inline bool LogspaceShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { @@ -74,7 +84,7 @@ NNVM_REGISTER_OP(_npi_zeros) .set_num_outputs(1) .set_attr_parser(ParamParser) .set_attr("FInferShape", InitShape) -.set_attr("FInferType", InitType) +.set_attr("FInferType", InitNumpyType) .set_attr("FInferStorageType", InitStorageType) .set_attr("FCompute", FillCompute) .add_arguments(InitOpParam::__FIELDS__()); @@ -85,7 +95,7 @@ NNVM_REGISTER_OP(_npi_ones) .set_num_outputs(1) .set_attr_parser(ParamParser) .set_attr("FInferShape", InitShape) -.set_attr("FInferType", InitType) +.set_attr("FInferType", InitNumpyType) .set_attr("FCompute", FillCompute) .add_arguments(InitOpParam::__FIELDS__()); @@ -95,7 +105,7 @@ NNVM_REGISTER_OP(_npi_identity) .set_num_outputs(1) .set_attr_parser(ParamParser) .set_attr("FInferShape", InitShape) -.set_attr("FInferType", InitType) +.set_attr("FInferType", InitNumpyType) .set_attr("FCompute", IdentityCompute) .add_arguments(InitOpParam::__FIELDS__()); @@ -189,12 +199,22 @@ NNVM_REGISTER_OP(_npi_full_like) "The shape and data-type of a define these same attributes of the returned array.") .add_arguments(FullLikeOpParam::__FIELDS__()); +NNVM_REGISTER_OP(_npi_full) + .describe("fill target with a scalar value") + .set_num_inputs(0) + .set_num_outputs(1) + .set_attr_parser(ParamParser) + .set_attr("FInferShape", InitShape) + .set_attr("FInferType", InitNumpyType) + .set_attr("FCompute", InitFillWithScalarCompute) +.add_arguments(InitOpWithScalarParam::__FIELDS__()); + NNVM_REGISTER_OP(_npi_arange) .set_num_inputs(0) .set_num_outputs(1) .set_attr_parser(RangeParamParser) .set_attr("FInferShape", NumpyRangeShape) -.set_attr("FInferType", InitType) +.set_attr("FInferType", InitNumpyType) .set_attr("FCompute", RangeCompute) .add_arguments(RangeParam::__FIELDS__()); @@ -204,7 +224,7 @@ NNVM_REGISTER_OP(_npi_eye) .set_num_outputs(1) .set_attr_parser(ParamParser) .set_attr("FInferShape", NumpyEyeShape) -.set_attr("FInferType", InitType) +.set_attr("FInferType", InitNumpyType) .set_attr("FCompute", NumpyEyeFill) .add_arguments(NumpyEyeParam::__FIELDS__()); @@ -214,17 +234,27 @@ NNVM_REGISTER_OP(_npi_indices) .set_num_outputs(1) .set_attr_parser(ParamParser) .set_attr("FInferShape", NumpyIndicesShape) -.set_attr("FInferType", InitType) +.set_attr("FInferType", NumpyIndicesType) .set_attr("FCompute", IndicesCompute) .add_arguments(IndicesOpParam::__FIELDS__()); +NNVM_REGISTER_OP(_npi_linspace) +.describe("Return evenly spaced numbers over a specified interval. 
Similar to Numpy") +.set_num_inputs(0) +.set_num_outputs(1) +.set_attr_parser(ParamParser) +.set_attr("FInferShape", LinspaceShape) +.set_attr("FInferType", InitNumpyType) +.set_attr("FCompute", LinspaceCompute) +.add_arguments(RangeParam::__FIELDS__()); + NNVM_REGISTER_OP(_npi_logspace) .describe("Return numbers spaced evenly on a log scale.") .set_num_inputs(0) .set_num_outputs(1) .set_attr_parser(ParamParser) .set_attr("FInferShape", LogspaceShape) -.set_attr("FInferType", InitType) +.set_attr("FInferType", InitNumpyType) .set_attr("FCompute", LogspaceCompute) .add_arguments(LogspaceParam::__FIELDS__()); diff --git a/src/operator/numpy/np_init_op.cu b/src/operator/numpy/np_init_op.cu index 95e4322f31e7..8b0760ed5765 100644 --- a/src/operator/numpy/np_init_op.cu +++ b/src/operator/numpy/np_init_op.cu @@ -41,6 +41,9 @@ NNVM_REGISTER_OP(_npi_identity) NNVM_REGISTER_OP(_npi_full_like) .set_attr("FCompute", FullLikeOpCompute); +NNVM_REGISTER_OP(_npi_full) +.set_attr("FCompute", InitFillWithScalarCompute); + NNVM_REGISTER_OP(_npi_atleast_1d) .set_attr("FCompute", AtleastNDCompute); @@ -59,6 +62,9 @@ NNVM_REGISTER_OP(_npi_eye) NNVM_REGISTER_OP(_npi_indices) .set_attr("FCompute", IndicesCompute); +NNVM_REGISTER_OP(_npi_linspace) +.set_attr("FCompute", LinspaceCompute); + NNVM_REGISTER_OP(_npi_logspace) .set_attr("FCompute", LogspaceCompute); diff --git a/src/operator/numpy/np_init_op.h b/src/operator/numpy/np_init_op.h index 1288cf9e6225..a0f4523efe13 100644 --- a/src/operator/numpy/np_init_op.h +++ b/src/operator/numpy/np_init_op.h @@ -60,7 +60,8 @@ struct NumpyEyeParam : public dmlc::Parameter { .describe("Context of output, in format [cpu|gpu|cpu_pinned](n)." "Only used for imperative calls."); DMLC_DECLARE_FIELD(dtype) - .set_default(mshadow::kFloat32) + .set_default(-1) + .add_enum("None", -1) MXNET_ADD_ALL_TYPES .describe("Data-type of the returned array."); } @@ -84,7 +85,8 @@ struct IndicesOpParam : public dmlc::Parameter { DMLC_DECLARE_PARAMETER(IndicesOpParam) { DMLC_DECLARE_FIELD(dimensions) .describe("The shape of the grid."); - DMLC_DECLARE_FIELD(dtype).set_default(mshadow::kInt32) + DMLC_DECLARE_FIELD(dtype).set_default(-1) + .add_enum("None", -1) MXNET_ADD_ALL_TYPES .describe("Target data type."); DMLC_DECLARE_FIELD(ctx) @@ -259,7 +261,8 @@ struct LogspaceParam : public dmlc::Parameter { .set_default("") .describe("Context of output, in format [cpu|gpu|cpu_pinned](n)." 
"Only used for imperative calls."); - DMLC_DECLARE_FIELD(dtype).set_default(mshadow::kFloat32) + DMLC_DECLARE_FIELD(dtype).set_default(-1) + .add_enum("None", -1) MXNET_ADD_ALL_TYPES .describe("Target data type."); } diff --git a/src/operator/numpy/np_true_divide-inl.h b/src/operator/numpy/np_true_divide-inl.h index 0bc60a08803e..319660066aae 100644 --- a/src/operator/numpy/np_true_divide-inl.h +++ b/src/operator/numpy/np_true_divide-inl.h @@ -58,9 +58,10 @@ void TrueDivideScalarCompute(const nnvm::NodeAttrs &attrs, }); } else { #ifndef _WIN32 - CHECK_EQ(outputs[0].type_flag_, kFloat32) << "true_divide only supports float32 output " - "when input's dtype is " - << type_string(inputs[0].type_flag_); + CHECK_EQ(outputs[0].type_flag_, mxnet::common::GetDefaultDtype()) + << "true_divide only supports float32 and float64" + " output when input's dtype is " + << type_string(inputs[0].type_flag_); MXNET_INT_TYPE_SWITCH(inputs[0].type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { Kernel, xpu>::Launch( @@ -104,10 +105,11 @@ void TrueDivideElemwiseCompute(const nnvm::NodeAttrs &attrs, }); }); } else { - // If both are the same integers, output is float32 - CHECK_EQ(out.type_flag_, kFloat32) << "true_divide only supports float32 output " - "when input's dtype is " - << type_string(lhs.type_flag_); + // If both are the same integers, output is float32 or float64 + CHECK_EQ(out.type_flag_, mxnet::common::GetDefaultDtype()) + << "true_divide only supports float32 and float64" + " output when input's dtype is " + << type_string(lhs.type_flag_); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MXNET_INT_TYPE_SWITCH(lhs.type_flag_, DType, { Kernel, xpu>::Launch( @@ -228,8 +230,8 @@ void TrueDivideBroadcastCompute(const nnvm::NodeAttrs& attrs, lhs.dptr(), rhs.dptr(), out.dptr()); }); } else { - CHECK_EQ(out.type_flag_, mshadow::kFloat32) - << "true_divide only supports float32 output when input's dtype is " + CHECK_EQ(out.type_flag_, mxnet::common::GetDefaultDtype()) + << "true_divide only supports float32 and float64 output when input's dtype is " << type_string(lhs.type_flag_); MXNET_INT_TYPE_SWITCH(lhs.type_flag_, DType, { // If both inputs are the same integer types, output is float type @@ -288,8 +290,8 @@ void TrueDivideBroadcastCompute(const nnvm::NodeAttrs& attrs, lhs.dptr(), rhs.dptr(), out.dptr()); }); } else { - CHECK_EQ(out.type_flag_, mshadow::kFloat32) - << "true_divide only supports float32 output when input's dtype is " + CHECK_EQ(out.type_flag_, mxnet::common::GetDefaultDtype()) + << "true_divide only supports float32 and float64 output when input's dtype is " << type_string(lhs.type_flag_); MXNET_INT_TYPE_SWITCH(lhs.type_flag_, DType, { // If both inputs are the same integer types, output is float type diff --git a/src/operator/numpy/np_true_divide.cc b/src/operator/numpy/np_true_divide.cc index 1e46cc9d13b5..f98cb2a0d973 100644 --- a/src/operator/numpy/np_true_divide.cc +++ b/src/operator/numpy/np_true_divide.cc @@ -36,8 +36,8 @@ int TrueDivideOutType(int ltype, int rtype) { // If only one of the inputs is float, return that float type return (common::is_float(ltype)) ? ltype : rtype; } - // If neither of the inputs is float, return the default float32 type - return mshadow::kFloat32; + // If neither of the inputs is float, return the default dtype + return mxnet::common::GetDefaultDtype(); } template @@ -55,7 +55,8 @@ bool TrueDivideType(const nnvm::NodeAttrs& attrs, const int lhs_dtype = in_attrs->at(0); const int rhs_dtype = (num_inputs == 2) ? 
in_attrs->at(1) : - (common::is_float(lhs_dtype) ? lhs_dtype : mshadow::kFloat32); + (common::is_float(lhs_dtype) ? + lhs_dtype : mxnet::common::GetDefaultDtype()); TYPE_ASSIGN_CHECK(*out_attrs, 0, TrueDivideOutType(lhs_dtype, rhs_dtype)); return true; } diff --git a/src/operator/numpy/np_window_op.cc b/src/operator/numpy/np_window_op.cc index 91338a134edc..b1a039b4b2cb 100644 --- a/src/operator/numpy/np_window_op.cc +++ b/src/operator/numpy/np_window_op.cc @@ -49,7 +49,7 @@ NNVM_REGISTER_OP(_npi_hanning) .set_num_outputs(1) .set_attr_parser(ParamParser) .set_attr("FInferShape", NumpyWindowsShape) -.set_attr("FInferType", InitType) +.set_attr("FInferType", InitNumpyType) .set_attr("FCompute", NumpyWindowCompute) .add_arguments(NumpyWindowsParam::__FIELDS__()); @@ -60,7 +60,7 @@ NNVM_REGISTER_OP(_npi_hamming) .set_num_outputs(1) .set_attr_parser(ParamParser) .set_attr("FInferShape", NumpyWindowsShape) -.set_attr("FInferType", InitType) +.set_attr("FInferType", InitNumpyType) .set_attr("FCompute", NumpyWindowCompute) .add_arguments(NumpyWindowsParam::__FIELDS__()); @@ -71,7 +71,7 @@ NNVM_REGISTER_OP(_npi_blackman) .set_num_outputs(1) .set_attr_parser(ParamParser) .set_attr("FInferShape", NumpyWindowsShape) -.set_attr("FInferType", InitType) +.set_attr("FInferType", InitNumpyType) .set_attr("FCompute", NumpyWindowCompute) .add_arguments(NumpyWindowsParam::__FIELDS__()); diff --git a/src/operator/numpy/np_window_op.h b/src/operator/numpy/np_window_op.h index be85f19b3371..2447313de9b7 100644 --- a/src/operator/numpy/np_window_op.h +++ b/src/operator/numpy/np_window_op.h @@ -55,7 +55,8 @@ struct NumpyWindowsParam : public dmlc::Parameter { .describe("Context of output, in format [cpu|gpu|cpu_pinned](n)." "Only used for imperative calls."); DMLC_DECLARE_FIELD(dtype) - .set_default(mshadow::kFloat32) + .set_default(-1) + .add_enum("None", -1) MXNET_ADD_ALL_TYPES .describe("Data-type of the returned array."); } diff --git a/src/operator/numpy/random/np_bernoulli_op.h b/src/operator/numpy/random/np_bernoulli_op.h index 0df10896c99a..06f07ac0ae99 100644 --- a/src/operator/numpy/random/np_bernoulli_op.h +++ b/src/operator/numpy/random/np_bernoulli_op.h @@ -29,6 +29,7 @@ #include #include #include +#include "../../../common/utils.h" #include "../../elemwise_op_common.h" #include "../../mshadow_op.h" #include "../../mxnet_op.h" @@ -59,16 +60,17 @@ struct NumpyBernoulliParam : public dmlc::Parameter { "Context of output, in format [cpu|gpu|cpu_pinned](n)." " Only used for imperative calls."); DMLC_DECLARE_FIELD(dtype) + .add_enum("None", -1) .add_enum("uint8", mshadow::kUint8) .add_enum("int32", mshadow::kInt32) .add_enum("float32", mshadow::kFloat32) .add_enum("float64", mshadow::kFloat64) .add_enum("float16", mshadow::kFloat16) .add_enum("bool", mshadow::kBool) - .set_default(mshadow::kFloat32) + .set_default(-1) .describe( "DType of the output in case this can't be inferred. 
" - "Defaults to float32 if not defined (dtype=None)."); + "Defaults to float32 or float64 if not defined (dtype=None)."); DMLC_DECLARE_FIELD(is_logit); } }; @@ -77,7 +79,7 @@ inline bool NumpyBernoulliOpType(const nnvm::NodeAttrs &attrs, std::vector *in_attrs, std::vector *out_attrs) { const NumpyBernoulliParam ¶m = nnvm::get(attrs.parsed); - int otype = param.dtype; + int otype = mxnet::common::GetDefaultDtype(param.dtype); (*out_attrs)[0] = otype; return true; } diff --git a/src/operator/numpy/random/np_gamma_op.cc b/src/operator/numpy/random/np_gamma_op.cc index 72e337b1642b..f87e997d549e 100644 --- a/src/operator/numpy/random/np_gamma_op.cc +++ b/src/operator/numpy/random/np_gamma_op.cc @@ -38,7 +38,7 @@ inline bool NumpyGammaOpType(const nnvm::NodeAttrs& attrs, if (otype != -1) { (*out_attrs)[0] = otype; } else { - (*out_attrs)[0] = mshadow::kFloat32; + (*out_attrs)[0] = mxnet::common::GetDefaultDtype(param.dtype); } return true; } diff --git a/src/operator/numpy/random/np_gamma_op.h b/src/operator/numpy/random/np_gamma_op.h index c87ca655e551..58ca4c7c52c0 100644 --- a/src/operator/numpy/random/np_gamma_op.h +++ b/src/operator/numpy/random/np_gamma_op.h @@ -62,12 +62,14 @@ struct NumpyGammaParam : public dmlc::Parameter { .describe("Context of output, in format [xpu|xpu|xpu_pinned](n)." " Only used for imperative calls."); DMLC_DECLARE_FIELD(dtype) + .add_enum("None", -1) .add_enum("float32", mshadow::kFloat32) .add_enum("float64", mshadow::kFloat64) .add_enum("float16", mshadow::kFloat16) - .set_default(mshadow::kFloat32) - .describe("DType of the output in case this can't be inferred. " - "Defaults to float32 if not defined (dtype=None)."); + .set_default(-1) + .describe("DType of the output in case this can't be inferred." + "Defaults to float64 or float32 if not defined (dtype=None)," + "which depends on your current default dtype."); } void SetAttrDict(std::unordered_map* dict) { std::ostringstream shape_s, scale_s, dtype_s, size_s; diff --git a/src/operator/numpy/random/np_laplace_op.h b/src/operator/numpy/random/np_laplace_op.h index b8e829582c06..6a8a0498ebe4 100644 --- a/src/operator/numpy/random/np_laplace_op.h +++ b/src/operator/numpy/random/np_laplace_op.h @@ -76,7 +76,7 @@ struct NumpyLaplaceParam : public dmlc::Parameter { (*dict)["loc"] = loc_s.str(); (*dict)["scale"] = scale_s.str(); (*dict)["size"] = size_s.str(); - (*dict)["dtype"] = dtype_s.str(); + (*dict)["dtype"] = MXNetTypeWithBool2String(dtype); // We do not set ctx, because ctx has been set in dict instead of InitOpParam. // Setting ctx here results in an error. } diff --git a/src/operator/numpy/random/np_normal_op.h b/src/operator/numpy/random/np_normal_op.h index 4dd44060f49b..d81f3d38f3a3 100644 --- a/src/operator/numpy/random/np_normal_op.h +++ b/src/operator/numpy/random/np_normal_op.h @@ -31,6 +31,7 @@ #include #include #include "../../../api/operator/op_utils.h" +#include "../../../common/utils.h" #include "../../elemwise_op_common.h" #include "../../mshadow_op.h" #include "../../mxnet_op.h" @@ -60,13 +61,14 @@ struct NumpyNormalParam : public dmlc::Parameter { "Context of output, in format [cpu|gpu|cpu_pinned](n)." " Only used for imperative calls."); DMLC_DECLARE_FIELD(dtype) + .add_enum("None", -1) .add_enum("float32", mshadow::kFloat32) .add_enum("float64", mshadow::kFloat64) .add_enum("float16", mshadow::kFloat16) - .set_default(mshadow::kFloat32) + .set_default(-1) .describe( "DType of the output in case this can't be inferred. 
" - "Defaults to float32 if not defined (dtype=None)."); + "Defaults to float32 or float64 if not defined (dtype=None)."); } void SetAttrDict(std::unordered_map* dict) { std::ostringstream loc_s, scale_s, dtype_s, size_s; @@ -89,7 +91,7 @@ inline bool NumpyNormalOpType(const nnvm::NodeAttrs &attrs, if (otype != -1) { (*out_attrs)[0] = otype; } else { - (*out_attrs)[0] = mshadow::kFloat32; + (*out_attrs)[0] = mxnet::common::GetDefaultDtype(); } (*out_attrs)[1] = mshadow::kFloat32; return true; diff --git a/src/operator/numpy/random/np_uniform_op.h b/src/operator/numpy/random/np_uniform_op.h index 06f2aeec0b6c..a15a7c96e310 100644 --- a/src/operator/numpy/random/np_uniform_op.h +++ b/src/operator/numpy/random/np_uniform_op.h @@ -30,6 +30,7 @@ #include #include #include "../../../api/operator/op_utils.h" +#include "../../../common/utils.h" #include "../../elemwise_op_common.h" #include "../../mshadow_op.h" #include "../../mxnet_op.h" @@ -59,13 +60,14 @@ struct NumpyUniformParam : public dmlc::Parameter { "Context of output, in format [cpu|gpu|cpu_pinned](n)." " Only used for imperative calls."); DMLC_DECLARE_FIELD(dtype) + .add_enum("None", -1) .add_enum("float32", mshadow::kFloat32) .add_enum("float64", mshadow::kFloat64) .add_enum("float16", mshadow::kFloat16) - .set_default(mshadow::kFloat32) + .set_default(-1) .describe( "DType of the output in case this can't be inferred. " - "Defaults to float32 if not defined (dtype=None)."); + "Defaults to float32 or float64 if not defined (dtype=None)."); } void SetAttrDict(std::unordered_map* dict) { std::ostringstream low_s, high_s, dtype_s, size_s; @@ -88,7 +90,7 @@ inline bool NumpyUniformOpType(const nnvm::NodeAttrs &attrs, if (otype != -1) { (*out_attrs)[0] = otype; } else { - (*out_attrs)[0] = mshadow::kFloat32; + (*out_attrs)[0] = mxnet::common::GetDefaultDtype(); } return true; } diff --git a/src/operator/random/sample_op.h b/src/operator/random/sample_op.h index 8905749037ac..03ca89ef4e7b 100644 --- a/src/operator/random/sample_op.h +++ b/src/operator/random/sample_op.h @@ -29,6 +29,7 @@ #include #include #include +#include "../../common/utils.h" #include "../mxnet_op.h" #include "../mshadow_op.h" #include "../elemwise_op_common.h" @@ -746,7 +747,7 @@ inline bool SampleOpType(const nnvm::NodeAttrs& attrs, dtype = param.dtype; } else { // Use default - dtype = mshadow::kFloat32; + dtype = mxnet::common::GetDefaultDtype(); } } bool dtype_ok = (dtype == mshadow::kFloat16) || (dtype == mshadow::kFloat32) || diff --git a/src/operator/tensor/init_op.cc b/src/operator/tensor/init_op.cc index 6db89a5cda61..aabd3f0eb7b2 100644 --- a/src/operator/tensor/init_op.cc +++ b/src/operator/tensor/init_op.cc @@ -82,7 +82,6 @@ NNVM_REGISTER_OP(_ones) .add_arguments(InitOpParam::__FIELDS__()); NNVM_REGISTER_OP(_full) -.add_alias("_npi_full") .describe("fill target with a scalar value") .set_num_inputs(0) .set_num_outputs(1) @@ -140,7 +139,6 @@ Examples:: .add_arguments(RangeLikeParam::__FIELDS__()); NNVM_REGISTER_OP(_linspace) -.add_alias("_npi_linspace") .describe("Return evenly spaced numbers over a specified interval. 
Similar to Numpy") .set_num_inputs(0) .set_num_outputs(1) diff --git a/src/operator/tensor/init_op.h b/src/operator/tensor/init_op.h index b78ed00622ef..753b4b67466e 100644 --- a/src/operator/tensor/init_op.h +++ b/src/operator/tensor/init_op.h @@ -36,6 +36,7 @@ #include #include #include "../../api/operator/op_utils.h" +#include "../../common/utils.h" #include "../mshadow_op.h" #include "../elemwise_op_common.h" #include "../mxnet_op.h" @@ -58,7 +59,9 @@ struct InitOpParam : public dmlc::Parameter { .set_default("") .describe("Context of output, in format [cpu|gpu|cpu_pinned](n)." "Only used for imperative calls."); - DMLC_DECLARE_FIELD(dtype).set_default(mshadow::kFloat32) + DMLC_DECLARE_FIELD(dtype) + .set_default(-1) + .add_enum("None", -1) MXNET_ADD_ALL_TYPES_WITH_BOOL .describe("Target data type."); } @@ -157,14 +160,10 @@ struct EyeParam : public dmlc::Parameter { .set_default("") .describe("Context of output, in format [cpu|gpu|cpu_pinned](n)." "Only used for imperative calls."); - DMLC_DECLARE_FIELD(dtype).set_default(mshadow::kFloat32) - .add_enum("float32", mshadow::kFloat32) - .add_enum("float64", mshadow::kFloat64) - .add_enum("float16", mshadow::kFloat16) - .add_enum("uint8", mshadow::kUint8) - .add_enum("int8", mshadow::kInt8) - .add_enum("int32", mshadow::kInt32) - .add_enum("int64", mshadow::kInt64) + DMLC_DECLARE_FIELD(dtype) + .set_default(-1) + .add_enum("None", -1) + MXNET_ADD_ALL_TYPES .describe("Target data type."); } }; @@ -223,7 +222,9 @@ struct RangeParam : public dmlc::Parameter { .set_default("") .describe("Context of output, in format [cpu|gpu|cpu_pinned](n)." "Only used for imperative calls."); - DMLC_DECLARE_FIELD(dtype).set_default(mshadow::kFloat32) + DMLC_DECLARE_FIELD(dtype) + .set_default(-1) + .add_enum("None", -1) MXNET_ADD_ALL_TYPES .describe("Target data type."); } @@ -288,12 +289,26 @@ struct InitOpWithScalarParam : dmlc::Parameter { .set_default("") .describe("Context of output, in format [cpu|gpu|cpu_pinned](n)." "Only used for imperative calls."); - DMLC_DECLARE_FIELD(dtype).set_default(mshadow::kFloat32) + DMLC_DECLARE_FIELD(dtype) + .set_default(-1) + .add_enum("None", -1) MXNET_ADD_ALL_TYPES_WITH_BOOL .describe("Target data type."); DMLC_DECLARE_FIELD(value) .describe("Value with which to fill newly created tensor"); } + + void SetAttrDict(std::unordered_map* dict) { + std::ostringstream shape_s, dtype_s, value_s; + shape_s << shape; + dtype_s << dtype; + value_s << value; + (*dict)["shape"] = shape_s.str(); + (*dict)["dtype"] = MXNetTypeWithBool2String(dtype); + (*dict)["value"] = value_s.str(); + // We do not set ctx, because ctx has been set in dict instead of InitOpParam. + // Setting ctx here results in an error. + } }; /*! \brief Parse keyword arguments as PType arguments and save to parsed */ @@ -328,7 +343,9 @@ struct LinspaceParam : public dmlc::Parameter { .set_default("") .describe("Context of output, in format [cpu|gpu|cpu_pinned](n)." 
"Only used for imperative calls."); - DMLC_DECLARE_FIELD(dtype).set_default(mshadow::kFloat32) + DMLC_DECLARE_FIELD(dtype) + .set_default(-1) + .add_enum("None", -1) MXNET_ADD_ALL_TYPES .describe("Target data type."); } @@ -386,6 +403,17 @@ inline bool InitType(const nnvm::NodeAttrs& attrs, return true; } +template +inline bool InitNumpyType(const nnvm::NodeAttrs& attrs, + std::vector *in_attrs, + std::vector *out_attrs) { + const ParamType& param = nnvm::get(attrs.parsed); + CHECK_EQ(in_attrs->size(), num_in); + CHECK_EQ(out_attrs->size(), 1U); + TYPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::common::GetDefaultDtype(param.dtype)); + return true; +} + template inline bool InitStorageType(const nnvm::NodeAttrs& attrs, const int dev_mask, diff --git a/tests/python/unittest/test_numpy_default_dtype.py b/tests/python/unittest/test_numpy_default_dtype.py new file mode 100644 index 000000000000..f4beb949cfc7 --- /dev/null +++ b/tests/python/unittest/test_numpy_default_dtype.py @@ -0,0 +1,225 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +from __future__ import absolute_import +import numpy as _np +import mxnet +from mxnet import npx +from mxnet import numpy as np +from mxnet.test_utils import use_np, use_np_default_dtype +from common import with_seed + + +class DtypeOpArgMngr(object): + """Operator argument manager for storing operator workloads.""" + _args = {} + + @staticmethod + def add_workload(name, *args, **kwargs): + if name not in DtypeOpArgMngr._args: + DtypeOpArgMngr._args[name] = [] + DtypeOpArgMngr._args[name].append({'args': args, 'kwargs': kwargs}) + + @staticmethod + def get_workloads(name): + return DtypeOpArgMngr._args.get(name, None) + + +_NUMPY_DTYPE_DEFAULT_FUNC_LIST = [ + 'array', + 'ones', + 'zeros', + 'eye', + 'full', + 'arange', + 'identity', + 'linspace', + 'logspace', + 'mean', + 'hanning', + 'hamming', + 'blackman', + 'random.gamma', + 'random.uniform', + 'random.normal', + 'random.chisquare', + 'true_divide' +] + + +def _add_dtype_workload_array(): + DtypeOpArgMngr.add_workload('array', [1, 2, 3]) + + +def _add_dtype_workload_ones(): + DtypeOpArgMngr.add_workload('ones', 5) + DtypeOpArgMngr.add_workload('ones', (5,)) + + +def _add_dtype_workload_zeros(): + DtypeOpArgMngr.add_workload('zeros', 5) + DtypeOpArgMngr.add_workload('zeros', (5,)) + + +def _add_dtype_workload_eye(): + DtypeOpArgMngr.add_workload('eye', 3) + DtypeOpArgMngr.add_workload('eye', 3, k=1) + + +def _add_dtype_workload_full(): + DtypeOpArgMngr.add_workload('full', (2, 2), 10) + + +def _add_dtype_workload_arange(): + DtypeOpArgMngr.add_workload('arange', 3) + DtypeOpArgMngr.add_workload('arange', 3, 7) + DtypeOpArgMngr.add_workload('arange', 3, 7, 2) + DtypeOpArgMngr.add_workload('arange', 3.0) + + +def _add_dtype_workload_identity(): + DtypeOpArgMngr.add_workload('identity', 3) + + +def _add_dtype_workload_linspace(): + DtypeOpArgMngr.add_workload('linspace', 2.0, 3.0, num=5) + DtypeOpArgMngr.add_workload('linspace', 2.0, 3.0, num=5, endpoint=False) + + +def _add_dtype_workload_logspace(): + DtypeOpArgMngr.add_workload('logspace', 2.0, 3.0, num=4) + DtypeOpArgMngr.add_workload('logspace', 2.0, 3.0, num=4, endpoint=False) + DtypeOpArgMngr.add_workload('logspace', 2.0, 3.0, num=4, base=2.0) + + +def _add_dtype_workload_mean(): + DtypeOpArgMngr.add_workload('mean', np.random.randint(0, 3,size=2)) + + +def _add_dtype_workload_hanning(): + DtypeOpArgMngr.add_workload('hanning', 3) + + +def _add_dtype_workload_hamming(): + DtypeOpArgMngr.add_workload('hamming', 3) + + +def _add_dtype_workload_blackman(): + DtypeOpArgMngr.add_workload('blackman', 3) + + +def _add_dtype_workload_random_uniform(): + DtypeOpArgMngr.add_workload('random.uniform', -1, 1, size=3) + + +def _add_dtype_workload_random_normal(): + DtypeOpArgMngr.add_workload('random.normal', 0, 0.1, 3) + + +def _add_dtype_workload_random_gamma(): + DtypeOpArgMngr.add_workload('random.gamma', 3) + + +def _add_dtype_workload_random_chisquare(): + DtypeOpArgMngr.add_workload('random.chisquare', 2, 4) + + +def _add_dtype_workload_true_divide(): + DtypeOpArgMngr.add_workload('true_divide', np.array([1,2], dtype=int), 4) + DtypeOpArgMngr.add_workload('true_divide', np.array([1,2], dtype=int), 2.0) + DtypeOpArgMngr.add_workload('true_divide', 4.0, np.array([1,2], dtype=int)) + + +def _prepare_workloads(): + _add_dtype_workload_array() + _add_dtype_workload_ones() + _add_dtype_workload_zeros() + _add_dtype_workload_eye() + _add_dtype_workload_full() + _add_dtype_workload_arange() + _add_dtype_workload_identity() + _add_dtype_workload_linspace() + _add_dtype_workload_logspace() + 
    _add_dtype_workload_mean()
+    _add_dtype_workload_hanning()
+    _add_dtype_workload_hamming()
+    _add_dtype_workload_blackman()
+    _add_dtype_workload_random_gamma()
+    _add_dtype_workload_random_uniform()
+    _add_dtype_workload_random_normal()
+    _add_dtype_workload_true_divide()
+    _add_dtype_workload_random_chisquare()
+
+_prepare_workloads()
+
+
+@use_np
+@use_np_default_dtype
+def check_np_default_dtype(op, *args, **kwargs):
+    assert op(*args, **kwargs).dtype == 'float64'
+
+
+@use_np
+def check_deepnp_default_dtype(op, *args, **kwargs):
+    assert op(*args, **kwargs).dtype == 'float32'
+
+
+def check_default_dtype(op_list):
+    for op_name in op_list:
+        print('Default dtype test:', op_name)
+        workloads = DtypeOpArgMngr.get_workloads(op_name)
+        strs = op_name.split('.')
+        if len(strs) == 1:
+            op = getattr(np, op_name)
+        elif len(strs) == 2:
+            op = getattr(getattr(np, strs[0]), strs[1])
+        else:
+            assert False
+        assert workloads is not None, 'Workloads for operator `{}` have not been ' \
+                                      'added for checking default dtype with the ' \
+                                      'official NumPy and the deep NumPy.'.format(op_name)
+        for workload in workloads:
+            check_np_default_dtype(op, *workload['args'], **workload['kwargs'])
+            check_deepnp_default_dtype(op, *workload['args'], **workload['kwargs'])
+
+
+@with_seed()
+def test_default_float_dtype():
+    import platform
+    if 'Windows' not in platform.system():
+        check_default_dtype(_NUMPY_DTYPE_DEFAULT_FUNC_LIST)
+
+
+@use_np
+def test_np_indices_default_dtype():
+    import platform
+    if 'Windows' not in platform.system():
+        @use_np_default_dtype
+        def check_np_indices_default_dtype():
+            assert np.indices((3,)).dtype == 'int64'
+
+        def check_deepnp_indices_default_dtype():
+            assert np.indices((3,)).dtype == 'int64'
+
+        check_deepnp_indices_default_dtype()
+        check_np_indices_default_dtype()
+
+
+if __name__ == '__main__':
+    import nose
+    nose.runmodule()
diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py
index 115e76b408cc..6a1618e05f2d 100644
--- a/tests/python/unittest/test_numpy_op.py
+++ b/tests/python/unittest/test_numpy_op.py
@@ -6765,16 +6765,15 @@ def g(data, axis1, axis2, offset):
 @use_np
 def test_np_windows():
     class TestWindows(HybridBlock):
-        def __init__(self, func, M, dtype):
+        def __init__(self, func, M):
             super(TestWindows, self).__init__()
             self._func = func
             self._M = M
-            self._dtype = dtype

         def hybrid_forward(self, F, x, *args, **kwargs):
             op = getattr(F.np, self._func)
             assert op is not None
-            return x + op(M=self._M, dtype=self._dtype)
+            return x + op(M=self._M)

     configs = [-10, -3, -1, 0, 1, 6, 10, 20]
     dtypes = ['float32', 'float64']
@@ -6785,14 +6784,14 @@ def hybrid_forward(self, F, x, *args, **kwargs):
             x = np.zeros(shape=(), dtype=dtype)
             for hybridize in [False, True]:
                 np_func = getattr(_np, func)
-                mx_func = TestWindows(func, M=config, dtype=dtype)
+                mx_func = TestWindows(func, M=config)
                 np_out = np_func(M=config).astype(dtype)
                 if hybridize:
                     mx_func.hybridize()
                 mx_out = mx_func(x)
                 assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5)
                 # test imperative
-                mx_out = getattr(np, func)(M=config, dtype=dtype)
+                mx_out = getattr(np, func)(M=config)
                 np_out = np_func(M=config).astype(dtype)
                 assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5)
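Taken together, the backend changes make `-1` the sentinel for "dtype not given" and resolve it at type-inference time. A Python paraphrase of `mxnet::common::GetDefaultDtype` (the real logic lives in src/common/utils.h; this is a mental model, not patch code), which is exactly the behavior the new unit test pins down:

    from mxnet.util import is_np_default_dtype

    def get_default_dtype(dtype=-1):
        # Keep an explicit dtype; otherwise fall back to float64/float32
        # depending on the global NumPy-default-dtype flag. The C++ version
        # returns mshadow type enums rather than strings.
        if dtype != -1:
            return dtype
        return 'float64' if is_np_default_dtype() else 'float32'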