This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit d71f23b
Initial checkin
reminisce committed Feb 7, 2020
1 parent: cc4632d
Showing 6 changed files with 19 additions and 76 deletions.
Makefile (2 changes: 1 addition & 1 deletion)

@@ -615,7 +615,7 @@ lib/libtvm_runtime.so:
 	[ -e $(LLVM_PATH)/bin/llvm-config ] || sh $(ROOTDIR)/contrib/tvmop/prepare_tvm.sh; \
 	cd $(TVM_PATH)/build; \
 	cmake -DUSE_LLVM="$(LLVM_PATH)/bin/llvm-config" \
-		-DUSE_SORT=OFF -DUSE_CUDA=$(TVM_USE_CUDA) -DUSE_CUDNN=OFF -DUSE_OPENMP=ON ..; \
+		-DUSE_SORT=OFF -DUSE_CUDA=$(TVM_USE_CUDA) -DUSE_CUDNN=OFF -DUSE_OPENMP=gnu ..; \
 	$(MAKE) VERBOSE=1; \
 	mkdir -p $(ROOTDIR)/lib; \
 	cp $(TVM_PATH)/build/libtvm_runtime.so $(ROOTDIR)/lib/libtvm_runtime.so; \
cmake/BuildTVM.cmake (2 changes: 1 addition & 1 deletion)

@@ -138,7 +138,7 @@ set(USE_VTA_TSIM OFF)
 set(USE_RELAY_DEBUG OFF)
 
 # Use OPENMP thread pool to be compatible with MXNet
-set(USE_OPENMP ON)
+set(USE_OPENMP gnu)
 
 # Disable USE_MKLDNN for TVM
 set(USE_MKLDNN OFF)
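Both build paths switch TVM's USE_OPENMP option from the boolean ON to the named runtime "gnu", explicitly selecting the GNU OpenMP thread pool that MXNet itself links against (per the comment above). Whether a finished MXNet binary was actually built with the TVM-generated operators can be checked through the runtime feature flags; a minimal sketch using the same "TVM_OP" flag that the test utilities below query:

    from mxnet.runtime import Features

    # "TVM_OP" is the feature name used in python/mxnet/test_utils.py below
    print(Features().is_enabled("TVM_OP"))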
python/mxnet/test_utils.py (44 changes: 5 additions & 39 deletions)

@@ -2380,45 +2380,6 @@ def is_cd_run():
 _features = Features()
 
 
-def has_tvm_ops():
-    """Returns True if MXNet is compiled with TVM generated operators. If current ctx
-    is GPU, it only returns True for CUDA compute capability > 52 where FP16 is supported.
-    """
-    built_with_tvm_op = _features.is_enabled("TVM_OP")
-    ctx = current_context()
-    if ctx.device_type == 'gpu':
-        try:
-            cc = get_cuda_compute_capability(ctx)
-        except:  # pylint: disable=bare-except
-            print('Failed to get CUDA compute capability for context {}. The operators '
-                  'built with USE_TVM_OP=1 will not be run in unit tests.'.format(ctx))
-            return False
-        print('Cuda arch compute capability: sm_{}'.format(str(cc)))
-        return built_with_tvm_op and cc >= 53
-    return built_with_tvm_op
-
-
-def is_op_runnable():
-    """Returns True for all CPU tests. Returns True for GPU tests that are either of the following.
-    1. Built with USE_TVM_OP=0.
-    2. Built with USE_TVM_OP=1, but with compute capability >= 53.
-    """
-    ctx = current_context()
-    if ctx.device_type == 'gpu':
-        if not _features.is_enabled("TVM_OP"):
-            return True
-        else:
-            try:
-                cc = get_cuda_compute_capability(ctx)
-            except:  # pylint: disable=bare-except
-                print('Failed to get CUDA compute capability for context {}. The operators '
-                      'built with USE_TVM_OP=1 will not be run in unit tests.'.format(ctx))
-                return False
-            print('Cuda arch compute capability: sm_{}'.format(str(cc)))
-            return cc >= 53
-    return True
-
-
 @use_np
 def check_gluon_hybridize_consistency(net_builder, data_l, numpy_func=None, test_grad=True,
                                       rtol=1E-4, atol=1E-4):
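For reference, the two deleted helpers both wrapped the same compute-capability probe; the check they performed, reduced to a standalone sketch (the import paths are assumptions inferred from this file's unqualified usage, not taken from the diff):

    from mxnet.context import current_context
    from mxnet.test_utils import get_cuda_compute_capability

    ctx = current_context()
    if ctx.device_type == 'gpu':
        # 53 (sm_53) was the FP16 threshold the deleted helpers compared against
        print(get_cuda_compute_capability(ctx))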
@@ -2541,3 +2502,8 @@ def new_sym_matrix_with_real_eigvals_nd(shape):
     """Generate sym matrices with real eigenvalues."""
     n = int(np.prod(shape[:-2])) if len(shape) > 2 else 1
     return np.array([new_sym_matrix_with_real_eigvals_2d(shape[-1]) for i in range(n)]).reshape(shape)
+
+
+def use_tvm_op():
+    """Returns True if MXNet is built with USE_TVM_OP=1."""
+    return _features.is_enabled("TVM_OP")
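The replacement helper drops the device logic entirely and reduces to a single feature-flag lookup; a sketch of how a test could gate on it (the test name is hypothetical):

    import unittest

    from mxnet.test_utils import use_tvm_op

    @unittest.skipUnless(use_tvm_op(), "requires MXNet built with USE_TVM_OP=1")
    def test_tvm_generated_op():  # hypothetical test name
        pass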
tests/python/unittest/test_numpy_interoperability.py (18 changes: 1 addition & 17 deletions)

@@ -27,7 +27,6 @@
 from mxnet import np
 from mxnet.test_utils import assert_almost_equal
 from mxnet.test_utils import use_np
-from mxnet.test_utils import is_op_runnable
 from common import assertRaises, with_seed
 from mxnet.numpy_dispatch_protocol import with_array_function_protocol, with_array_ufunc_protocol
 from mxnet.numpy_dispatch_protocol import _NUMPY_ARRAY_FUNCTION_LIST, _NUMPY_ARRAY_UFUNC_LIST

@@ -36,14 +35,6 @@
 _INT_DTYPES = [np.int8, np.int32, np.int64, np.uint8]
 _FLOAT_DTYPES = [np.float16, np.float32, np.float64]
 _DTYPES = _INT_DTYPES + _FLOAT_DTYPES
-_TVM_OPS = [
-    'equal',
-    'not_equal',
-    'less',
-    'less_equal',
-    'greater',
-    'greater_equal'
-]
 
 
 class OpArgMngr(object):

@@ -1944,8 +1935,6 @@ def _check_interoperability_helper(op_name, *args, **kwargs):
         onp_op = getattr(getattr(_np, strs[0]), strs[1])
     else:
         assert False
-    if not is_op_runnable():
-        return
     out = onp_op(*args, **kwargs)
     expected_out = _get_numpy_op_output(onp_op, *args, **kwargs)
     if isinstance(out, (tuple, list)):

@@ -1963,12 +1952,7 @@ def _check_interoperability_helper(op_name, *args, **kwargs):
 
 def check_interoperability(op_list):
     for name in op_list:
-        if name in _TVM_OPS and not is_op_runnable():
-            continue
-        if name in ['shares_memory', 'may_share_memory', 'empty_like']:  # skip list
-            continue
-        if name in ['full_like', 'zeros_like', 'ones_like'] and \
-                StrictVersion(platform.python_version()) < StrictVersion('3.0.0'):
+        if name in ['shares_memory', 'may_share_memory']:  # skip list
             continue
         print('Dispatch test:', name)
         workloads = OpArgMngr.get_workloads(name)
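With the _TVM_OPS gate removed, the comparison operators flow through the same dispatch check as every other op in the list. What check_interoperability verifies per op, boiled down to a minimal standalone sketch (array values illustrative; assumes a NumPy recent enough to support the dispatch protocols):

    import numpy as onp  # official NumPy
    from mxnet import np, npx

    npx.set_np()
    a, b = np.array([1, 2, 3]), np.array([1, 0, 3])
    out = onp.equal(a, b)  # dispatched to the MXNet implementation via NumPy's protocol
    assert (out.asnumpy() == onp.equal(a.asnumpy(), b.asnumpy())).all()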
tests/python/unittest/test_numpy_ndarray.py (21 changes: 7 additions & 14 deletions)

@@ -27,7 +27,7 @@
 from mxnet.gluon import HybridBlock
 from mxnet.test_utils import same, assert_almost_equal, rand_shape_nd, rand_ndarray, retry, use_np
 from common import with_seed, TemporaryDirectory
-from mxnet.test_utils import verify_generator, gen_buckets_probs_with_ppf, assert_exception, is_op_runnable, collapse_sum_like
+from mxnet.test_utils import verify_generator, gen_buckets_probs_with_ppf, assert_exception, collapse_sum_like
 from mxnet.ndarray.ndarray import py_slice
 from mxnet.base import integer_types

@@ -267,19 +267,14 @@ def test_np_ndarray_binary_element_wise_ops():
         '/': _np.divide,
         'mod': _np.mod,
         'pow': _np.power,
+        '==': _np.equal,
+        '!=': _np.not_equal,
+        '>': _np.greater,
+        '>=': _np.greater_equal,
+        '<': _np.less,
+        '<=': _np.less_equal,
     }
 
-    if is_op_runnable():
-        np_op_map.update({
-            '==': _np.equal,
-            '!=': _np.not_equal,
-            '>': _np.greater,
-            '>=': _np.greater_equal,
-            '<': _np.less,
-            '<=': _np.less_equal
-        })
-
-
     def _get_grad_func(op, scalar=None, reverse=False):
         if op == '+':
             if scalar is None:

@@ -1122,8 +1117,6 @@ def test_np_multinomial():
 
 
 @with_seed()
-@unittest.skipUnless(is_op_runnable(), "Comparison ops can only run on either CPU instances, or GPU instances with"
-                     " compute capability >= 53 if MXNet is built with USE_TVM_OP=ON")
 @use_np
 def test_np_ndarray_boolean_indexing():
     def test_single_bool_index():
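Folding the comparison entries straight into np_op_map means the element-wise test always exercises them instead of conditionally updating the map; the mapping under test, as a standalone sketch (inputs illustrative):

    import numpy as _np
    from mxnet import np, npx

    npx.set_np()
    x = np.array([1., 2., 3.])
    y = np.array([3., 2., 1.])
    # '>' on mxnet.numpy ndarrays should agree with _np.greater on host copies
    assert ((x > y).asnumpy() == _np.greater(x.asnumpy(), y.asnumpy())).all()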
tests/python/unittest/test_numpy_op.py (8 changes: 4 additions & 4 deletions)

@@ -38,7 +38,7 @@
 import random
 from mxnet.test_utils import verify_generator, gen_buckets_probs_with_ppf
 from mxnet.numpy_op_signature import _get_builtin_op
-from mxnet.test_utils import is_op_runnable, has_tvm_ops
+from mxnet.test_utils import use_tvm_op
 from mxnet.operator import get_all_registered_operators
 
 

@@ -465,7 +465,7 @@ def is_int(dtype):
         expected_ret = _np.sum(x.asnumpy(), axis=axis, dtype=acc_type[itype], keepdims=keepdims)
         expected_ret = expected_ret.astype(dtype)
         if itype == 'bool':
-            if is_op_runnable() and (not is_windows):  # special handling of boolean ndarray
+            if not is_windows:  # special handling of boolean ndarray
                 y = test_sum(x)
                 assert y.dtype == expected_ret.dtype
                 assert_almost_equal(y.asnumpy(), expected_ret, rtol=1e-4, atol=1e-5,

@@ -870,7 +870,7 @@ def is_int(dtype):
         expected_ret = _np.mean(x.asnumpy(), axis=axis, dtype=dtype, keepdims=keepdims)
 
         if itype == 'bool':
-            if is_op_runnable() and (not is_windows) and dtype not in ['float16', 'int8']:  # special handling of boolean ndarray
+            if (not is_windows) and dtype not in ['float16', 'int8']:  # special handling of boolean ndarray
                 y = test_mean(x)
                 assert y.shape == expected_ret.shape
                 assert_almost_equal(y.asnumpy(), expected_ret, rtol=1e-3 if dtype == 'float16' else 1e-3,

@@ -1903,7 +1903,7 @@ def hybrid_forward(self, F, a, *args, **kwargs):
         'arccosh' : (lambda x: 1./(x**2 - 1.)**(1./2.), 2.0, 5.0),
         'arctanh' : (lambda x: -1./(x**2 - 1.), -0.99, 0.99)
     }
-    if has_tvm_ops():
+    if use_tvm_op():
         funcs['rad2deg'] = (lambda x: 180. / _np.pi * _np.ones(x.shape), -1.0, 1.0)
         funcs['deg2rad'] = (lambda x: _np.pi / 180. * _np.ones(x.shape), -1.0, 1.0)
     ndim = random.choice([2, 3, 4])
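The two entries guarded by use_tvm_op() follow the same (gradient_fn, low, high) convention as the rest of funcs: since rad2deg(x) = 180x/π, its gradient is the constant 180/π, hence the _np.ones broadcast in the lambda. A quick finite-difference check in plain NumPy confirms it (inputs illustrative):

    import numpy as _np

    x = _np.array([-0.5, 0.1, 0.9])
    eps = 1e-6
    num_grad = (_np.rad2deg(x + eps) - _np.rad2deg(x - eps)) / (2 * eps)
    assert _np.allclose(num_grad, 180. / _np.pi * _np.ones(x.shape))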
