Added more tests for Large Indices (apache#15960)
* Adding tests to verify support for Large Tensors in additional ops, along with new C APIs supporting 64-bit indexing

* removing skipped tests

* removing tests not required for vector testing

* Adding more tests for Large Indices and adding support for Large Indices in the one_hot operator

* Re-Trigger build
access2rohit authored and zixuanweeei committed Sep 2, 2019
1 parent 8cca23f commit 354dc63
Showing 4 changed files with 233 additions and 13 deletions.
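
Why these changes matter: once a tensor has more than 2**31 - 1 elements, any offset computed in 32-bit int overflows, so the diffs below widen offsets and parameters to index_t (int64). A minimal sketch of the arithmetic, assuming LARGE_X = 2**32 (the nightly suite defines its own constant):

import numpy as np

INT32_MAX = 2**31 - 1
LARGE_X = 2**32  # assumed value, mirroring the nightly large-vector tests

# one_hot writes row i at offset i * depth; with depth = LARGE_X even the
# second row (i = 1) is already past what a 32-bit int can represent.
offset = 1 * LARGE_X
assert offset > INT32_MAX
assert np.int64(offset) == 4294967296  # fits only in int64 (index_t)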
2 changes: 1 addition & 1 deletion src/ndarray/ndarray_function.cc
@@ -38,7 +38,7 @@ void Copy<cpu, cpu>(const TBlob &from, TBlob *to,
RunContext ctx) {
MSHADOW_TYPE_SWITCH(to->type_flag_, DType, {
if (to->type_flag_ == from.type_flag_) {
-    const index_t size = from.Size();
+    const index_t size = static_cast<index_t>(from.Size());
CHECK_EQ(size, to->Size()) << "copying size mismatch, from: " << size * sizeof(DType)
<< " bytes, to: " << to->Size() * sizeof(DType) << " bytes.";
common::ParallelCopy(to->dptr<DType>(), from.dptr<DType>(), size);
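
For context on the copy path above: copies between same-dtype arrays go through common::ParallelCopy, and the size check now runs on a 64-bit index_t element count. A hedged usage sketch from Python, assuming a large-memory host and LARGE_X = 2**32:

import numpy as np
import mxnet as mx
from mxnet import nd

LARGE_X = 2**32  # assumed vector length; needs a large-memory host

a = nd.arange(0, LARGE_X, dtype=np.int64)
b = nd.zeros(LARGE_X, dtype=np.int64)  # same dtype -> parallel copy path
a.copyto(b)                            # element count handled as index_t
assert b[-1].asscalar() == LARGE_X - 1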
18 changes: 9 additions & 9 deletions src/operator/tensor/indexing_op.h
@@ -1133,7 +1133,7 @@ void BatchTakeOpForward(const nnvm::NodeAttrs& attrs,
* \brief The parameters of the one_hot operator.
*/
struct OneHotParam : public dmlc::Parameter<OneHotParam> {
-  int depth;
+  index_t depth;
double on_value;
double off_value;
int axis;
@@ -1153,7 +1153,7 @@ struct OneHotParam : public dmlc::Parameter<OneHotParam> {
}
};

-inline void GetOneHotParams(const OneHotParam& param, int* depth, double* on_value,
+inline void GetOneHotParams(const OneHotParam& param, index_t* depth, double* on_value,
double* off_value, int* dtype) {
*depth = param.depth;
CHECK_GE(*depth, 0) << "Dimension size, depth, must be a non-negative integer";
@@ -1172,7 +1172,7 @@ inline bool OneHotOpShape(const nnvm::NodeAttrs& attrs,
const mxnet::TShape& ishape = (*in_attrs)[0];
if (!shape_is_known(ishape)) return false;

-  int depth = 0;
+  index_t depth = 0;
double on_value = 1.0;
double off_value = 0.0;
int dtype = mshadow::kFloat32;
@@ -1193,7 +1193,7 @@ inline bool OneHotOpType(const nnvm::NodeAttrs& attrs,
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
CHECK_NE((*in_attrs)[0], -1) << "Index type must be set for one_hot operator";
-  int depth = 0;
+  index_t depth = 0;
double on_value = 1.0;
double off_value = 0.0;
int dtype = -1;
@@ -1207,10 +1207,10 @@ inline bool OneHotOpType(const nnvm::NodeAttrs& attrs,
template<int req>
struct one_hot {
template<typename DType, typename IType>
-  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* indices,
-                                  int depth, DType on_value) {
-    int offset = i * depth;
-    int j = static_cast<int>(indices[i]);
+  MSHADOW_XINLINE static void Map(index_t i, DType* out, const IType* indices,
+                                  index_t depth, DType on_value) {
+    index_t offset = i * depth;
+    index_t j = static_cast<index_t>(indices[i]);
if (j >= 0 && j < depth) {
KERNEL_ASSIGN(out[offset+j], req, on_value);
}
@@ -1229,7 +1229,7 @@ void OneHotOpForward(const nnvm::NodeAttrs& attrs,
// The following line is needed to guard the situation when
// an output array is empty on GPU. In that case, out.dptr() = 0x0
if (outputs[0].Size() == 0) return;
-  int depth = 0;
+  index_t depth = 0;
double on_value = 1.0;
double off_value = 0.0;
int dtype = mshadow::kFloat32;
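
With depth widened to index_t, one_hot can address output rows whose offsets exceed the int32 range. A sketch mirroring test_one_hot below; the 2**32 depth is an assumption standing in for the nightly LARGE_X, and the resulting output is large enough to require the nightly large-memory host:

from mxnet import nd

DEPTH = 2**32  # assumed large depth

a = nd.zeros(10)
a[0] = 1
a[-1] = 1
b = nd.one_hot(a, DEPTH)  # row offset i * DEPTH is now computed in index_t
assert b[0][1] == 1       # rows whose index value is 1 are hot at column 1
assert b[-1][1] == 1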
4 changes: 2 additions & 2 deletions src/operator/tensor/matrix_op-inl.h
@@ -1148,8 +1148,8 @@ void SliceAssignScalarOpForward(const nnvm::NodeAttrs& attrs,

struct SliceAxisParam : public dmlc::Parameter<SliceAxisParam> {
int axis;
-  int begin;
-  dmlc::optional<int> end;
+  index_t begin;
+  dmlc::optional<index_t> end;
DMLC_DECLARE_PARAMETER(SliceAxisParam) {
DMLC_DECLARE_FIELD(axis)
.describe("Axis along which to be sliced, supports negative indexes.");
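
With begin/end widened to index_t, slice_axis accepts bounds past the int32 range. A hedged sketch, again assuming LARGE_X = 2**32 and a large-memory host:

import numpy as np
from mxnet import nd

LARGE_X = 2**32  # assumed

a = nd.arange(0, LARGE_X, dtype=np.int64)
# begin/end beyond 2**31 - 1 are representable now that they are index_t
c = nd.slice_axis(a, axis=0, begin=LARGE_X // 2, end=LARGE_X)
assert c.shape[0] == LARGE_X // 2
assert c[-1].asscalar() == LARGE_X - 1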
222 changes: 221 additions & 1 deletion tests/nightly/test_large_vector.py
@@ -148,7 +148,6 @@ def test_Dense(ctx=mx.cpu(0)):
def test_argsort():
b = create_vector(size=LARGE_X)
s = nd.argsort(b, axis=0, is_ascend=False, dtype=np.int64)
-    mx.nd.waitall()
assert (s[0].asnumpy() == (LARGE_X - 1)).all()


@@ -170,6 +169,227 @@ def test_topk():
assert val.sum() == (LARGE_X - 1)


def test_shape():
b = create_vector(size=LARGE_X)
    # indexing forces an explicit wait_to_read()
assert b[0] == 0
assert b.shape[0] == LARGE_X


def test_size():
b = create_vector(size=LARGE_X)
    # indexing forces an explicit wait_to_read()
assert b[0] == 0
assert b.size == LARGE_X


def test_copy():
a = nd.ones(LARGE_X)
b = a.copy()
assert a[0] == b[0]
assert b.shape == a.shape
assert b.size == LARGE_X


def test_copy_to():
a = create_vector(size=LARGE_X)
    # keeping the dtype the same as the input uses the parallel copy path, which is much faster
b = nd.zeros(LARGE_X, dtype=np.int64)
c = a.copyto(b)
assert c is b
assert b[-1] == LARGE_X-1
assert b[0] == 0


def test_zeros_like():
a = nd.ones(LARGE_X)
b = nd.zeros_like(a)
assert b[-1] == 0
assert b.shape == a.shape


def test_ones_like():
a = nd.zeros(LARGE_X)
b = nd.ones_like(a)
assert b[-1] == 1
assert b.shape == a.shape


def test_concat():
a = nd.ones(LARGE_X)
b = nd.zeros(LARGE_X)
    c = nd.concat(a, b, dim=0)
    assert c[0] == 1
    assert c[-1] == 0
assert c.shape[0] == (2 * LARGE_X)


def test_sum():
a = nd.ones(LARGE_X)
b = nd.sum(a, axis=0)
assert b[0] == LARGE_X


def test_prod():
a = nd.ones(LARGE_X)
b = nd.prod(a, axis=0)
assert b[0] == 1


def test_min():
a = create_vector(size=LARGE_X)
b = nd.min(a, axis=0)
assert b[0] == 0
assert b[-1] == 0


def test_max():
a = create_vector(size=LARGE_X)
b = nd.max(a, axis=0)
assert b[0] == (LARGE_X - 1)


def test_argmax():
a = nd.ones(LARGE_X)
b = nd.zeros(LARGE_X)
c = nd.concat(a, b, dim=0)
d = nd.argmax(c, axis=0)
assert c.shape[0] == (2 * LARGE_X)
assert d == 0


def np_softmax(x, axis=-1, temperature=1.0):
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x/temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x


def test_iadd():
a = nd.ones(LARGE_X)
b = nd.ones(LARGE_X)
c = b
c += a
assert c.shape == a.shape
assert c[-1] == 2


def test_isub():
a = nd.full(LARGE_X, 3)
b = nd.ones(LARGE_X)
c = a
c -= b
assert c.shape == a.shape
assert c[-1] == 2


def test_imul():
a = nd.full(LARGE_X, 3)
b = nd.ones(LARGE_X)
c = b
c *= a
assert c.shape == a.shape
assert c[-1] == 3


def test_idiv():
a = nd.full(LARGE_X, 4)
b = nd.full(LARGE_X, 2)
c = a
c /= b
assert c.shape == a.shape
assert c[-1] == 2


def test_imod():
a = nd.full(LARGE_X, 3)
b = nd.full(LARGE_X, 2)
c = a
c %= b
assert c.shape == a.shape
    assert c[-1] == 1


def test_eq():
a = nd.full(LARGE_X, 3)
b = nd.full(LARGE_X, 3)
c = (a == b)
    assert (c[0].asnumpy() == 1).all()


def test_neq():
a = nd.full(LARGE_X, 2)
b = nd.full(LARGE_X, 3)
c = (a != b)
    assert (c[0].asnumpy() == 1).all()


def test_lt():
    a = nd.full(LARGE_X, 2)
    b = nd.full(LARGE_X, 3)
    d = (a < b)
    assert (d[0].asnumpy() == 1).all()


def test_lte():
a = nd.full(LARGE_X, 2)
b = nd.full(LARGE_X, 3)
c = nd.full(LARGE_X, 2)
    d = (a <= b)
    assert (d[0].asnumpy() == 1).all()
    d = (a <= c)
    assert (d[0].asnumpy() == 1).all()


def test_gt():
a = nd.full(LARGE_X, 3)
b = nd.full(LARGE_X, 2)
    d = (a > b)
    assert (d[0].asnumpy() == 1).all()


def test_gte():
a = nd.full(LARGE_X, 3)
b = nd.full(LARGE_X, 2)
c = nd.full(LARGE_X, 3)
    d = (a >= b)
    assert (d[0].asnumpy() == 1).all()
    d = (a >= c)
    assert (d[0].asnumpy() == 1).all()


def test_slice_like():
a = create_vector(size=LARGE_X)
b = nd.ones(LARGE_X//2)
c = nd.slice_like(a, b)
assert c.shape == b.shape
assert c[0] == 0
assert c[-1] == (LARGE_X//2-1)


def test_slice_axis():
a = create_vector(size=LARGE_X)
c = nd.slice_axis(a, axis=0, begin=0, end=LARGE_X//2)
assert c.shape[0] == a.shape[0]//2
    assert c[-1] == (LARGE_X//2-1)


def test_full():
a = nd.full(LARGE_X, 3)
assert a.shape[0] == LARGE_X
assert a[LARGE_X//2] == 3
assert a[-1] == 3


def test_one_hot():
a = nd.zeros(10)
a[0] = 1
a[-1] = 1
b = nd.one_hot(a, LARGE_X)
assert b[0][1] == 1
assert b[-1][1] == 1


if __name__ == '__main__':
import nose
nose.runmodule()
