
ElementWiseSum fix for oneDNN (#18859)
* Fix ElementwiseSum for DNNL

* Add test for oneDNN ElemwiseSum

Co-authored-by: Bart Gawrych <[email protected]>
bgawrych and Bart Gawrych authored Aug 6, 2020
1 parent a78f137 commit 84f8984
Showing 2 changed files with 31 additions and 8 deletions.
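
For context on the fix: oneDNN-layout NDArrays report kDefaultStorage, so with the old branch order an add_n over more than four such inputs matched the generic dense/sparse condition before the oneDNN check ever ran. A minimal imperative sketch of that scenario (a hypothetical repro, not part of the commit; assumes a oneDNN-enabled CPU build):

import mxnet as mx

# Five convolution outputs carry oneDNN's blocked memory layout; summing
# them exercises the add_n path patched in elemwise_sum.cc below.
x = mx.nd.ones((1, 3, 8, 8))
w = mx.nd.ones((8, 3, 3, 3))
conv = mx.nd.Convolution(data=x, weight=w, no_bias=True,
                         num_filter=8, kernel=(3, 3))
total = mx.nd.add_n(conv, conv, conv, conv, conv)
# A 3x3 kernel over 3 all-ones channels sums 27 ones per output element,
# so every element should be 5 * 27 = 135 once the fix is in.
print(total[0, 0, 0, 0].asscalar())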
15 changes: 8 additions & 7 deletions src/operator/tensor/elemwise_sum.cc
@@ -113,7 +113,14 @@ void ElementWiseSumComputeExCPU(const nnvm::NodeAttrs& attrs,
   CHECK_EQ(outputs.size(), 1U);
   CHECK_EQ(req.size(), 1U);
   if (req[0] == kNullOp) return;
-  if (common::ContainsOnlyStorage(inputs, kRowSparseStorage) ||
+#if MXNET_USE_MKLDNN == 1
+  if (IsMKLDNNData(inputs)) {
+    MKLDNNRun(MKLDNNSumForward, attrs, ctx, inputs, req, outputs);
+  } else if (common::ContainsOnlyStorage(inputs, kDefaultStorage)) {
+    FallBackCompute(ElementWiseSumCompute<cpu>, attrs, ctx, inputs, req, outputs);
+  }
+#endif
+  else if (common::ContainsOnlyStorage(inputs, kRowSparseStorage) ||  // NOLINT(*)
       (inputs.size() == 3U && inputs[0].storage_type() == kDefaultStorage &&
        inputs[1].storage_type() == kCSRStorage && inputs[2].storage_type() == kDefaultStorage) ||
       (inputs.size() > 4U && common::ContainsStorageType(inputs, kDefaultStorage) &&
@@ -123,12 +130,6 @@ void ElementWiseSumComputeExCPU(const nnvm::NodeAttrs& attrs,
         ResourceRequest(ResourceRequest::kTempSpace));
     NDArray out_nd = outputs[0];
     mxnet::ndarray::ElementwiseSum<cpu>(s, rsc, inputs, &out_nd);
-#if MXNET_USE_MKLDNN == 1
-  } else if (IsMKLDNNData(inputs)) {
-    MKLDNNRun(MKLDNNSumForward, attrs, ctx, inputs, req, outputs);
-  } else if (common::ContainsOnlyStorage(inputs, kDefaultStorage)) {
-    FallBackCompute(ElementWiseSumCompute<cpu>, attrs, ctx, inputs, req, outputs);
-#endif
   } else {
     LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
   }
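The key point of the reordering: the oneDNN check now runs before the storage-type heuristics, which oneDNN tensors would otherwise also satisfy (they report kDefaultStorage). Note the chain still parses when MXNET_USE_MKLDNN is 0, because the else if after #endif then attaches to the if (req[0] == kNullOp) return; guard above it. A rough Python paraphrase of the patched dispatch (the flag and predicate names are illustrative stand-ins, not real MXNet calls):

MXNET_USE_MKLDNN = True  # stand-in for the compile-time flag

def dispatch_sum(is_mkldnn_data, only_default_storage, sparse_pattern):
    # Patched order: oneDNN layout first, then plain dense fallback,
    # then the row-sparse / CSR temp-space path.
    if MXNET_USE_MKLDNN and is_mkldnn_data:
        return "MKLDNNSumForward"
    if MXNET_USE_MKLDNN and only_default_storage:
        return "FallBackCompute(ElementWiseSumCompute<cpu>)"
    if sparse_pattern:
        return "mxnet::ndarray::ElementwiseSum<cpu>"
    return "LogUnimplementedOp"

# Five oneDNN-layout inputs also satisfy the old '>4 default-storage inputs'
# condition, so the pre-fix order picked the generic path and read the
# blocked-layout buffers as if they were plain NCHW:
print(dispatch_sum(True, True, True))  # MKLDNNSumForward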
24 changes: 23 additions & 1 deletion tests/python/mkl/test_mkldnn.py
@@ -219,6 +219,28 @@ def test_flatten_slice_after_conv():
     print(p[0])


+def test_mkldnn_sum_with_mkldnn_layout():
+
+    x_shape = (32, 3, 224, 224)
+    x_npy = np.ones(x_shape, dtype='float32')
+    w_shape = (32, 3, 3, 3)
+    w_npy = np.ones(w_shape, dtype='float32')
+
+    x = mx.sym.Variable("x")
+    w = mx.sym.Variable("w")
+    z = mx.symbol.Convolution(data=x, weight=w, num_filter=32, kernel=(3, 3))
+    num_inputs = [2, 3, 4, 5]
+    for i in num_inputs:
+        inputs = []
+        for n in range(i):
+            inputs.append(z)
+        y = mx.sym.add_n(*inputs)  # only oneDNN-layout inputs
+        exe = y._simple_bind(ctx=mx.cpu(), x=x_shape, w=w_shape)
+        out = exe.forward(is_train=False, x=x_npy, w=w_npy)[0]
+        # A (3, 3) convolution over 3 all-ones input channels sums 3*3*3 = 27 ones.
+        single_conv = 27.0
+        assert_almost_equal(out[0].asnumpy()[0, 0, 0], single_conv * i)
+
 def test_mkldnn_sum_inplace_with_cpu_layout():
     x_shape = (32, 3, 224, 224)
     x_npy = np.ones(x_shape, dtype='float32')
@@ -227,7 +249,7 @@ def test_mkldnn_sum_inplace_with_cpu_layout():
     x = mx.sym.Variable("x")
     y = mx.sym.Variable("y")
     z = mx.symbol.Convolution(data=x, num_filter=32, kernel=(3, 3))
-    z = mx.sym.add_n(z, y)
+    z = mx.sym.add_n(z, y)  # sums oneDNN-layout data with plain CPU-layout data
     exe = z._simple_bind(ctx=mx.cpu(), x=x_shape, y=y_shape)
     out = exe.forward(is_train=False, x=x_npy, y=y_npy)[0]
     assert_almost_equal(out[0].asnumpy()[0, 0, 0], 1.0)
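
A quick numpy cross-check of the single_conv = 27.0 constant in the new test (x_patch and w_filter are illustrative names, not from the commit):

import numpy as np

# One output element of a (3, 3) convolution over 3 all-ones input channels
# with all-ones weights is the sum of 3 * 3 * 3 ones.
x_patch = np.ones((3, 3, 3), dtype='float32')   # channels x kh x kw
w_filter = np.ones((3, 3, 3), dtype='float32')
print((x_patch * w_filter).sum())  # 27.0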
