From 5591eb635c88e544804a30f0fafb7719b2926178 Mon Sep 17 00:00:00 2001
From: Bartlomiej Gawrych
Date: Wed, 3 Nov 2021 10:13:42 +0100
Subject: [PATCH 1/4] Add oneDNN support for stack

---
 python/mxnet/amp/lists/symbol_fp16.py   |   1 -
 .../_op_translations_opset12.py         |   1 -
 .../_op_translations_opset13.py         |   1 -
 src/operator/nn/dnnl/dnnl_base-inl.h    |   7 +-
 src/operator/nn/dnnl/dnnl_concat-inl.h  |   7 +-
 src/operator/nn/dnnl/dnnl_ops-inl.h     |   6 +
 src/operator/nn/dnnl/dnnl_stack.cc      | 113 ++++++++++++++++++
 src/operator/numpy/np_matrix_op.cc      |  40 -------
 src/operator/numpy/np_matrix_op.cu      |   2 -
 src/operator/tensor/matrix_op.cc        |  40 +++++++
 10 files changed, 169 insertions(+), 49 deletions(-)
 create mode 100644 src/operator/nn/dnnl/dnnl_stack.cc

diff --git a/python/mxnet/amp/lists/symbol_fp16.py b/python/mxnet/amp/lists/symbol_fp16.py
index a1404d512834..a6d8a5396c46 100644
--- a/python/mxnet/amp/lists/symbol_fp16.py
+++ b/python/mxnet/amp/lists/symbol_fp16.py
@@ -673,7 +673,6 @@
     '_npi_not_equal',
     '_npi_dstack',
     '_npi_hstack',
-    '_npi_stack',
     '_npi_tensordot',
     '_npi_tensordot_int_axes',
     '_npi_vstack',
diff --git a/python/mxnet/onnx/mx2onnx/_op_translations/_op_translations_opset12.py b/python/mxnet/onnx/mx2onnx/_op_translations/_op_translations_opset12.py
index 7e0cd8d43408..461a2dde92cd 100644
--- a/python/mxnet/onnx/mx2onnx/_op_translations/_op_translations_opset12.py
+++ b/python/mxnet/onnx/mx2onnx/_op_translations/_op_translations_opset12.py
@@ -3191,7 +3191,6 @@ def convert_embedding(node, **kwargs):


 @mx_op.register("stack")
-@mx_op.register("_npi_stack")
 def convert_stack(node, **kwargs):
     """Map MXNet's stack operator to onnx operators.
     """
diff --git a/python/mxnet/onnx/mx2onnx/_op_translations/_op_translations_opset13.py b/python/mxnet/onnx/mx2onnx/_op_translations/_op_translations_opset13.py
index c6452bee450d..f00237700f84 100644
--- a/python/mxnet/onnx/mx2onnx/_op_translations/_op_translations_opset13.py
+++ b/python/mxnet/onnx/mx2onnx/_op_translations/_op_translations_opset13.py
@@ -551,7 +551,6 @@ def convert_expand_dims(node, **kwargs):


 @mx_op.register("stack", OPSET_VERSION)
-@mx_op.register("_npi_stack", OPSET_VERSION)
 def convert_stack(node, **kwargs):
     """Map MXNet's stack operator to onnx operators.
     """
diff --git a/src/operator/nn/dnnl/dnnl_base-inl.h b/src/operator/nn/dnnl/dnnl_base-inl.h
index 4bf8b372b4ef..269033bb2feb 100644
--- a/src/operator/nn/dnnl/dnnl_base-inl.h
+++ b/src/operator/nn/dnnl/dnnl_base-inl.h
@@ -197,6 +197,7 @@ bool SupportDNNLTranspose(const NDArray& data);
 bool SupportDNNLBatchDot(const std::vector<NDArray>& inputs, const NDArray& output);
 bool SupportDNNLLayerNorm(const LayerNormParam& param, const std::vector<NDArray>& inputs);
 bool SupportDNNLReshape(const NDArray& input, const NDArray& output);
+bool SupportDNNLStack(const std::vector<NDArray>& inputs);
 }  // namespace op

 static int GetTypeSize(int dtype) {
@@ -607,9 +608,9 @@ class DNNLMemory {
       dnnl::memory::data_type data_type = dnnl::memory::data_type::undef) const {
     dnnl::memory::dims dims(desc.data.dims, desc.data.dims + desc.data.ndims);
     dnnl::memory::data_type cpp_type =
-        (data_type == dnnl::memory::data_type::undef)
-            ? static_cast<dnnl::memory::data_type>(desc.data.data_type)
-            : data_type;
+        (data_type == dnnl::memory::data_type::undef) ?
+            static_cast<dnnl::memory::data_type>(desc.data.data_type) :
+            data_type;
     dnnl::memory::desc data_md(dims, cpp_type, static_cast<dnnl::memory::format_tag>(format));
     return data_md;
   }
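The DNNLMemory hunk above is a pure formatting reflow of the ternary; the behaviour — derive a plain memory descriptor from an existing one, optionally overriding the element type — is unchanged. A minimal standalone sketch of that pattern, assuming the oneDNN v2.x C++ API (make_plain_desc is a hypothetical name, not MXNet code):

    #include <dnnl.hpp>

    // Build a plain (format_tag-addressed) descriptor from an existing one,
    // optionally overriding the element type.
    dnnl::memory::desc make_plain_desc(
        const dnnl::memory::desc& src,
        dnnl::memory::format_tag tag,
        dnnl::memory::data_type dtype = dnnl::memory::data_type::undef) {
      dnnl::memory::dims dims(src.data.dims, src.data.dims + src.data.ndims);
      const dnnl::memory::data_type cpp_type =
          (dtype == dnnl::memory::data_type::undef) ?
              static_cast<dnnl::memory::data_type>(src.data.data_type) :
              dtype;
      return dnnl::memory::desc(dims, cpp_type, tag);
    }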
diff --git a/src/operator/nn/dnnl/dnnl_concat-inl.h b/src/operator/nn/dnnl/dnnl_concat-inl.h
index 4646137aa6d4..3fd36cfdcee6 100644
--- a/src/operator/nn/dnnl/dnnl_concat-inl.h
+++ b/src/operator/nn/dnnl/dnnl_concat-inl.h
@@ -52,13 +52,18 @@ class DNNLConcatFwd {

 static DNNLConcatFwd& GetConcatForward(int concat_dim,
                                        const std::vector<NDArray>& in_data,
-                                       const std::vector<dnnl::memory::desc>& data_md) {
+                                       const std::vector<dnnl::memory::desc>& data_md,
+                                       int cache_dim = -1) {
 #if DMLC_CXX11_THREAD_LOCAL
   static thread_local std::unordered_map<OpSignature, DNNLConcatFwd, OpHash> fwds;
 #else
   static MX_THREAD_LOCAL std::unordered_map<OpSignature, DNNLConcatFwd, OpHash> fwds;
 #endif
+  if (cache_dim == -1) {
+    cache_dim = concat_dim;
+  }
   OpSignature key;
+  key.AddSign(cache_dim);
   key.AddSign(concat_dim);
   key.AddSign(in_data);

diff --git a/src/operator/nn/dnnl/dnnl_ops-inl.h b/src/operator/nn/dnnl/dnnl_ops-inl.h
index 6adc6ae43e8f..8db1e8adc1a5 100644
--- a/src/operator/nn/dnnl/dnnl_ops-inl.h
+++ b/src/operator/nn/dnnl/dnnl_ops-inl.h
@@ -180,6 +180,12 @@ void DNNLLayerNormBackward(const nnvm::NodeAttrs& attrs,

 void DNNLSum(const dnnl::memory& arr1, const dnnl::memory& arr2, const dnnl::memory& out);

+void DNNLStackForward(const nnvm::NodeAttrs& attrs,
+                      const OpContext& ctx,
+                      const std::vector<NDArray>& in_data,
+                      const std::vector<OpReqType>& req,
+                      const std::vector<NDArray>& out_data);
+
 template <class ParamType>
 void DNNLTransposeForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
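The declarations above wire the new stack kernel into the existing concat infrastructure: DNNLStackForward (added next, in dnnl_stack.cc) views every input as a 3-D tensor {leading, 1, trailing} and concatenates those views along the inserted middle axis. A plain-C++ sketch of that shape flattening (helper names hypothetical):

    #include <cassert>
    #include <cstdio>
    #include <vector>

    // Split an output shape into the {leading, mid, trailing} triple used by
    // DNNLStackForward. For stack, mid equals the number of stacked inputs.
    struct Dims3 { int leading, mid, trailing; };

    Dims3 split_shape(const std::vector<int>& oshape, int axis) {
      Dims3 d{1, oshape[axis], 1};
      for (int i = 0; i < axis; ++i) d.leading *= oshape[i];
      for (int i = axis + 1; i < static_cast<int>(oshape.size()); ++i) d.trailing *= oshape[i];
      return d;
    }

    int main() {
      // Stacking 4 arrays of shape (2, 3) on axis=1 gives output (2, 4, 3):
      Dims3 d = split_shape({2, 4, 3}, 1);
      assert(d.leading == 2 && d.mid == 4 && d.trailing == 3);
      // Each input is then treated as a {2, 1, 3} block and concatenated on axis 1.
      std::printf("leading=%d mid=%d trailing=%d\n", d.leading, d.mid, d.trailing);
      return 0;
    }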
diff --git a/src/operator/nn/dnnl/dnnl_stack.cc b/src/operator/nn/dnnl/dnnl_stack.cc
new file mode 100644
index 000000000000..ea80ecade875
--- /dev/null
+++ b/src/operator/nn/dnnl/dnnl_stack.cc
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file dnnl_stack.cc
+ */
+
+#include "./dnnl_base-inl.h"
+#include "./dnnl_concat-inl.h"
+#include "./dnnl_ops-inl.h"
+
+#include "../../tensor/matrix_op-inl.h"
+
+#if MXNET_USE_ONEDNN == 1
+namespace mxnet {
+namespace op {
+
+bool SupportDNNLStack(const std::vector<NDArray>& inputs) {
+  if (inputs[0].dtype() != mshadow::kFloat32 && inputs[0].dtype() != mshadow::kBfloat16)
+    return false;
+
+  int src_dtype = inputs[0].dtype();
+  for (auto& arr : inputs) {
+    if (arr.dtype() != src_dtype) {
+      return false;
+    }
+    // Do not support zero-size tensors.
+    if (arr.shape().Size() == 0)
+      return false;
+    int ndim = arr.shape().ndim();
+    if (ndim <= 0)
+      return false;
+  }
+  return true;
+}
+
+void DNNLStackForward(const nnvm::NodeAttrs& attrs,
+                      const OpContext& ctx,
+                      const std::vector<NDArray>& in_data,
+                      const std::vector<OpReqType>& req,
+                      const std::vector<NDArray>& out_data) {
+  TmpMemMgr::Get()->Init(ctx.requested[concat_enum::kTempSpace]);
+
+  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
+  const int axis = CheckAxis(param.axis, out_data[0].shape().ndim());
+  const auto oshape = out_data[0].shape();
+  const int src_dtype = in_data[0].dtype();
+  const int dst_dtype = out_data[0].dtype();
+  int leading = 1;
+  int trailing = 1;
+
+  for (int i = 0; i < axis; ++i) {
+    leading *= oshape[i];
+  }
+  for (int i = axis + 1; i < oshape.ndim(); ++i) {
+    trailing *= oshape[i];
+  }
+  int mid = oshape[axis];
+
+  std::vector<dnnl::memory::desc> data_md;
+  std::vector<dnnl::memory> data_mem;
+  dnnl::memory::desc in_md(
+      {leading, 1, trailing}, get_dnnl_type(src_dtype), dnnl::memory::format_tag::abc);
+  dnnl::memory::desc out_md(
+      {leading, mid, trailing}, get_dnnl_type(dst_dtype), dnnl::memory::format_tag::any);
+
+  const int num_in_data = in_data.size();
+  data_md.reserve(num_in_data);
+  data_mem.reserve(num_in_data);
+
+  MSHADOW_TYPE_SWITCH(src_dtype, DType, {
+    for (int i = 0; i < num_in_data; i++) {
+      NDArray tmp = in_data[i].IsDNNLData() ? in_data[i].Reorder2Default() : in_data[i];
+      dnnl::memory tmp_mem(in_md, CpuEngine::Get()->get_engine(), tmp.data().dptr<DType>());
+      data_mem.emplace_back(tmp_mem);
+      data_md.emplace_back(in_md);
+    }
+  });
+
+  auto& fwd = GetConcatForward(1, in_data, data_md, axis);
+  mxnet::dnnl_output_t out_mem =
+      CreateDNNLMem(out_data[concat_enum::kOut], fwd.fwd_pd.dst_desc(), req[concat_enum::kOut]);
+
+  std::unordered_map<int, dnnl::memory> net_args;
+  net_args.insert({DNNL_ARG_DST, *out_mem.second});
+  for (int i = 0; i < num_in_data; i++) {
+    net_args.insert({DNNL_ARG_MULTIPLE_SRC + i, data_mem[i]});
+  }
+
+  DNNLStream::Get()->RegisterPrimArgs(fwd.GetFwd(), net_args);
+  CommitOutput(out_data[concat_enum::kOut], out_mem);
+  DNNLStream::Get()->Submit();
+}
+
+}  // namespace op
+}  // namespace mxnet
+#endif
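For a concrete picture of what DNNLStackForward sets up, here is a self-contained program that stacks two (2, 3) float buffers on axis=0 with the same inserted-middle-axis concat trick. It assumes the oneDNN v2.x C++ API that MXNet builds against, and is an illustration rather than MXNet code:

    #include <dnnl.hpp>
    #include <cstdio>
    #include <unordered_map>
    #include <vector>

    int main() {
      using tag = dnnl::memory::format_tag;
      using dt  = dnnl::memory::data_type;
      dnnl::engine eng(dnnl::engine::kind::cpu, 0);
      dnnl::stream strm(eng);

      // Two (2, 3) inputs stacked on axis=0 -> output (2, 2, 3).
      // leading=1, trailing=6, so each input is viewed as {1, 1, 6}.
      std::vector<float> a(6, 1.f), b(6, 2.f), out(12, 0.f);
      dnnl::memory::desc in_md({1, 1, 6}, dt::f32, tag::abc);
      std::vector<dnnl::memory::desc> src_mds{in_md, in_md};
      std::vector<dnnl::memory> src_mems{dnnl::memory(in_md, eng, a.data()),
                                         dnnl::memory(in_md, eng, b.data())};

      // Concatenate along the inserted middle axis (dimension 1).
      dnnl::memory::desc out_md({1, 2, 6}, dt::f32, tag::abc);
      dnnl::concat::primitive_desc concat_pd(out_md, 1, src_mds, eng);
      dnnl::memory dst_mem(concat_pd.dst_desc(), eng, out.data());

      std::unordered_map<int, dnnl::memory> args{{DNNL_ARG_DST, dst_mem}};
      for (int i = 0; i < 2; ++i)
        args.insert({DNNL_ARG_MULTIPLE_SRC + i, src_mems[i]});

      dnnl::concat(concat_pd).execute(strm, args);
      strm.wait();
      std::printf("out[0]=%g out[6]=%g\n", out[0], out[6]);  // prints 1 and 2
      return 0;
    }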
diff --git a/src/operator/numpy/np_matrix_op.cc b/src/operator/numpy/np_matrix_op.cc
index 14a5597e3f0e..2acccc0ecde7 100644
--- a/src/operator/numpy/np_matrix_op.cc
+++ b/src/operator/numpy/np_matrix_op.cc
@@ -638,46 +638,6 @@ struct NumpyConcatGrad {
     return MakeGradNode(op_name, n, heads, n->attrs.dict);
   }
 };
-NNVM_REGISTER_OP(_npi_stack)
-    .describe(R"code(Join a sequence of arrays along a new axis.
-
-The axis parameter specifies the index of the new axis in the dimensions of the
-result. For example, if axis=0 it will be the first dimension and if axis=-1 it
-will be the last dimension.
-
-Examples::
-
-  x = [1, 2]
-  y = [3, 4]
-
-  stack(x, y) = [[1, 2],
-                 [3, 4]]
-  stack(x, y, axis=1) = [[1, 3],
-                         [2, 4]]
-)code")
-    .set_num_inputs([](const nnvm::NodeAttrs& attrs) {
-      const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
-      return static_cast<uint32_t>(param.num_args);
-    })
-    .set_num_outputs(1)
-    .set_attr_parser(ParamParser<StackParam>)
-    .set_attr<nnvm::FListInputNames>("FListInputNames",
-                                     [](const NodeAttrs& attrs) {
-                                       uint32_t num_args =
-                                           dmlc::get<StackParam>(attrs.parsed).num_args;
-                                       std::vector<std::string> ret;
-                                       for (uint32_t i = 0; i < num_args; ++i) {
-                                         ret.push_back(std::string("arg") + std::to_string(i));
-                                       }
-                                       return ret;
-                                     })
-    .set_attr<std::string>("key_var_num_args", "num_args")
-    .set_attr<mxnet::FInferShape>("FInferShape", StackOpShape)
-    .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<-1, 1>)
-    .set_attr<FCompute>("FCompute", StackOpForward<cpu>)
-    .set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseNone{"_backward_stack"})
-    .add_argument("data", "NDArray-or-Symbol[]", "List of arrays to stack")
-    .add_arguments(StackParam::__FIELDS__());

 bool NumpyColumnStackType(const nnvm::NodeAttrs& attrs,
                           std::vector<int>* in_type,
diff --git a/src/operator/numpy/np_matrix_op.cu b/src/operator/numpy/np_matrix_op.cu
index 7b7a3bd4b133..f2078146c78e 100644
--- a/src/operator/numpy/np_matrix_op.cu
+++ b/src/operator/numpy/np_matrix_op.cu
@@ -34,8 +34,6 @@ NNVM_REGISTER_OP(_np_reshape).set_attr<FCompute>("FCompute", UnaryOp::Ident
 NNVM_REGISTER_OP(_npi_squeeze).set_attr<FCompute>("FCompute", UnaryOp::IdentityCompute<gpu>);

-NNVM_REGISTER_OP(_npi_stack).set_attr<FCompute>("FCompute", StackOpForward<gpu>);
-
 NNVM_REGISTER_OP(_npi_vstack).set_attr<FCompute>("FCompute", NumpyVstackForward<gpu>);

 NNVM_REGISTER_OP(_backward_np_vstack).set_attr<FCompute>("FCompute", NumpyVstackBackward<gpu>);
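The `_npi_stack` registration deleted above reappears in matrix_op.cc below as an alias of `stack`, with its shape function (StackOpShape) unchanged. Its rule is simply "insert a new axis of length num_args at the (possibly negative) axis position". A plain-C++ sketch of that rule, checked against the docstring's own example (stack_shape is a hypothetical helper, not the real StackOpShape):

    #include <cassert>
    #include <vector>

    // Output shape of stack(): insert num_inputs at position axis.
    // axis may be negative, counted against the output rank (ndim + 1).
    std::vector<int> stack_shape(std::vector<int> ishape, int num_inputs, int axis) {
      const int out_ndim = static_cast<int>(ishape.size()) + 1;
      if (axis < 0) axis += out_ndim;  // e.g. axis=-1 -> last position
      assert(axis >= 0 && axis < out_ndim);
      ishape.insert(ishape.begin() + axis, num_inputs);
      return ishape;
    }

    int main() {
      // x = [1, 2], y = [3, 4]: two inputs of shape (2,)
      assert(stack_shape({2}, 2, 0) == (std::vector<int>{2, 2}));  // stack(x, y)
      assert(stack_shape({2}, 2, 1) == (std::vector<int>{2, 2}));  // stack(x, y, axis=1)
      assert(stack_shape({4, 5}, 3, -1) == (std::vector<int>{4, 5, 3}));
      return 0;
    }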
diff --git a/src/operator/tensor/matrix_op.cc b/src/operator/tensor/matrix_op.cc
index 9e04cd064e0d..39c7d54fe5ec 100644
--- a/src/operator/tensor/matrix_op.cc
+++ b/src/operator/tensor/matrix_op.cc
@@ -930,7 +930,38 @@ NNVM_REGISTER_OP(_backward_reverse)
     })
     .set_attr<FCompute>("FCompute", ReverseOpForward<cpu>);

+#if MXNET_USE_ONEDNN == 1
+static void StackForwardEx(const nnvm::NodeAttrs& attrs,
+                           const OpContext& op_ctx,
+                           const std::vector<NDArray>& inputs,
+                           const std::vector<OpReqType>& req,
+                           const std::vector<NDArray>& outputs) {
+  CHECK(!inputs.empty());
+  CHECK_EQ(outputs.size(), 1U);
+  CHECK_EQ(req.size(), 1U);
+  if (req[0] == kNullOp)
+    return;
+
+  if (SupportDNNLStack(inputs)) {
+    DNNL_OPCHECK_INIT(false, outputs.size(), inputs, outputs);
+    DNNLRun(DNNLStackForward, attrs, op_ctx, inputs, req, outputs);
+    DNNL_OPCHECK_RUN(StackOpForward<cpu>, attrs, op_ctx, inputs, req, outputs);
+  } else {
+    FallBackCompute(StackOpForward<cpu>, attrs, op_ctx, inputs, req, outputs);
+  }
+}
+
+inline static bool StackInferStorageType(const nnvm::NodeAttrs& attrs,
+                                         const int dev_mask,
+                                         DispatchMode* dispatch_mode,
+                                         std::vector<int>* in_attrs,
+                                         std::vector<int>* out_attrs) {
+  return DNNLStorageType(attrs, dev_mask, true, dispatch_mode, in_attrs, out_attrs);
+}
+#endif  // MXNET_USE_ONEDNN == 1
+
 NNVM_REGISTER_OP(stack)
+    .add_alias("_npi_stack")
     .describe(R"code(Join a sequence of arrays along a new axis.

 The axis parameter specifies the index of the new axis in the dimensions of the
 result. For example, if axis=0 it will be the first dimension and if axis=-1 it
 will be the last dimension.

@@ -965,6 +996,15 @@ Examples::
     .set_attr<mxnet::FInferShape>("FInferShape", StackOpShape)
     .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<-1, 1>)
     .set_attr<FCompute>("FCompute", StackOpForward<cpu>)
+#if MXNET_USE_ONEDNN == 1
+    .set_attr<FComputeEx>("FComputeEx", StackForwardEx)
+    .set_attr<bool>("TIsDNNL", true)
+    .set_attr<FResourceRequest>("FResourceRequest",
+                                [](const NodeAttrs& n) {
+                                  return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
+                                })
+    .set_attr<FInferStorageType>("FInferStorageType", StackInferStorageType)
+#endif
     .set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseNone{"_backward_stack"})
     .add_argument("data", "NDArray-or-Symbol[]", "List of arrays to stack")
    .add_arguments(StackParam::__FIELDS__());
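DNNL_OPCHECK_RUN in the hunk above cross-checks the oneDNN output against the stock StackOpForward kernel. The semantics being checked can be written out as a naive reference in a few lines of plain C++ (a float-only sketch for illustration, not the actual MXNet kernel):

    #include <cassert>
    #include <vector>

    // Naive stack: copy input i into the i-th {leading, 1, trailing} slice of
    // the {leading, num_inputs, trailing} output (row-major layout).
    std::vector<float> stack_ref(const std::vector<std::vector<float>>& inputs,
                                 int leading, int trailing) {
      const int n = static_cast<int>(inputs.size());
      std::vector<float> out(static_cast<size_t>(leading) * n * trailing);
      for (int l = 0; l < leading; ++l)
        for (int i = 0; i < n; ++i)
          for (int t = 0; t < trailing; ++t)
            out[(l * n + i) * trailing + t] = inputs[i][l * trailing + t];
      return out;
    }

    int main() {
      // stack([1, 2], [3, 4]) -> [[1, 2], [3, 4]]          (axis=0: leading=1, trailing=2)
      assert(stack_ref({{1, 2}, {3, 4}}, 1, 2) == (std::vector<float>{1, 2, 3, 4}));
      // stack([1, 2], [3, 4], axis=1) -> [[1, 3], [2, 4]]  (axis=1: leading=2, trailing=1)
      assert(stack_ref({{1, 2}, {3, 4}}, 2, 1) == (std::vector<float>{1, 3, 2, 4}));
      return 0;
    }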
From be8f36970845eb6870d8e0c3c9e27973bcbbaee3 Mon Sep 17 00:00:00 2001
From: Bartlomiej Gawrych
Date: Mon, 8 Nov 2021 16:25:30 +0100
Subject: [PATCH 2/4] review

---
 src/operator/nn/dnnl/dnnl_stack.cc | 40 +++++++++++++++++++-----------
 src/operator/tensor/matrix_op.cc   |  3 ++-
 2 files changed, 27 insertions(+), 16 deletions(-)

diff --git a/src/operator/nn/dnnl/dnnl_stack.cc b/src/operator/nn/dnnl/dnnl_stack.cc
index ea80ecade875..c77bb5b384aa 100644
--- a/src/operator/nn/dnnl/dnnl_stack.cc
+++ b/src/operator/nn/dnnl/dnnl_stack.cc
@@ -32,20 +32,24 @@ namespace mxnet {
 namespace op {

 bool SupportDNNLStack(const std::vector<NDArray>& inputs) {
-  if (inputs[0].dtype() != mshadow::kFloat32 && inputs[0].dtype() != mshadow::kBfloat16)
+  if (inputs[0].dtype() != mshadow::kFloat32 && inputs[0].dtype() != mshadow::kBfloat16) {
     return false;
+  }

   int src_dtype = inputs[0].dtype();
-  for (auto& arr : inputs) {
+  for (const auto& arr : inputs) {
     if (arr.dtype() != src_dtype) {
       return false;
     }
     // Do not support zero-size tensors.
-    if (arr.shape().Size() == 0)
+    if (arr.shape().Size() == 0) {
       return false;
+    }
+
     int ndim = arr.shape().ndim();
-    if (ndim <= 0)
+    if (ndim <= 0) {
       return false;
+    }
   }
   return true;
 }
@@ -57,28 +61,34 @@ void DNNLStackForward(const nnvm::NodeAttrs& attrs,
                       const std::vector<NDArray>& out_data) {
   TmpMemMgr::Get()->Init(ctx.requested[concat_enum::kTempSpace]);

+  // const value of new dimension to stack
+  // tensors with oneDNN concat primitive
+  constexpr int stacking_dim = 1;
+
   const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
   const int axis = CheckAxis(param.axis, out_data[0].shape().ndim());
-  const auto oshape = out_data[0].shape();
+  const TShape oshape = out_data[0].shape();
   const int src_dtype = in_data[0].dtype();
   const int dst_dtype = out_data[0].dtype();
-  int leading = 1;
-  int trailing = 1;
+  const int mid_dim = oshape[axis];
+  int leading_dim = 1;
+  int trailing_dim = 1;

   for (int i = 0; i < axis; ++i) {
-    leading *= oshape[i];
+    leading_dim *= oshape[i];
   }
   for (int i = axis + 1; i < oshape.ndim(); ++i) {
-    trailing *= oshape[i];
+    trailing_dim *= oshape[i];
   }
-  int mid = oshape[axis];

   std::vector<dnnl::memory::desc> data_md;
   std::vector<dnnl::memory> data_mem;
-  dnnl::memory::desc in_md(
-      {leading, 1, trailing}, get_dnnl_type(src_dtype), dnnl::memory::format_tag::abc);
-  dnnl::memory::desc out_md(
-      {leading, mid, trailing}, get_dnnl_type(dst_dtype), dnnl::memory::format_tag::any);
+  dnnl::memory::desc in_md({leading_dim, stacking_dim, trailing_dim},
+                           get_dnnl_type(src_dtype),
+                           dnnl::memory::format_tag::abc);
+  dnnl::memory::desc out_md({leading_dim, mid_dim, trailing_dim},
+                            get_dnnl_type(dst_dtype),
+                            dnnl::memory::format_tag::any);

   const int num_in_data = in_data.size();
   data_md.reserve(num_in_data);
@@ -93,7 +103,7 @@ void DNNLStackForward(const nnvm::NodeAttrs& attrs,
     }
   });

-  auto& fwd = GetConcatForward(1, in_data, data_md, axis);
+  auto& fwd = GetConcatForward(stacking_dim, in_data, data_md, axis);
   mxnet::dnnl_output_t out_mem =
       CreateDNNLMem(out_data[concat_enum::kOut], fwd.fwd_pd.dst_desc(), req[concat_enum::kOut]);

diff --git a/src/operator/tensor/matrix_op.cc b/src/operator/tensor/matrix_op.cc
index 39c7d54fe5ec..2ec61f3001a1 100644
--- a/src/operator/tensor/matrix_op.cc
+++ b/src/operator/tensor/matrix_op.cc
@@ -939,8 +939,9 @@ static void StackForwardEx(const nnvm::NodeAttrs& attrs,
   CHECK(!inputs.empty());
   CHECK_EQ(outputs.size(), 1U);
   CHECK_EQ(req.size(), 1U);
-  if (req[0] == kNullOp)
+  if (req[0] == kNullOp) {
     return;
+  }

   if (SupportDNNLStack(inputs)) {
     DNNL_OPCHECK_INIT(false, outputs.size(), inputs, outputs);
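The next patch folds the extra cache argument directly into the primitive key. The motivation, as far as the diffs suggest: after flattening, concat_dim is always 1, so two stack calls over the same input arrays but different axes would otherwise map to the same cached primitive even though their {leading, 1, trailing} views differ. A small plain-C++ illustration of why the axis must distinguish the keys:

    #include <cstdio>
    #include <vector>

    int main() {
      // Two stacks of the same pair of (2, 2) arrays:
      //   axis=0 -> inputs viewed as {1, 1, 4}
      //   axis=1 -> inputs viewed as {2, 1, 2}
      // The input NDArrays and concat_dim (== 1) are identical in both cases,
      // so a cache key built only from those cannot tell the two primitives
      // apart; the stack axis has to enter the key (key.AddSign(stack_axis)).
      const std::vector<int> ishape{2, 2};
      for (int axis = 0; axis <= 2; ++axis) {
        int leading = 1, trailing = 1;
        for (int i = 0; i < axis; ++i) leading *= ishape[i];
        for (int i = axis; i < 2; ++i) trailing *= ishape[i];
        std::printf("axis=%d -> view {%d, 1, %d}\n", axis, leading, trailing);
      }
      return 0;
    }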
From b4c335de28531c56d8c0d4a4739c944919b2982c Mon Sep 17 00:00:00 2001
From: Bartlomiej Gawrych
Date: Tue, 9 Nov 2021 10:52:13 +0100
Subject: [PATCH 3/4] review changes

---
 src/operator/nn/dnnl/dnnl_concat-inl.h | 8 +++-----
 src/operator/nn/dnnl/dnnl_stack.cc     | 6 +++---
 2 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/src/operator/nn/dnnl/dnnl_concat-inl.h b/src/operator/nn/dnnl/dnnl_concat-inl.h
index 3fd36cfdcee6..294582ab47ea 100644
--- a/src/operator/nn/dnnl/dnnl_concat-inl.h
+++ b/src/operator/nn/dnnl/dnnl_concat-inl.h
@@ -53,18 +53,16 @@ class DNNLConcatFwd {
 static DNNLConcatFwd& GetConcatForward(int concat_dim,
                                        const std::vector<NDArray>& in_data,
                                        const std::vector<dnnl::memory::desc>& data_md,
-                                       int cache_dim = -1) {
+                                       int stack_axis = -1 /*used only by stack op*/) {
 #if DMLC_CXX11_THREAD_LOCAL
   static thread_local std::unordered_map<OpSignature, DNNLConcatFwd, OpHash> fwds;
 #else
   static MX_THREAD_LOCAL std::unordered_map<OpSignature, DNNLConcatFwd, OpHash> fwds;
 #endif
-  if (cache_dim == -1) {
-    cache_dim = concat_dim;
-  }
+
   OpSignature key;
-  key.AddSign(cache_dim);
   key.AddSign(concat_dim);
+  key.AddSign(stack_axis);
   key.AddSign(in_data);

   auto it = fwds.find(key);
diff --git a/src/operator/nn/dnnl/dnnl_stack.cc b/src/operator/nn/dnnl/dnnl_stack.cc
index c77bb5b384aa..aabb66e9da99 100644
--- a/src/operator/nn/dnnl/dnnl_stack.cc
+++ b/src/operator/nn/dnnl/dnnl_stack.cc
@@ -61,8 +61,8 @@ void DNNLStackForward(const nnvm::NodeAttrs& attrs,
                       const std::vector<NDArray>& out_data) {
   TmpMemMgr::Get()->Init(ctx.requested[concat_enum::kTempSpace]);

-  // const value of new dimension to stack
-  // tensors with oneDNN concat primitive
+  // const value of artificial new dimension to
+  // stack tensors on using oneDNN concat primitive
   constexpr int stacking_dim = 1;

   const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
@@ -96,7 +96,7 @@ void DNNLStackForward(const nnvm::NodeAttrs& attrs,

   MSHADOW_TYPE_SWITCH(src_dtype, DType, {
     for (int i = 0; i < num_in_data; i++) {
-      NDArray tmp = in_data[i].IsDNNLData() ? in_data[i].Reorder2Default() : in_data[i];
+      NDArray tmp = in_data[i].Reorder2Default();
       dnnl::memory tmp_mem(in_md, CpuEngine::Get()->get_engine(), tmp.data().dptr<DType>());
       data_mem.emplace_back(tmp_mem);
       data_md.emplace_back(in_md);

From 215b79b31368b0ea544a1d6c739661492f2a2e04 Mon Sep 17 00:00:00 2001
From: Bartlomiej Gawrych
Date: Tue, 9 Nov 2021 11:05:12 +0100
Subject: [PATCH 4/4] add comments to false statement in macro

---
 src/operator/tensor/matrix_op.cc | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/operator/tensor/matrix_op.cc b/src/operator/tensor/matrix_op.cc
index 2ec61f3001a1..787eb5c5bd16 100644
--- a/src/operator/tensor/matrix_op.cc
+++ b/src/operator/tensor/matrix_op.cc
@@ -140,7 +140,8 @@ bool ReshapeStorageType(const nnvm::NodeAttrs& attrs,
                         std::vector<int>* out_attrs) {
   CHECK_EQ(in_attrs->size(), 1U);
   CHECK_EQ(out_attrs->size(), 1U);
-  return DNNLStorageType(attrs, dev_mask, true, dispatch_mode, in_attrs, out_attrs);
+  return DNNLStorageType(
+      attrs, dev_mask, /*support_dnnl*/ true, dispatch_mode, in_attrs, out_attrs);
 }
 #endif

@@ -944,7 +945,7 @@ static void StackForwardEx(const nnvm::NodeAttrs& attrs,
   }

   if (SupportDNNLStack(inputs)) {
-    DNNL_OPCHECK_INIT(false, outputs.size(), inputs, outputs);
+    DNNL_OPCHECK_INIT(/*is backward*/ false, outputs.size(), inputs, outputs);
     DNNLRun(DNNLStackForward, attrs, op_ctx, inputs, req, outputs);
     DNNL_OPCHECK_RUN(StackOpForward<cpu>, attrs, op_ctx, inputs, req, outputs);
   } else {
     FallBackCompute(StackOpForward<cpu>, attrs, op_ctx, inputs, req, outputs);
   }
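Patch 3's switch to an unconditional Reorder2Default() relies on reordering being cheap (effectively a no-op) for tensors already in the default layout. The mechanism underneath is oneDNN's reorder primitive; a standalone sketch, assuming the oneDNN v2.x API:

    #include <dnnl.hpp>
    #include <vector>

    int main() {
      using tag = dnnl::memory::format_tag;
      using dt  = dnnl::memory::data_type;
      dnnl::engine eng(dnnl::engine::kind::cpu, 0);
      dnnl::stream strm(eng);

      // A 2x3 tensor kept in a non-default (column-major) layout...
      std::vector<float> blocked{1, 4, 2, 5, 3, 6}, plain(6, 0.f);
      dnnl::memory src(dnnl::memory::desc({2, 3}, dt::f32, tag::ba), eng, blocked.data());
      // ...reordered into the default row-major layout, analogous to what
      // NDArray::Reorder2Default() does before handing pointers to the concat.
      dnnl::memory dst(dnnl::memory::desc({2, 3}, dt::f32, tag::ab), eng, plain.data());

      dnnl::reorder(src, dst).execute(strm, src, dst);
      strm.wait();  // plain is now {1, 2, 3, 4, 5, 6}
      return 0;
    }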