[Relay/TOPI][Op] Add shape op in Relay and TOPI #2749

Merged · 8 commits · Mar 13, 2019
2 changes: 2 additions & 0 deletions docs/api/python/topi.rst
@@ -75,6 +75,7 @@ List of operators
   topi.stack
   topi.repeat
   topi.tile
   topi.shape
   topi.layout_transform
   topi.image.resize

@@ -136,6 +137,7 @@ topi
.. autofunction:: topi.stack
.. autofunction:: topi.repeat
.. autofunction:: topi.tile
.. autofunction:: topi.shape
.. autofunction:: topi.layout_transform

topi.nn
2 changes: 2 additions & 0 deletions docs/langref/relay_op.rst
@@ -154,6 +154,7 @@ This level support backpropagation of broadcast operators. It is temporary.
   tvm.relay.broadcast_to_like
   tvm.relay.collapse_sum_like
   tvm.relay.slice_like
   tvm.relay.shape_of
   tvm.relay.layout_transform
   tvm.relay.device_copy
   tvm.relay.annotation.on_device
@@ -273,6 +274,7 @@ Level 10 Definitions
.. autofunction:: tvm.relay.broadcast_to_like
.. autofunction:: tvm.relay.collapse_sum_like
.. autofunction:: tvm.relay.slice_like
.. autofunction:: tvm.relay.shape_of
.. autofunction:: tvm.relay.layout_transform
.. autofunction:: tvm.relay.device_copy
.. autofunction:: tvm.relay.annotation.on_device
12 changes: 12 additions & 0 deletions include/tvm/relay/attrs/transform.h
@@ -217,6 +217,7 @@ struct ClipAttrs : public tvm::AttrsNode<ClipAttrs> {
  }
};

/*! \brief Attributes for LayoutTransform operator */
struct LayoutTransformAttrs : public tvm::AttrsNode<LayoutTransformAttrs> {
  std::string src_layout;
  std::string dst_layout;
@@ -229,6 +230,17 @@ struct LayoutTransformAttrs : public tvm::AttrsNode<LayoutTransformAttrs> {
  }
};

/*! \brief Attributes for ShapeOf operator */
struct ShapeOfAttrs : public tvm::AttrsNode<ShapeOfAttrs> {
  DataType dtype;

  TVM_DECLARE_ATTRS(ShapeOfAttrs, "relay.attrs.ShapeOfAttrs") {
    TVM_ATTR_FIELD(dtype)
        .describe("Target data type")
        .set_default(NullValue<DataType>());
  }
};

} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_ATTRS_TRANSFORM_H_
14 changes: 14 additions & 0 deletions python/tvm/relay/frontend/mxnet.py
@@ -488,6 +488,19 @@ def _mx_l2_normalize(inputs, attrs):
    return _op.nn.l2_normalize(inputs[0], **new_attrs)


def _mx_shape_array(inputs, attrs):
    assert len(inputs) == 1
    if attrs.get_int("lhs_begin", None) is not None:
        raise RuntimeError("shape_array doesn't support lhs_begin")
    if attrs.get_int("lhs_end", None) is not None:
        raise RuntimeError("shape_array doesn't support lhs_end")
    if attrs.get_int("rhs_begin", None) is not None:
        raise RuntimeError("shape_array doesn't support rhs_begin")
    if attrs.get_int("rhs_end", None) is not None:
        raise RuntimeError("shape_array doesn't support rhs_end")
    return _op.shape_of(inputs[0], dtype='int64')


# Note: due to attribute conversion constraints,
# ops in the identity set must be attribute-free
_identity_list = [
@@ -613,6 +626,7 @@ def _mx_l2_normalize(inputs, attrs):
    "repeat" : _mx_repeat,
    "tile" : _mx_tile,
    "BlockGrad" : _mx_BlockGrad,
    "shape_array" : _mx_shape_array,
    "SoftmaxOutput" : _mx_softmax_output,
    "SoftmaxActivation" : _mx_softmax_activation,
    # vision
1 change: 1 addition & 0 deletions python/tvm/relay/op/_tensor.py
@@ -40,6 +40,7 @@
register_schedule("minimum", schedule_injective)
register_schedule("right_shift", schedule_injective)
register_schedule("left_shift", schedule_injective)
register_schedule("shape_of", schedule_injective)

# zeros
@register_compute("zeros")
19 changes: 19 additions & 0 deletions python/tvm/relay/op/tensor.py
@@ -713,3 +713,22 @@ def device_copy(data, src_dev, dst_dev):
raise ValueError("dst_dev is expected to be the type of TVMContext or "
"str, but received %s" % (type(dst_dev)))
return _make.device_copy(data, src_dev, dst_dev)


def shape_of(data, dtype="int32"):
"""Get shape of a tensor.
Parameters
----------
data : tvm.relay.Expr
The input tensor.
dtype : str, optional
The target data type.
Returns
-------
result : tvm.relay.Expr
The shape tensor.
"""
return _make.shape_of(data, dtype)
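Below is a minimal usage sketch (not part of the diff) showing what `shape_of` returns; it assumes the interpreter-based `debug` executor, as used by the tests added in this PR:

```python
import numpy as np
import tvm
from tvm import relay

# Build a function that returns the shape of its input.
x = relay.var("x", shape=(10, 5, 12), dtype="float32")
func = relay.Function([x], relay.shape_of(x))   # dtype defaults to "int32"
func = relay.ir_pass.infer_type(func)           # result type: Tensor[(3,), int32]

# Evaluate with the interpreter (debug executor).
intrp = relay.create_executor("debug", ctx=tvm.cpu(0), target="llvm")
res = intrp.evaluate(func)(np.random.rand(10, 5, 12).astype("float32"))
# res.asnumpy() -> array([10,  5, 12], dtype=int32)
```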
52 changes: 52 additions & 0 deletions src/relay/op/tensor/unary.cc
@@ -7,6 +7,7 @@
#include <tvm/relay/op.h>
#include <tvm/relay/attrs/transform.h>
#include <topi/elemwise.h>
#include <topi/transform.h>
#include "../type_relations.h"
#include "../op_common.h"

@@ -189,5 +190,56 @@ RELAY_REGISTER_UNARY_OP("logical_not")
.set_support_level(4)
.set_attr<FTVMCompute>("FTVMCompute", RELAY_UNARY_COMPUTE(topi::logical_not));


// shape_of
TVM_REGISTER_NODE_TYPE(ShapeOfAttrs);

bool ShapeOfRel(const Array<Type>& types,
                int num_inputs,
                const Attrs& attrs,
                const TypeReporter& reporter) {
  CHECK_EQ(num_inputs, 1);
  auto tt = types[0].as<TensorTypeNode>();
  CHECK(tt != nullptr);
  const auto* param = attrs.as<ShapeOfAttrs>();
  CHECK(param != nullptr);
  auto vector_out = tvm::Integer(tt->shape.size());
  reporter->Assign(types[1], TensorTypeNode::make({ vector_out }, param->dtype));
  return true;
}

Array<Tensor> ShapeOfCompute(const Attrs& attrs,
                             const Array<Tensor>& inputs,
                             const Type& out_type,
                             const Target& target) {
  CHECK_EQ(inputs.size(), 1);
  const auto* param = attrs.as<ShapeOfAttrs>();
  CHECK(param != nullptr);
  return {topi::shape(inputs[0], param->dtype)};
}

TVM_REGISTER_API("relay.op._make.shape_of")
.set_body_typed<Expr(Expr, DataType)>([](Expr data, DataType dtype) {
  auto attrs = make_node<ShapeOfAttrs>();
  attrs->dtype = dtype;
  static const Op& op = Op::Get("shape_of");
  return CallNode::make(op, {data}, Attrs(attrs), {});
});

RELAY_REGISTER_OP("shape_of")
.describe(R"code(Returns a tensor representing the shape of a tensor.
)code" TVM_ADD_FILELINE)
.set_num_inputs(1)
.set_attrs_type_key("relay.attrs.ShapeOfAttrs")
.add_argument("data", "Tensor", "The input tensor.")
.add_type_rel("ShapeOf", ShapeOfRel)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TOpPattern>("TOpPattern", kInjective)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout",
                               ElemwiseArbitraryLayout)
.set_support_level(10)
.set_attr<FTVMCompute>("FTVMCompute", ShapeOfCompute);

} // namespace relay
} // namespace tvm
42 changes: 42 additions & 0 deletions src/relay/pass/fold_constant.cc
@@ -6,6 +6,7 @@
#include <tvm/relay/expr_functor.h>
#include <tvm/relay/op_attr_types.h>
#include <tvm/relay/interpreter.h>
#include <tvm/relay/attrs/transform.h>

namespace tvm {
namespace relay {
@@ -71,6 +72,7 @@ class ConstantFolder : public ExprMutator {

  Expr VisitExpr_(const CallNode* call) final {
    static auto op_stateful = Op::GetAttr<TOpIsStateful>("TOpIsStateful");
    auto origin_args = call->args;
    Expr res = ExprMutator::VisitExpr_(call);
    call = res.as<CallNode>();
    // We don't constant fold function with zero arguments.
@@ -81,6 +83,10 @@
    if (op == nullptr) return res;
    // skip stateful ops.
    if (op_stateful.get(GetRef<Op>(op), false)) return res;
    // Try to evaluate shape_of op
    if (call->op.same_as(Op::Get("shape_of"))) {
      return EvaluateShapeOf(res, origin_args, call->attrs);
    }
    bool all_const_args = true;
    for (Expr arg : call->args) {
      if (!checker_.Check(arg)) {
@@ -132,6 +138,42 @@ class ConstantFolder : public ExprMutator {
    expr = InferType(expr, Module(nullptr));
    return ValueToExpr(executor_(expr));
  }

  // Evaluate a call to the shape_of operator.
  Expr EvaluateShapeOf(Expr expr, Array<Expr> args, Attrs attrs) {
    Expr input = args[0];
    const auto* param = attrs.as<ShapeOfAttrs>();
    CHECK(param != nullptr);
    tvm::Array<IndexExpr> ishape;
    if (const ConstantNode* op = input.as<ConstantNode>()) {
      ishape = op->tensor_type()->shape;
    } else if (input->checked_type_.defined()) {
      ishape = input->checked_type().as<TensorTypeNode>()->shape;
    } else {
      return expr;
    }
    // Materialize the statically known shape as an int32 NDArray on the CPU.
    DLContext ctx;
    ctx.device_type = kDLCPU;
    ctx.device_id = 0;
    auto val = runtime::NDArray::Empty(
        {(int64_t)ishape.size()}, Type2TVMType(Int(32)), ctx);
    int32_t* dims = static_cast<int32_t*>(val->data);
    using ::tvm::ir::IntImm;
    for (size_t i = 0; i < ishape.size(); ++i) {
      if (const IntImm* dim = ishape[i].as<IntImm>()) {
        dims[i] = dim->value;
      } else {
        // A dimension is not a compile-time constant; leave the call untouched.
        return expr;
      }
    }
    Expr shape = ValueToExpr(TensorValueNode::make(val));
    // Cast the constant shape into the requested dtype.
    auto cast_attrs = make_node<CastAttrs>();
    cast_attrs->dtype = param->dtype;
    static const Op& cast_op = Op::Get("cast");
    Expr ret = CallNode::make(cast_op, {shape}, Attrs(cast_attrs), {});
    return ConstEvaluate(ret);
  }
};
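For illustration (not part of the diff), a minimal sketch of what this folding does at the Relay level; the pass only fires once type inference has populated `checked_type_` on the input:

```python
import tvm
from tvm import relay

x = relay.var("x", shape=(8, 9, 10), dtype="float32")
func = relay.Function([x], relay.shape_of(x, dtype="int64"))

func = relay.ir_pass.infer_type(func)     # the input shape becomes statically known
func = relay.ir_pass.fold_constant(func)  # body folds to the constant [8, 9, 10] as int64
```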


17 changes: 17 additions & 0 deletions tests/python/frontend/mxnet/test_forward.py
@@ -380,6 +380,22 @@ def test_forward_l2_normalize():
    verify_mxnet_frontend_impl(mx_sym, (2, 3, 4, 5), (2, 3, 4, 5))


def test_forward_shape_array():
    def verify(shape):
        x_np = np.random.uniform(size=shape).astype("float32")
        ref_res = mx.nd.shape_array(mx.nd.array(x_np))
        mx_sym = mx.sym.shape_array(mx.sym.var("x"))
        new_sym, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape})
        for target, ctx in ctx_list():
            for kind in ["debug"]:
                intrp = relay.create_executor(kind, ctx=ctx, target=target)
                op_res = intrp.evaluate(new_sym)(x_np)
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
    verify((1,))
    verify((3, 4, 5))
    verify((3, 4, 5, 6))


if __name__ == '__main__':
    test_forward_mlp()
    test_forward_vgg()
@@ -409,3 +425,4 @@ def test_forward_l2_normalize():
    test_forward_slice_like()
    test_forward_slice_axis()
    test_forward_l2_normalize()
    test_forward_shape_array()
15 changes: 15 additions & 0 deletions tests/python/relay/test_op_level10.py
@@ -177,10 +177,25 @@ def test_batch_matmul():
    verify_batch_matmul((5, 16, 32), (5, 20, 32), (5, 16, 20))
    verify_batch_matmul((30, 16, 32), (30, 20, 32), (30, 16, 20))

def test_shape_of():
    shape = (10, 5, 12)
    x = relay.var("x", shape=shape)
    func = relay.Function([x], relay.op.shape_of(x))
    func = relay.ir_pass.infer_type(func)
    x_data = np.random.rand(*shape).astype('float32')
    for target, ctx in ctx_list():
        # The graph executor would fold this op away during constant folding,
        # so we only test with the interpreter (debug executor) here.
        for kind in ["debug"]:
            intrp = relay.create_executor(kind, ctx=ctx, target=target)
            op_res = intrp.evaluate(func)(x_data)
            tvm.testing.assert_allclose(op_res.asnumpy(),
                                        np.array(shape).astype('int32'))

if __name__ == "__main__":
    test_collapse_sum_like()
    test_broadcast_to_like()
    test_slice_like()
    test_reverse_reshape()
    test_batch_matmul()
    test_shape_of()
26 changes: 26 additions & 0 deletions tests/python/relay/test_pass_fold_constant.py
@@ -95,8 +95,34 @@ def expected():
    assert relay.ir_pass.graph_equal(zz, zexpected)


def test_fold_shape_of():
    c_shape = (8, 9, 10)
    def before(dtype):
        x = relay.var("x", shape=c_shape, dtype="float32")
        y = relay.var("y", shape=c_shape, dtype="float32")
        z = relay.shape_of(x + y, dtype)
        return relay.Function([x, y], z)

    def expected(dtype):
        x = relay.var("x", shape=c_shape, dtype="float32")
        y = relay.var("y", shape=c_shape, dtype="float32")
        z = relay.const(np.array(c_shape).astype(dtype), dtype=dtype)
        return relay.ir_pass.infer_type(relay.Function([x, y], z))

    for dtype in ["int32", "float32"]:
        zbefore = before(dtype)
        zz = relay.ir_pass.fold_constant(zbefore)
        assert relay.ir_pass.graph_equal(zz, zbefore)

        zz = relay.ir_pass.infer_type(zbefore)
        zz = relay.ir_pass.fold_constant(zz)
        zexpected = expected(dtype)
        assert relay.ir_pass.graph_equal(zz, zexpected)


if __name__ == "__main__":
    test_fold_const()
    test_fold_let()
    test_fold_tuple()
    test_fold_concat()
    test_fold_shape_of()
23 changes: 23 additions & 0 deletions topi/include/topi/transform.h
@@ -1081,5 +1081,28 @@ inline Tensor layout_transform(const Tensor& src,
  }, name, tag);
}

/*!
 * \brief Get the shape of the input tensor.
 * \param src the input tensor.
 * \param dtype the data type of the resulting shape tensor.
 * \param name output tensor name.
 * \param tag output tensor tag.
 * \return 1-D Tensor containing the shape of the input.
 */
inline Tensor shape(const Tensor& src,
                    Type dtype,
                    const std::string name = "shape",
                    const std::string tag = kInjective) {
  int ndim = static_cast<int>(src->shape.size());
  Array<Expr> out_shape{ndim};
  return compute(out_shape, [&](const Array<Var>& indices) {
    auto idx = indices[0];
    Expr ret = 0;
    for (int i = 0; i < ndim; ++i) {
      ret = tvm::if_then_else(idx == i, src->shape[i], ret);
    }
    return tvm::cast(dtype, ret);
  }, name, tag);
}

} // namespace topi
#endif // TOPI_TRANSFORM_H_
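The compute above fills out[i] by chaining `if_then_else` selections over the static dimensions and casting to the requested dtype. A rough Python/TE sketch of the same idea (not part of the diff; it assumes the pre-0.7 `tvm.placeholder`/`tvm.compute` API, and `shape_te` is an illustrative name, not an API in this PR):

```python
import tvm

def shape_te(src, dtype="int32"):
    """Illustrative TE-level equivalent of topi::shape."""
    ndim = len(src.shape)

    def pick(i):
        # out[i] = src.shape[i], built as a chain of selects over the dimensions
        ret = tvm.const(0, "int32")
        for d in range(ndim):
            ret = tvm.if_then_else(i == d, src.shape[d], ret)
        return ret.astype(dtype)

    return tvm.compute((ndim,), pick, name="shape")

A = tvm.placeholder((3, 4, 5), name="A")
S = shape_te(A)   # 1-D int32 tensor holding [3, 4, 5]
```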
19 changes: 19 additions & 0 deletions topi/python/topi/transform.py
@@ -393,3 +393,22 @@ def layout_transform(array, src_layout, dst_layout):
        the destination layout.
    """
    return cpp.layout_transform(array, src_layout, dst_layout)


def shape(array, dtype="int32"):
    """Get the shape of the input array.

    Parameters
    ----------
    array : tvm.Tensor
        The source tensor.

    dtype : str, optional
        The target data type.

    Returns
    -------
    result : tvm.Tensor
        The resulting 1-D shape tensor.
    """
    return cpp.shape(array, dtype)
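A quick end-to-end sketch of the Python API above (not part of the diff; it assumes the pre-0.7 layout where `topi` is a top-level package and `tvm.placeholder`/`tvm.create_schedule`/`tvm.build` are available):

```python
import numpy as np
import tvm
import topi

A = tvm.placeholder((3, 4, 5), name="A", dtype="float32")
S = topi.shape(A, dtype="int32")     # 1-D tensor of length 3

s = tvm.create_schedule(S.op)
f = tvm.build(s, [A, S], "llvm")

a = tvm.nd.array(np.zeros((3, 4, 5), dtype="float32"))
out = tvm.nd.array(np.zeros((3,), dtype="int32"))
f(a, out)
# out.asnumpy() -> array([3, 4, 5], dtype=int32)
```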