From 3d77cd9448f3764f961877acb1dee441b4fe04d9 Mon Sep 17 00:00:00 2001 From: Zhi <5145158+zhiics@users.noreply.github.com> Date: Fri, 10 Jul 2020 11:03:23 -0700 Subject: [PATCH] [REFACTOR][RELAY] Move invoke_tvm_op and shape_func to vm dialect (#5958) * [REFACTOR][RELAY] Move invoke_tvm_op and shape_func to vm dialect * address comments --- include/tvm/relay/attrs/memory.h | 13 --- include/tvm/relay/attrs/vm.h | 47 ++++++++ python/tvm/relay/op/__init__.py | 2 +- python/tvm/relay/op/memory/memory.py | 40 ------- python/tvm/relay/op/vm/__init__.py | 2 +- python/tvm/relay/op/vm/vm.py | 48 ++++++++ python/tvm/relay/transform/memory_alloc.py | 4 +- src/relay/backend/vm/compiler.cc | 4 +- src/relay/op/memory/memory.cc | 124 -------------------- src/relay/op/vm/vm.cc | 127 +++++++++++++++++++++ src/relay/transforms/fold_constant.cc | 4 +- 11 files changed, 230 insertions(+), 185 deletions(-) create mode 100644 include/tvm/relay/attrs/vm.h diff --git a/include/tvm/relay/attrs/memory.h b/include/tvm/relay/attrs/memory.h index 7429c396ea00..b737103e715f 100644 --- a/include/tvm/relay/attrs/memory.h +++ b/include/tvm/relay/attrs/memory.h @@ -74,19 +74,6 @@ struct AllocTensorAttrs : public tvm::AttrsNode { } }; -/*! - * \brief Options for the shape function operator. - */ -struct ShapeFuncAttrs : public tvm::AttrsNode { - Array is_input; - - TVM_DECLARE_ATTRS(ShapeFuncAttrs, "relay.attrs.ShapeFuncAttrs") { - TVM_ATTR_FIELD(is_input).describe( - "A bool indicating whether the shape function should" - "expect shape or input in each position."); - } -}; - } // namespace relay } // namespace tvm #endif // TVM_RELAY_ATTRS_MEMORY_H_ diff --git a/include/tvm/relay/attrs/vm.h b/include/tvm/relay/attrs/vm.h new file mode 100644 index 000000000000..9144f4734e12 --- /dev/null +++ b/include/tvm/relay/attrs/vm.h @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! + * \file tvm/relay/attrs/vm.h + * \brief Attributes for Relay vm operators. + */ +#ifndef TVM_RELAY_ATTRS_VM_H_ +#define TVM_RELAY_ATTRS_VM_H_ + +#include + +namespace tvm { +namespace relay { + +/*! + * \brief Options for the shape function operator. 
+ */ +struct ShapeFuncAttrs : public tvm::AttrsNode { + Array is_input; + + TVM_DECLARE_ATTRS(ShapeFuncAttrs, "relay.attrs.ShapeFuncAttrs") { + TVM_ATTR_FIELD(is_input).describe( + "A bool indicating whether the shape function should" + "expect shape or input in each position."); + } +}; + +} // namespace relay +} // namespace tvm +#endif // TVM_RELAY_ATTRS_VM_H_ diff --git a/python/tvm/relay/op/__init__.py b/python/tvm/relay/op/__init__.py index a45d466a2623..011042bd0afc 100644 --- a/python/tvm/relay/op/__init__.py +++ b/python/tvm/relay/op/__init__.py @@ -27,7 +27,7 @@ from .tensor import * from .transform import * from .algorithm import * -from .vm import * +from . import vm from . import nn from . import annotation from . import memory diff --git a/python/tvm/relay/op/memory/memory.py b/python/tvm/relay/op/memory/memory.py index 4092545d552c..b426a0ea37cd 100644 --- a/python/tvm/relay/op/memory/memory.py +++ b/python/tvm/relay/op/memory/memory.py @@ -19,27 +19,6 @@ from __future__ import absolute_import as _abs from . import _make -def invoke_tvm_op(func, inputs, outputs): - """Call a primitive function with the TVM operator calling convention. - - Parameters - ---------- - func : tvm.relay.Expr - The input expr. - - inputs : tvm.relay.Expr - A tuple of the inputs to pass to the TVM function. - - outputs : tvm.relay.Expr - A tuple of the outputs to pass to the TVM function. - - Returns - ------- - result : tvm.relay.Expr - The invoke_tvm_op call node. - """ - return _make.invoke_tvm_op(func, inputs, outputs) - def alloc_tensor(storage, offset, shape, dtype='float32', assert_shape=None): """Allocate a tensor with the provided shape, and dtype. @@ -85,25 +64,6 @@ def alloc_storage(size, alignment, ctx, dtype_hint='float32'): """ return _make.alloc_storage(size, alignment, ctx, dtype_hint) -def shape_func(func, inputs, outputs, dependent=False): - """Invoke the shape function of the passed function. - - Parameters - ---------- - func : tvm.relay.Expr - The primitive function from which to compute the shape function. - inputs : tvm.relay.Tuple - The tupled inputs. - outputs : tvm.relay.Tuple - The tupled outputs. - - Returns - ------- - result : tvm.relay.Expr - The shape function expression. - """ - return _make.shape_func(func, inputs, outputs, dependent) - def flatten_tuple_type(ty): """Return a sequence of the types contained in the tuple type in order. diff --git a/python/tvm/relay/op/vm/__init__.py b/python/tvm/relay/op/vm/__init__.py index 2ac1e5743cb1..7e128c9334ce 100644 --- a/python/tvm/relay/op/vm/__init__.py +++ b/python/tvm/relay/op/vm/__init__.py @@ -17,4 +17,4 @@ # pylint: disable=wildcard-import """Dialect operators for Relay VM.""" from __future__ import absolute_import as _abs -from . import vm +from .vm import * diff --git a/python/tvm/relay/op/vm/vm.py b/python/tvm/relay/op/vm/vm.py index 680729df88eb..761188ace03a 100644 --- a/python/tvm/relay/op/vm/vm.py +++ b/python/tvm/relay/op/vm/vm.py @@ -33,3 +33,51 @@ def shape_of(expr): The expression with the evaluated tensor shape. """ return _ffi_api.shape_of(expr) + + +def invoke_tvm_op(func, inputs, outputs): + """Call a primitive function with the TVM operator calling convention. + + Parameters + ---------- + func : tvm.relay.Expr + The input expr. + + inputs : tvm.relay.Expr + A tuple of the inputs to pass to the TVM function. + + outputs : tvm.relay.Expr + A tuple of the outputs to pass to the TVM function. + + Returns + ------- + result : tvm.relay.Expr + The invoke_tvm_op call node. 
+ """ + return _ffi_api.invoke_tvm_op(func, inputs, outputs) + + +def shape_func(func, inputs, outputs, is_inputs): + """Invoke the shape function of the passed function. + + Parameters + ---------- + func : tvm.relay.Expr + The primitive function from which to compute the shape function. + + inputs : tvm.relay.Tuple + The tupled inputs. + + outputs : tvm.relay.Tuple + The tupled outputs. + + is_inputs : List[bool] + A boolean list indicating whether the shape function should expect + shape or input at each position. + + Returns + ------- + result : tvm.relay.Expr + The shape function expression. + """ + return _ffi_api.shape_func(func, inputs, outputs, is_inputs) diff --git a/python/tvm/relay/transform/memory_alloc.py b/python/tvm/relay/transform/memory_alloc.py index a7ba2a8a5678..805905c0c18f 100644 --- a/python/tvm/relay/transform/memory_alloc.py +++ b/python/tvm/relay/transform/memory_alloc.py @@ -42,8 +42,8 @@ class ManifestAllocPass(ExprMutator): """A pass for explicitly manifesting all memory allocations in Relay.""" def __init__(self, target_host): - self.invoke_tvm = op.memory.invoke_tvm_op - self.shape_func = op.memory.shape_func + self.invoke_tvm = op.vm.invoke_tvm_op + self.shape_func = op.vm.shape_func self.shape_of = op.vm.shape_of self.scopes = [ScopeBuilder()] self.target_host = target_host diff --git a/src/relay/backend/vm/compiler.cc b/src/relay/backend/vm/compiler.cc index 2151acfb216f..d01dbda24a4c 100644 --- a/src/relay/backend/vm/compiler.cc +++ b/src/relay/backend/vm/compiler.cc @@ -519,7 +519,7 @@ class VMFunctionCompiler : ExprFunctor { if (op.as()) { OpMatch matcher; matcher - .Match("memory.invoke_tvm_op", + .Match("vm.invoke_tvm_op", [this](const Array& args, const Attrs& attrs, const Array& type_arg) { CHECK_EQ(args.size(), 3); EmitInvokeTVMOp(Downcast(args[0]), args[1], args[2]); @@ -581,7 +581,7 @@ class VMFunctionCompiler : ExprFunctor { Emit(Instruction::AllocStorage(size_register, alignment, dtype, NewRegister())); }) - .Match("memory.shape_func", + .Match("vm.shape_func", [this](const Array& args, const Attrs& attrs, const Array& type_arg) { CHECK_EQ(args.size(), 3); auto shape_func = Downcast(args[0]); diff --git a/src/relay/op/memory/memory.cc b/src/relay/op/memory/memory.cc index e5081adbf6a7..de73b44aed4d 100644 --- a/src/relay/op/memory/memory.cc +++ b/src/relay/op/memory/memory.cc @@ -38,7 +38,6 @@ namespace relay { TVM_REGISTER_NODE_TYPE(AllocStorageAttrs); TVM_REGISTER_NODE_TYPE(AllocTensorAttrs); -TVM_REGISTER_NODE_TYPE(ShapeFuncAttrs); // The passing value in attrs and args doesn't seem super great. 
// We should consider a better solution, i.e the type relation @@ -197,54 +196,6 @@ RELAY_REGISTER_OP("memory.alloc_tensor") return {topi::identity(inputs[0])}; }); -bool InvokeTVMOPRel(const Array& types, int num_inputs, const Attrs& attrs, - const TypeReporter& reporter) { - CHECK_EQ(types.size(), 4u); - auto func_type = types[0].as(); - CHECK(func_type != nullptr) << "input must be operator with known type"; - auto input_type = types[1].as(); - auto output_type = types[2].as(); - CHECK(input_type != nullptr) - << "internal invariant violated: invoke_tvm_op inputs must be a tuple"; - CHECK(output_type != nullptr) - << "internal invariant violated: invoke_tvm_op outputs must be a tuple"; - Type ex_output; - if (func_type->ret_type.as()) { - ex_output = TupleType({func_type->ret_type}); - } else { - CHECK(func_type->ret_type.as()) << "should be tuple type"; - ex_output = func_type->ret_type; - } - auto ex_input = TupleType(func_type->arg_types); - reporter->Assign(ex_input, GetRef(input_type)); - reporter->Assign(ex_output, GetRef(output_type)); - reporter->Assign(types[3], TupleType::Empty()); - return true; -} - -TVM_REGISTER_GLOBAL("relay.op.memory._make.invoke_tvm_op") - .set_body_typed([](Expr func, Expr inputs, Expr outputs) { - return Call(Op::Get("memory.invoke_tvm_op"), {func, inputs, outputs}, Attrs()); - }); - -RELAY_REGISTER_OP("memory.invoke_tvm_op") - .describe(R"code(Invoke an operation compiled by TVM.)code" TVM_ADD_FILELINE) - .set_num_inputs(3) - .add_argument("op", "Function", "The operation to call") - .add_argument("ins", "Tuple", "The input tensors.") - .add_argument("outs", "Tuple", "The output tensors.") - .add_type_rel("InvokeTVMOP", InvokeTVMOPRel) - .set_support_level(10) - .set_attr("TOpPattern", kOpaque) - .set_attr("TOpIsStateful", false) - .set_attr("TNonComputational", true) - .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) - .set_attr("FTVMCompute", - [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype) -> Array { - return {topi::identity(inputs[0])}; - }); - bool KillRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { CHECK_EQ(types.size(), 2u); @@ -269,14 +220,6 @@ RELAY_REGISTER_OP("memory.kill") return {topi::identity(inputs[0])}; }); -TVM_REGISTER_GLOBAL("relay.op.memory._make.shape_func") - .set_body_typed([](Expr func, Expr inputs, Expr outputs, Array is_input) { - static const Op& op = Op::Get("memory.shape_func"); - auto attrs = make_object(); - attrs->is_input = is_input; - return Call(op, {func, inputs, outputs}, Attrs(attrs), {}); - }); - static void FlattenTupleTypeAux(const Type& type, std::vector* out) { if (auto tt = type.as()) { out->push_back(GetRef(tt)); @@ -356,72 +299,5 @@ TVM_REGISTER_GLOBAL("relay.op.memory._make.ToTupleType") return ToTupleType(t, std::vector(array.begin(), array.end())); }); -bool ShapeFuncRel(const Array& types, int num_inputs, const Attrs& attrs, - const TypeReporter& reporter) { - CHECK_EQ(types.size(), 4u); - auto shape_func_attrs = attrs.as(); - CHECK(shape_func_attrs != nullptr) << "Internal compiler error"; - - auto func_type = types[0].as(); - CHECK(func_type != nullptr); - - auto tuple = TupleType(func_type->arg_types); - auto in_types = FlattenTupleType(tuple); - auto out_types = FlattenTupleType(func_type->ret_type); - Array is_input; - for (size_t i = 0; i < func_type->arg_types.size(); ++i) { - auto const& aty = func_type->arg_types[i]; - size_t num_types = 1; - if (aty.as()) { - num_types = FlattenTupleType(aty).size(); - } - for (size_t 
j = 0; j < num_types; ++j) { - is_input.push_back(shape_func_attrs->is_input[i]); - } - } - - Array shape_func_ins, shape_func_outs; - for (size_t i = 0; i < in_types.size(); i++) { - auto in_type = in_types[i]; - - if (is_input[i]) { - shape_func_ins.push_back(in_type); - } else { - auto shape = RankShape(in_type->shape); - shape_func_ins.push_back(TensorType(shape, DataType::Int(64))); - } - } - - for (auto out_type : out_types) { - auto rank_shape = RankShape(out_type->shape); - shape_func_outs.push_back(TensorType(rank_shape, DataType::Int(64))); - } - - auto input_type = TupleType(shape_func_ins); - auto output_type = TupleType(shape_func_outs); - - reporter->Assign(types[1], input_type); - reporter->Assign(types[2], output_type); - reporter->Assign(types[3], TupleType::Empty()); - - return true; -} - -RELAY_REGISTER_OP("memory.shape_func") - .describe(R"code(Get the shape of a tensor.)code" TVM_ADD_FILELINE) - .set_num_inputs(3) - .add_argument("tensor", "Tensor", "The tensor to retrieve the shape for.") - .add_type_rel("ShapeFuncRel", ShapeFuncRel) - .set_support_level(10) - .set_attr("TOpPattern", kOpaque) - .set_attr("TOpIsStateful", false) - .set_attr("TNonComputational", true) - .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) - .set_attr("FTVMCompute", - [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype) -> Array { - return {topi::identity(inputs[0])}; - }); - } // namespace relay } // namespace tvm diff --git a/src/relay/op/vm/vm.cc b/src/relay/op/vm/vm.cc index af33100add31..ffe276e4493c 100644 --- a/src/relay/op/vm/vm.cc +++ b/src/relay/op/vm/vm.cc @@ -23,6 +23,8 @@ */ #include +#include +#include #include #include #include @@ -35,6 +37,8 @@ namespace tvm { namespace relay { +TVM_REGISTER_NODE_TYPE(ShapeFuncAttrs); + RELAY_REGISTER_OP("vm.shape_of") .describe(R"code(Get the shape of an input tensor. 
)code" TVM_ADD_FILELINE) @@ -54,5 +58,128 @@ TVM_REGISTER_GLOBAL("relay.op.vm.shape_of").set_body_typed([](Expr expr) { return Call(op, {expr}, Attrs(attrs), {}); }); +TVM_REGISTER_GLOBAL("relay.op.vm.shape_func") + .set_body_typed([](Expr func, Expr inputs, Expr outputs, Array is_input) { + static const Op& op = Op::Get("vm.shape_func"); + auto attrs = make_object(); + attrs->is_input = is_input; + return Call(op, {func, inputs, outputs}, Attrs(attrs), {}); + }); + +bool ShapeFuncRel(const Array& types, int num_inputs, const Attrs& attrs, + const TypeReporter& reporter) { + CHECK_EQ(types.size(), 4u); + auto shape_func_attrs = attrs.as(); + CHECK(shape_func_attrs != nullptr) << "Internal compiler error"; + + auto func_type = types[0].as(); + CHECK(func_type != nullptr); + + auto tuple = TupleType(func_type->arg_types); + auto in_types = FlattenTupleType(tuple); + auto out_types = FlattenTupleType(func_type->ret_type); + Array is_input; + for (size_t i = 0; i < func_type->arg_types.size(); ++i) { + auto const& aty = func_type->arg_types[i]; + size_t num_types = 1; + if (aty.as()) { + num_types = FlattenTupleType(aty).size(); + } + for (size_t j = 0; j < num_types; ++j) { + is_input.push_back(shape_func_attrs->is_input[i]); + } + } + + Array shape_func_ins, shape_func_outs; + for (size_t i = 0; i < in_types.size(); i++) { + auto in_type = in_types[i]; + + if (is_input[i]) { + shape_func_ins.push_back(in_type); + } else { + auto shape = RankShape(in_type->shape); + shape_func_ins.push_back(TensorType(shape, DataType::Int(64))); + } + } + + for (auto out_type : out_types) { + auto rank_shape = RankShape(out_type->shape); + shape_func_outs.push_back(TensorType(rank_shape, DataType::Int(64))); + } + + auto input_type = TupleType(shape_func_ins); + auto output_type = TupleType(shape_func_outs); + + reporter->Assign(types[1], input_type); + reporter->Assign(types[2], output_type); + reporter->Assign(types[3], TupleType::Empty()); + + return true; +} + +RELAY_REGISTER_OP("vm.shape_func") + .describe(R"code(Get the shape of a tensor.)code" TVM_ADD_FILELINE) + .set_num_inputs(3) + .add_argument("tensor", "Tensor", "The tensor to retrieve the shape for.") + .add_type_rel("ShapeFuncRel", ShapeFuncRel) + .set_support_level(10) + .set_attr("TOpPattern", kOpaque) + .set_attr("TOpIsStateful", false) + .set_attr("TNonComputational", true) + .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) + .set_attr("FTVMCompute", + [](const Attrs& attrs, const Array& inputs, + const Type& out_dtype) -> Array { + return {topi::identity(inputs[0])}; + }); + +bool InvokeTVMOpRel(const Array& types, int num_inputs, const Attrs& attrs, + const TypeReporter& reporter) { + CHECK_EQ(types.size(), 4u); + auto func_type = types[0].as(); + CHECK(func_type != nullptr) << "input must be operator with known type"; + auto input_type = types[1].as(); + auto output_type = types[2].as(); + CHECK(input_type != nullptr) + << "internal invariant violated: invoke_tvm_op inputs must be a tuple"; + CHECK(output_type != nullptr) + << "internal invariant violated: invoke_tvm_op outputs must be a tuple"; + Type ex_output; + if (func_type->ret_type.as()) { + ex_output = TupleType({func_type->ret_type}); + } else { + CHECK(func_type->ret_type.as()) << "should be tuple type"; + ex_output = func_type->ret_type; + } + auto ex_input = TupleType(func_type->arg_types); + reporter->Assign(ex_input, GetRef(input_type)); + reporter->Assign(ex_output, GetRef(output_type)); + reporter->Assign(types[3], TupleType::Empty()); + return true; +} + 
+TVM_REGISTER_GLOBAL("relay.op.vm.invoke_tvm_op") + .set_body_typed([](Expr func, Expr inputs, Expr outputs) { + return Call(Op::Get("vm.invoke_tvm_op"), {func, inputs, outputs}, Attrs()); + }); + +RELAY_REGISTER_OP("vm.invoke_tvm_op") + .describe(R"code(Invoke an operation compiled by TVM.)code" TVM_ADD_FILELINE) + .set_num_inputs(3) + .add_argument("op", "Function", "The operation to call") + .add_argument("ins", "Tuple", "The input tensors.") + .add_argument("outs", "Tuple", "The output tensors.") + .add_type_rel("InvokeTVMOp", InvokeTVMOpRel) + .set_support_level(10) + .set_attr("TOpPattern", kOpaque) + .set_attr("TOpIsStateful", false) + .set_attr("TNonComputational", true) + .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) + .set_attr("FTVMCompute", + [](const Attrs& attrs, const Array& inputs, + const Type& out_dtype) -> Array { + return {topi::identity(inputs[0])}; + }); + } // namespace relay } // namespace tvm diff --git a/src/relay/transforms/fold_constant.cc b/src/relay/transforms/fold_constant.cc index d66d6bccdea1..0b873bf91bf1 100644 --- a/src/relay/transforms/fold_constant.cc +++ b/src/relay/transforms/fold_constant.cc @@ -82,8 +82,8 @@ class ConstantFolder : public ExprMutator { module_(module), shape_of_op_(Op::Get("shape_of")), vm_shape_of_op_(Op::Get("vm.shape_of")), - invoke_tvm_op_(Op::Get("memory.invoke_tvm_op")), - shape_func_op_(Op::Get("memory.shape_func")), + invoke_tvm_op_(Op::Get("vm.invoke_tvm_op")), + shape_func_op_(Op::Get("vm.shape_func")), alloc_tensor_op_(Op::Get("memory.alloc_tensor")), alloc_storage_op_(Op::Get("memory.alloc_storage")), cast_op_(Op::Get("cast")) {}