From a23592cdd3c28aae31ee0e5c713cfe02f5ad3cab Mon Sep 17 00:00:00 2001 From: Lily Orth-Smith Date: Tue, 14 Jul 2020 15:43:07 -0700 Subject: [PATCH] [RELAY][DYN] Dynamic broadcast_to, zeros, ones (#6007) * Dynamic BroadcastTo * fixed lint! * add test_one_hot() back * add one_hot registration back * Dynamic BroadcastTo * fixed lint! * add one_hot registration back * fixed lint.. again * fixed lint * lint * responding to comments * skipping cuda in dynamic test * skipping cuda in dynamic test * fixed i386 test and GPU test * lint * starting ones and zeros * fixed dynamic ones and zeros, wrote dyn ones and zeros test * added static version of zeros, ones and added a check for size of types to static BroadCastToRel * added dynamic to static pass for zeros and ones, dynamic test and dynamic to static test * removed op_str in dyn to static pass test * fixed lint * fix lint hopefully * removed import const * removed import that was actually used * copy all attributes from broadcast_to, ones, zeros, full * responding to comments * fixed build error * finishing rebase * fix lint Co-authored-by: Lily Orth-Smith --- python/tvm/relay/_parser.py | 2 - python/tvm/relay/op/_tensor.py | 4 +- python/tvm/relay/op/dyn/__init__.py | 1 + python/tvm/relay/op/dyn/_tensor.py | 46 ++++++++ python/tvm/relay/op/dyn/_transform.py | 1 + python/tvm/relay/op/tensor.py | 15 ++- python/tvm/relay/op/transform.py | 6 +- src/relay/op/dyn/tensor/transform.cc | 109 ++++++++++++++++++ src/relay/op/make_op.h | 6 +- src/relay/op/tensor/transform.cc | 98 +++++++--------- src/relay/transforms/dynamic_to_static.cc | 18 ++- src/relay/transforms/pattern_util.h | 27 ++++- .../relay/dyn/test_dynamic_op_level10.py | 54 +++++++++ .../relay/dyn/test_dynamic_op_level3.py | 24 ++++ tests/python/relay/test_op_level10.py | 5 +- .../relay/test_pass_dynamic_to_static.py | 44 ++++++- 16 files changed, 384 insertions(+), 76 deletions(-) create mode 100644 python/tvm/relay/op/dyn/_tensor.py create mode 100644 tests/python/relay/dyn/test_dynamic_op_level10.py diff --git a/python/tvm/relay/_parser.py b/python/tvm/relay/_parser.py index ac60a1f7bb51..0d3f86f6262d 100644 --- a/python/tvm/relay/_parser.py +++ b/python/tvm/relay/_parser.py @@ -116,8 +116,6 @@ def __call__(self, args, attrs, type_args): attrs = {} if self.operator in (op.strided_slice,): x = self.operator(*args) - elif self.operator in (op.zeros, op.ones, op.full, op.broadcast_to): - x = self.operator(*args, dtype=attrs["dtype"]) else: x = self.operator(*args, **{k: self.convert(v) for k, v in attrs.items()}) if isinstance(x, expr.TupleWrapper): diff --git a/python/tvm/relay/op/_tensor.py b/python/tvm/relay/op/_tensor.py index cd9e4ed050d2..d4911d95e90d 100644 --- a/python/tvm/relay/op/_tensor.py +++ b/python/tvm/relay/op/_tensor.py @@ -92,7 +92,7 @@ # zeros @register_compute("zeros") def zeros_compute(attrs, inputs, output_type): - assert len(inputs) == 1 + assert not inputs return [topi.full(output_type.shape, output_type.dtype, 0.0)] register_broadcast_schedule("zeros") @@ -109,7 +109,7 @@ def zeros_like_compute(attrs, inputs, output_type): # ones @register_compute("ones") def ones_compute(attrs, inputs, output_type): - assert len(inputs) == 1 + assert not inputs return [topi.full(output_type.shape, output_type.dtype, 1.0)] register_broadcast_schedule("ones") diff --git a/python/tvm/relay/op/dyn/__init__.py b/python/tvm/relay/op/dyn/__init__.py index f4d47a6d780c..967ecbc36bad 100644 --- a/python/tvm/relay/op/dyn/__init__.py +++ b/python/tvm/relay/op/dyn/__init__.py @@ -19,3 +19,4 @@ 
from . import _algorithm from . import _transform +from . import _tensor diff --git a/python/tvm/relay/op/dyn/_tensor.py b/python/tvm/relay/op/dyn/_tensor.py new file mode 100644 index 000000000000..dc2835977fb9 --- /dev/null +++ b/python/tvm/relay/op/dyn/_tensor.py @@ -0,0 +1,46 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +#pylint: disable=invalid-name, unused-argument, len-as-condition +"""Backend compiler related feature registration for dynamic ops""" + +import topi + +from ..op import register_shape_func, register_compute +from ..op import register_broadcast_schedule +from ..op import register_pattern, OpPattern +from .._tensor import full_shape_func, no_data_full_shape_func + +# ones +@register_compute("dyn.ones") +def ones_compute(attrs, inputs, output_type): + assert len(inputs) == 1 + return [topi.full(output_type.shape, output_type.dtype, 1.0)] + +register_broadcast_schedule("dyn.ones") +register_pattern("dyn.ones", OpPattern.ELEMWISE) + +@register_compute("dyn.zeros") +def zeros_compute(attrs, inputs, output_type): + assert len(inputs) == 1 + return [topi.full(output_type.shape, output_type.dtype, 0.0)] + +register_broadcast_schedule("dyn.zeros") +register_pattern("dyn.zeros", OpPattern.ELEMWISE) + +register_shape_func("dyn.broadcast_to", True, full_shape_func) +register_shape_func("dyn.ones", True, no_data_full_shape_func) +register_shape_func("dyn.zeros", True, no_data_full_shape_func) diff --git a/python/tvm/relay/op/dyn/_transform.py b/python/tvm/relay/op/dyn/_transform.py index 8279b1249ced..e2704bc24e62 100644 --- a/python/tvm/relay/op/dyn/_transform.py +++ b/python/tvm/relay/op/dyn/_transform.py @@ -22,6 +22,7 @@ from tvm.te.hybrid import script from .. import op as _reg +_reg.register_broadcast_schedule("dyn.broadcast_to") _reg.register_injective_schedule("dyn.reshape") _reg.register_broadcast_schedule("dyn.tile") diff --git a/python/tvm/relay/op/tensor.py b/python/tvm/relay/op/tensor.py index c60dbee6dd64..a02e08d2deb7 100644 --- a/python/tvm/relay/op/tensor.py +++ b/python/tvm/relay/op/tensor.py @@ -20,7 +20,8 @@ from tvm.runtime import TVMContext as _TVMContext from . import _make -from ..expr import Tuple, const +from .dyn import _make as _dyn_make +from ..expr import Tuple, Expr # We create a wrapper function for each operator in the @@ -939,8 +940,12 @@ def zeros(shape, dtype): result : relay.Expr The resulting tensor. """ + if isinstance(shape, Expr): + return _dyn_make.zeros(shape, dtype) + if isinstance(shape, int): + shape = [shape] if isinstance(shape, (list, tuple)): - shape = const(list(shape), "int32") + shape = list(shape) return _make.zeros(shape, dtype) @@ -976,8 +981,12 @@ def ones(shape, dtype): result : relay.Expr The resulting tensor. 
""" + if isinstance(shape, Expr): + return _dyn_make.ones(shape, dtype) + if isinstance(shape, int): + shape = [shape] if isinstance(shape, (list, tuple)): - shape = const(list(shape), "int32") + shape = list(shape) return _make.ones(shape, dtype) diff --git a/python/tvm/relay/op/transform.py b/python/tvm/relay/op/transform.py index 173db64de258..83008a9c1cc5 100644 --- a/python/tvm/relay/op/transform.py +++ b/python/tvm/relay/op/transform.py @@ -661,8 +661,12 @@ def broadcast_to(data, shape): result : relay.Expr The resulting tensor. """ + if isinstance(shape, Expr): + return _dyn_make.broadcast_to(data, shape) + if isinstance(shape, int): + shape = [shape] if isinstance(shape, (list, tuple)): - shape = const(list(shape), "int32") + shape = list(shape) return _make.broadcast_to(data, shape) def broadcast_to_like(data, broadcast_type): diff --git a/src/relay/op/dyn/tensor/transform.cc b/src/relay/op/dyn/tensor/transform.cc index 0b8a15676cc3..007b3dd86028 100644 --- a/src/relay/op/dyn/tensor/transform.cc +++ b/src/relay/op/dyn/tensor/transform.cc @@ -23,12 +23,14 @@ */ #include "transform.h" +#include #include #include #include #include #include +#include #include namespace tvm { @@ -36,6 +38,7 @@ namespace relay { namespace dyn { /* relay.dyn.reshape */ + bool ReshapeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // types: [data, newshape, result] @@ -195,6 +198,112 @@ RELAY_REGISTER_OP("dyn.tile") .set_attr("FTVMCompute", TileCompute) .set_attr("TOpPattern", kInjective); +// broadcast_to operator +bool BroadCastToRel(const Array& types, int num_inputs, const Attrs& attrs, + const TypeReporter& reporter) { + // types = [data_type, broadcast_shape_type, ret_type] + CHECK_EQ(types.size(), 3); + + const auto* target_shape = types[1].as(); + DataType out_dtype = types[0].as()->dtype; + // rank must be static + const IntImmNode* rank = target_shape->shape[0].as(); + CHECK(rank) << "Target shape must have static rank"; // rank must be static even in dyn pass + // could add support for dyn rank in futures + + std::vector oshape; + for (int i = 0; i < rank->value; ++i) { + oshape.push_back(Any()); + } + + reporter->Assign(types[2], TensorType(oshape, out_dtype)); + return true; +} + +Expr MakeBroadCastTo(Expr data, Expr shape) { + static const Op& op = Op::Get("dyn.broadcast_to"); + auto attrs = make_object(); + return Call(op, {data, shape}, Attrs(attrs), {}); +} + +Array BroadCastToCompute(const Attrs& attrs, const Array& inputs, + const Type& out_type) { + const auto* out_ttype = out_type.as(); + return {topi::broadcast_to(inputs[0], out_ttype->shape)}; +} + +TVM_REGISTER_GLOBAL("relay.op.dyn._make.broadcast_to").set_body_typed(MakeBroadCastTo); + +RELAY_REGISTER_OP("dyn.broadcast_to") + .describe(R"code(Broadcast the first input to match the shape argument. 
+)code" TVM_ADD_FILELINE) + .set_num_inputs(2) + .add_argument("data", "Tensor", "The input tensor.") + .add_argument("shape", "Tensor", "Target shape.") + .set_support_level(4) + .add_type_rel("DynamicBroadCastTo", BroadCastToRel) + .set_attr("FTVMCompute", BroadCastToCompute) + .set_attr("TOpPattern", kBroadcast); + +// zeros and ones operator +bool InitOpRel(const Array& types, int num_inputs, const Attrs& attrs, + const TypeReporter& reporter) { + // types = [zeros_shape, ret_type] + CHECK_EQ(types.size(), 2); + const InitOpAttrs* param = attrs.as(); + const auto* fill_shape = types[0].as(); + DataType out_dtype = param->dtype; + + const IntImmNode* shape_shape = fill_shape->shape[0].as(); + CHECK(shape_shape) << "Parameter shape must have static rank"; + + std::vector oshape; + for (int i = 0; i < shape_shape->value; ++i) { + oshape.push_back(Any()); + } + + reporter->Assign(types[1], TensorType(oshape, out_dtype)); + return true; +} + +Expr MakeZeros(Expr shape, DataType dtype) { + auto attrs = make_object(); + attrs->dtype = std::move(dtype); + static const Op& op = Op::Get("dyn.zeros"); + return Call(op, {shape}, Attrs(attrs), {}); +} + +TVM_REGISTER_GLOBAL("relay.op.dyn._make.zeros").set_body_typed(MakeZeros); + +RELAY_REGISTER_OP("dyn.zeros") + .describe(R"code(Fill array with zeros. + +)code" TVM_ADD_FILELINE) + .set_attrs_type() + .set_num_inputs(1) + .add_argument("shape", "Tensor", "Target shape.") + .set_support_level(3) + .add_type_rel("DynamicInitOp", InitOpRel); + +Expr MakeOnes(Expr shape, DataType dtype) { + auto attrs = make_object(); + attrs->dtype = std::move(dtype); + static const Op& op = Op::Get("dyn.ones"); + return Call(op, {shape}, Attrs(attrs), {}); +} + +TVM_REGISTER_GLOBAL("relay.op.dyn._make.ones").set_body_typed(MakeOnes); + +RELAY_REGISTER_OP("dyn.ones") + .describe(R"code(Fill array with ones. 
+ +)code" TVM_ADD_FILELINE) + .set_attrs_type() + .set_num_inputs(1) + .add_argument("shape", "Tensor", "Target shape.") + .set_support_level(3) + .add_type_rel("DynamicInitOp", InitOpRel); + } // namespace dyn } // namespace relay } // namespace tvm diff --git a/src/relay/op/make_op.h b/src/relay/op/make_op.h index b5c7a526c658..3b5e9a195957 100644 --- a/src/relay/op/make_op.h +++ b/src/relay/op/make_op.h @@ -36,7 +36,7 @@ namespace tvm { namespace relay { -Expr MakeBroadCastTo(Expr data, Expr shape); +Expr MakeBroadCastTo(Expr data, Array shape); Expr MakeCast(Expr data, DataType dtype); @@ -52,7 +52,7 @@ Expr MakeFull(Expr fill_value, Expr shape, DataType dtype); Expr MakeLayoutTransform(Expr data, String src_layout, String dst_layout); -Expr MakeOnes(Expr shape, DataType dtype); +Expr MakeOnes(Array shape, DataType dtype); Expr MakePad(Expr data, Array> pad_width, double pad_value, String pad_mode); @@ -76,7 +76,7 @@ Expr MakeTopK(Expr data, int k, int axis, String ret_type, bool is_ascend, DataT Expr MakeVariance(Expr data, Expr mean, Array axis, bool keepdims, bool exclude); -Expr MakeZeros(Expr shape, DataType dtype); +Expr MakeZeros(Array shape, DataType dtype); } // namespace relay } // namespace tvm diff --git a/src/relay/op/tensor/transform.cc b/src/relay/op/tensor/transform.cc index 9d5f248cb229..85e8671cf8d5 100644 --- a/src/relay/op/tensor/transform.cc +++ b/src/relay/op/tensor/transform.cc @@ -979,37 +979,29 @@ RELAY_REGISTER_OP("full") bool InitOpRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); - const InitOpAttrs* param = attrs.as(); - const auto* fill_shape = types[0].as(); - DataType out_dtype = param->dtype; + // types = [ret_type] + CHECK_EQ(types.size(), 1); - const IntImmNode* shape_shape = fill_shape->shape[0].as(); - CHECK(shape_shape) << "Parameter shape must have static shape"; + const InitOpAttrs* param = attrs.as(); + CHECK(param); + DataType out_dtype = param->dtype; std::vector oshape; - if (param->shape) { - const Array& cshape_array = param->shape.value(); - for (size_t i = 0; i < cshape_array.size(); ++i) { - oshape.push_back(cshape_array[i]); - } - } else { - for (int i = 0; i < shape_shape->value; ++i) { - oshape.push_back(Any()); - } + + const Array& cshape_array = param->shape.value(); + for (size_t i = 0; i < cshape_array.size(); ++i) { + oshape.push_back(cshape_array[i]); } - reporter->Assign(types[1], TensorType(oshape, out_dtype)); + reporter->Assign(types[0], TensorType(oshape, out_dtype)); return true; } -Expr MakeZeros(Expr shape, DataType dtype) { +Expr MakeZeros(Array shape, DataType dtype) { auto attrs = make_object(); - if (const auto* cshape = shape.as()) { - attrs->shape = ToVector(cshape->data); - } + attrs->shape = std::move(shape); attrs->dtype = std::move(dtype); static const Op& op = Op::Get("zeros"); - return Call(op, {shape}, Attrs(attrs), {}); + return Call(op, {}, Attrs(attrs), {}); } TVM_REGISTER_GLOBAL("relay.op._make.zeros").set_body_typed(MakeZeros); @@ -1019,19 +1011,16 @@ RELAY_REGISTER_OP("zeros") )code" TVM_ADD_FILELINE) .set_attrs_type() - .set_num_inputs(1) - .add_argument("shape", "Tensor", "Target shape.") + .set_num_inputs(0) .set_support_level(3) .add_type_rel("InitOp", InitOpRel); -Expr MakeOnes(Expr shape, DataType dtype) { +Expr MakeOnes(Array shape, DataType dtype) { auto attrs = make_object(); - if (const auto* cshape = shape.as()) { - attrs->shape = ToVector(cshape->data); - } + attrs->shape = std::move(shape); attrs->dtype = 
std::move(dtype); static const Op& op = Op::Get("ones"); - return Call(op, {shape}, Attrs(attrs), {}); + return Call(op, {}, Attrs(attrs), {}); } TVM_REGISTER_GLOBAL("relay.op._make.ones").set_body_typed(MakeOnes); @@ -1041,8 +1030,7 @@ RELAY_REGISTER_OP("ones") )code" TVM_ADD_FILELINE) .set_attrs_type() - .set_num_inputs(1) - .add_argument("shape", "Tensor", "Target shape.") + .set_num_inputs(0) .set_support_level(3) .add_type_rel("InitOp", InitOpRel); @@ -1784,20 +1772,21 @@ bool CollapseSumToRel(const Array& types, int num_inputs, const Attrs& att const TypeReporter& reporter) { CHECK_EQ(types.size(), 3); const InitOpAttrs* param = attrs.as(); + const auto* target_shape = types[1].as(); DataType out_dtype = types[0].as()->dtype; - const IntImmNode* shape_shape = target_shape->shape[0].as(); - CHECK(shape_shape) << "Parameter shape must have static shape"; + const IntImmNode* rank = target_shape->shape[0].as(); + CHECK(rank) << "Parameter must have static rank"; std::vector oshape; if (param->shape) { const Array& cshape_array = param->shape.value(); - for (size_t i = 0; i < cshape_array.size(); ++i) { + for (size_t i = 0; i < cshape_array.size(); i++) { oshape.push_back(cshape_array[i]); } } else { - for (int i = 0; i < shape_shape->value; ++i) { + for (int i = 0; i < rank->value; i++) { oshape.push_back(Any()); } } @@ -1827,39 +1816,31 @@ RELAY_REGISTER_OP("collapse_sum_to") .set_attr("FTVMCompute", CollapseSumLikeCompute) .set_attr("TOpPattern", kCommReduce); -// BroadCastTo: -> B where BroadCast(A, B) = B bool BroadCastToRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); - const InitOpAttrs* param = attrs.as(); - const auto* target_shape = types[1].as(); - DataType out_dtype = types[0].as()->dtype; + // types = [data_type, ret_type], broadcast_to_type is in attrs bc static + CHECK_EQ(types.size(), 2); - const IntImmNode* shape_shape = target_shape->shape[0].as(); - CHECK(shape_shape) << "Parameter shape must have static shape"; + const InitOpAttrs* param = attrs.as(); + CHECK(param); + DataType out_dtype = types[0].as()->dtype; std::vector oshape; - if (param->shape) { - const Array& cshape_array = param->shape.value(); - for (size_t i = 0; i < cshape_array.size(); ++i) { - oshape.push_back(cshape_array[i]); - } - } else { - for (int i = 0; i < shape_shape->value; ++i) { - oshape.push_back(Any()); - } + + const Array& cshape_array = param->shape.value(); + for (size_t i = 0; i < cshape_array.size(); ++i) { + oshape.push_back(cshape_array[i]); } - reporter->Assign(types[2], TensorType(oshape, out_dtype)); - return BroadcastRel({types[0], types[2], types[2]}, 2, Attrs(), reporter); + reporter->Assign(types[1], TensorType(oshape, out_dtype)); + return BroadcastRel({types[0], types[1], types[1]}, 2, Attrs(), reporter); } -Expr MakeBroadCastTo(Expr data, Expr shape) { +Expr MakeBroadCastTo(Expr data, Array shape) { static const Op& op = Op::Get("broadcast_to"); auto attrs = make_object(); - if (const auto* cshape = shape.as()) { - attrs->shape = ToVector(cshape->data); - } - return Call(op, {data, shape}, Attrs(attrs), {}); + + attrs->shape = std::move(shape); + return Call(op, {data}, Attrs(attrs), {}); } Array BroadCastToCompute(const Attrs& attrs, const Array& inputs, @@ -1873,9 +1854,8 @@ TVM_REGISTER_GLOBAL("relay.op._make.broadcast_to").set_body_typed(MakeBroadCastT RELAY_REGISTER_OP("broadcast_to") .describe(R"code(Broadcast the first input to match the shape argument. 
)code" TVM_ADD_FILELINE) - .set_num_inputs(2) + .set_num_inputs(1) .add_argument("data", "Tensor", "The input tensor.") - .add_argument("shape", "Tensor", "Target shape.") .set_support_level(4) .add_type_rel("BroadCastTo", BroadCastToRel) .set_attr("FTVMCompute", BroadCastToCompute) diff --git a/src/relay/transforms/dynamic_to_static.cc b/src/relay/transforms/dynamic_to_static.cc index 359e1d335bfa..d4de15c6ee5a 100644 --- a/src/relay/transforms/dynamic_to_static.cc +++ b/src/relay/transforms/dynamic_to_static.cc @@ -55,6 +55,23 @@ class DynamicToStaticMutator : public MixedModeMutator { return MakeTopK(call_node->args[0], static_cast(ToScalar(k->data, 0)), param->axis, param->ret_type, param->is_ascend, param->dtype); } + } else if (call_node->op == Op::Get("dyn.broadcast_to")) { + if (const ConstantNode* shape = call_node->args[1].as()) { + CHECK_EQ(shape->data->ndim, 1); + return MakeBroadCastTo(call_node->args[0], ToVector(shape->data)); + } + } else if (call_node->op == Op::Get("dyn.zeros")) { + if (const ConstantNode* shape = call_node->args[0].as()) { + const InitOpAttrs* param = call_node->attrs.as(); + CHECK(param); + return MakeZeros(ToVector(shape->data), param->dtype); + } + } else if (call_node->op == Op::Get("dyn.ones")) { + if (const ConstantNode* shape = call_node->args[0].as()) { + const InitOpAttrs* param = call_node->attrs.as(); + CHECK(param); + return MakeOnes(ToVector(shape->data), param->dtype); + } } return post; } @@ -106,6 +123,5 @@ TVM_REGISTER_GLOBAL("relay._transform.DynamicToStatic").set_body_typed([]() { }); } // namespace transform - } // namespace relay } // namespace tvm diff --git a/src/relay/transforms/pattern_util.h b/src/relay/transforms/pattern_util.h index 62a58d2b7ffb..adbd1bd44431 100644 --- a/src/relay/transforms/pattern_util.h +++ b/src/relay/transforms/pattern_util.h @@ -315,6 +315,7 @@ static inline Constant MakeConstantTensor(DataType dtype, std::vector s /*! * \brief Check whether a shape is static and create corresponding Constant. + Eventually this will be removed and replaced with CheckConstantShapeArrayInteger * * \param shape The Array of the shape values. * \return A Constant. @@ -332,6 +333,26 @@ static inline Constant CheckConstantShape(const Array& shape) { return Constant(shape_array); } +/*! + * \brief Check whether a shape is static and create corresponding Array. Will replace + * CheckConstantShape after dynamic refactorization is complete + * + * \param shape The Array of the shape values. + * \return A Constant. + */ +static inline Array CheckConstantShapeArrayInteger(const Array& shape) { + Array constShape; + + for (size_t i = 0; i < shape.size(); ++i) { + const auto& dim_val = shape[i].as(); + CHECK(dim_val) << "Do not support symbolic shape for " + "Array format. Pass shape as Expr instead."; + + constShape.push_back(dim_val->value); + } + return constShape; +} + /*! * \brief Check if two expressions are equal scalars. * \param a The expression to be checked. 
@@ -505,7 +526,7 @@ inline Expr ZerosLike(Expr e) { } inline Expr Zeros(Array shape, DataType dtype) { - return MakeZeros(CheckConstantShape(shape), dtype); + return MakeZeros(CheckConstantShapeArrayInteger(shape), dtype); } inline Expr OnesLike(Expr e) { @@ -514,7 +535,7 @@ inline Expr OnesLike(Expr e) { } inline Expr Ones(Array shape, DataType dtype) { - return MakeOnes(CheckConstantShape(shape), dtype); + return MakeOnes(CheckConstantShapeArrayInteger(shape), dtype); } inline Expr CollapseSumLike(Expr e) { @@ -605,7 +626,7 @@ static inline Expr Pad(Expr data, Array> pad_width, double pad_ static inline Expr Tile(Expr data, Array reps) { return MakeTile(data, reps); } static inline Expr BroadCastTo(Expr data, Array shape) { - return MakeBroadCastTo(data, CheckConstantShape(shape)); + return MakeBroadCastTo(data, CheckConstantShapeArrayInteger(shape)); } Expr StopFusion(Expr data); diff --git a/tests/python/relay/dyn/test_dynamic_op_level10.py b/tests/python/relay/dyn/test_dynamic_op_level10.py new file mode 100644 index 000000000000..d9b23a717f65 --- /dev/null +++ b/tests/python/relay/dyn/test_dynamic_op_level10.py @@ -0,0 +1,54 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" +Support level10 operator test cases. 
+ +""" + + +import numpy as np +import tvm +from tvm import relay +from tvm.relay.testing import ctx_list, run_infer_type +import random + +def test_dyn_broadcast_to(): + dtype = 'uint8' + rank = 3 + shape_type = 'int64' + dyn_shape = relay.Var("shape", relay.ty.TensorType((rank,), shape_type)) + x_shape = (1,) + x = relay.Var("x", relay.ty.TensorType(x_shape, dtype)) + z = relay.broadcast_to(x, dyn_shape) + zz = run_infer_type(z) + + assert zz.checked_type == relay.ty.TensorType((relay.Any(),) * rank, dtype) + + func = relay.Function([x, dyn_shape], z) + + x = np.random.uniform(size=x_shape).astype(dtype) + dyn_shape = (1,)*rank + ref_res = np.broadcast_to(x, dyn_shape) + for target, ctx in ctx_list(): + if (target != 'cuda'): #skip cuda because we don't have dynamic support for GPU + for kind in ["vm", "debug"]: + mod = tvm.ir.IRModule.from_expr(func) + intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target) + op_res = intrp.evaluate(func)(x,np.array(dyn_shape).astype(shape_type)) + tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + +test_dyn_broadcast_to() diff --git a/tests/python/relay/dyn/test_dynamic_op_level3.py b/tests/python/relay/dyn/test_dynamic_op_level3.py index 2f473c9de070..e63f9b8cd722 100644 --- a/tests/python/relay/dyn/test_dynamic_op_level3.py +++ b/tests/python/relay/dyn/test_dynamic_op_level3.py @@ -84,7 +84,31 @@ def verify_tile(dshape, reps): verify_tile((2, 3, 4), (1, 2)) verify_tile((2, 3), (3, 2, 1)) + +def test_dyn_zeros_ones(): + def verify_zeros_ones(shape, dtype): + for op, ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]: + rank = len(shape) + dyn_shape = relay.Var("shape", relay.ty.TensorType((rank,), 'int64')) + y = op(dyn_shape, dtype) + yy = run_infer_type(y) + assert yy.checked_type == relay.ty.TensorType((relay.Any(),) * rank, dtype) + + func = relay.Function([dyn_shape], y) + ref_res = ref(shape, dtype) + for target, ctx in ctx_list(): + if (target != 'cuda'): #skip cuda because no dynamic support for GPU + for kind in ["vm", "debug"]: + mod = tvm.ir.IRModule.from_expr(func) + intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target) + op_res = intrp.evaluate(func)(np.array(shape).astype('int64')) + tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + + + verify_zeros_ones((124, 50), 'float64') + if __name__ == "__main__": test_dyn_reshape() test_dyn_shape_reshape() test_dyn_tile() + test_dyn_zeros_ones() \ No newline at end of file diff --git a/tests/python/relay/test_op_level10.py b/tests/python/relay/test_op_level10.py index 7528267cc3dd..a79f1a514fa7 100644 --- a/tests/python/relay/test_op_level10.py +++ b/tests/python/relay/test_op_level10.py @@ -213,6 +213,7 @@ def test_broadcast_to_like(): x = relay.Var("x", relay.ty.TensorType(shape , dtype)) y = relay.Var("y", relay.ty.TensorType(shape_like, dtype)) z = relay.broadcast_to_like(x, y) + zz = run_infer_type(z) assert zz.checked_type == relay.ty.TensorType(shape_like, dtype) @@ -220,6 +221,7 @@ def test_broadcast_to_like(): x = np.random.uniform(size=shape).astype(dtype) y = np.random.uniform(size=shape_like).astype(dtype) ref_res = np.broadcast_to(x, shape_like) + for target, ctx in ctx_list(): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, ctx=ctx, target=target) @@ -472,11 +474,12 @@ def _verify(indices_shape, depth, on_value, off_value, axis, dtype): if __name__ == "__main__": test_adaptive_pool() test_collapse_sum_like() + test_broadcast_to() test_broadcast_to_like() test_slice_like() test_reverse_reshape() 
test_batch_matmul() test_shape_of() test_sequence_mask() - test_ndarray_size() test_one_hot() + test_ndarray_size() diff --git a/tests/python/relay/test_pass_dynamic_to_static.py b/tests/python/relay/test_pass_dynamic_to_static.py index bcd8a644e807..8ca788212ff3 100644 --- a/tests/python/relay/test_pass_dynamic_to_static.py +++ b/tests/python/relay/test_pass_dynamic_to_static.py @@ -181,10 +181,52 @@ def verify_topk(k, axis, ret_type, is_ascend, dtype): for ret_type in ["both", "values", "indices"]: verify_topk(k, axis, ret_type, True, "int64") verify_topk(k, axis, ret_type, False, "float32") +def test_dynamic_to_static_broadcast_to(): + def verify_broadcast_to(shape, broadcast_shape): + x = relay.var("x", relay.TensorType(shape, "float32")) + y = relay.var("y", relay.TensorType(broadcast_shape, "float32")) + z = relay.broadcast_to(x, shape=relay.shape_of(y)) + + func = run_infer_type(relay.Function([x, y], z)) + func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()), transform.InferType()) + + zz = func2.body + assert isinstance(zz, relay.Call) + assert zz.op == relay.op.get("broadcast_to") + assert zz.checked_type == relay.ty.TensorType(broadcast_shape, "float32") + + x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32") + y_data = np.random.uniform(low=-1, high=1, size=broadcast_shape).astype("float32") + + ref_res = np.broadcast_to(x_data, y_data.shape) + verify_func(func2, [x_data, y_data], ref_res) + verify_broadcast_to((3, 1), (3, 3)) + +def test_dynamic_to_static_zeros_ones(): + def verify_ones_zeros(shape, dtype): + for op, ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]: + x = relay.var("x", relay.TensorType(shape, dtype)) + y = op(relay.shape_of(x), dtype) + + func = run_infer_type(relay.Function([x], y)) + func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()), transform.InferType()) + + zz = func2.body + assert isinstance(zz, relay.Constant) + assert zz.checked_type == relay.ty.TensorType(shape, dtype) + + x_data = np.random.uniform(low=1, high=1, size=shape) + ref_res = ref(x_data.shape) + verify_func(func2, [x_data], ref_res) + + verify_ones_zeros((1, 2, 3), 'int64') + verify_ones_zeros((9, 8, 3, 4), 'float32') + if __name__=="__main__": test_dynamic_to_static_reshape() test_dynamic_to_static_double_reshape() test_dynamic_to_static_quad_reshape() test_dynamic_to_static_tile() test_dynamic_to_static_topk() - + test_dynamic_to_static_broadcast_to() + test_dynamic_to_static_zeros_ones()
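To make the intent of the diff concrete, here is a minimal usage sketch (not part of the patch itself), assuming a TVM build that already includes this change: relay.zeros, relay.ones, and relay.broadcast_to now dispatch to the dyn.* variants when the shape argument is a relay Expr, and the DynamicToStatic pass folds them back to the static ops once the shape can be proven constant, mirroring the new tests in test_pass_dynamic_to_static.py above.

import numpy as np
import tvm
from tvm import relay
from tvm.relay import transform

# Static shape (python tuple): lowers to the static "zeros" op,
# with the shape stored in InitOpAttrs and zero call inputs.
z_static = relay.zeros((2, 3), "float32")

# Dynamic shape (relay Expr): lowers to "dyn.zeros" / "dyn.broadcast_to",
# which take the target shape as a tensor input instead of an attribute.
x = relay.var("x", shape=(2, 3), dtype="float32")
z_dyn = relay.zeros(relay.shape_of(x), "float32")
b_dyn = relay.broadcast_to(relay.const(np.ones((1,), "float32")), relay.shape_of(x))

# DynamicToStatic (together with constant folding) rewrites the dyn.* calls
# back to the static ops once shape_of(x) folds to a constant, as exercised by
# test_dynamic_to_static_zeros_ones and test_dynamic_to_static_broadcast_to.
mod = tvm.ir.IRModule.from_expr(relay.Function([x], relay.add(z_dyn, b_dyn)))
mod = transform.InferType()(mod)
mod = transform.DynamicToStatic()(mod)
print(mod)

The split reflects the design choice visible in the C++ hunks: the static ops (zeros, ones, broadcast_to) keep the target shape purely in attrs and drop the shape input, while the dyn.* ops accept the shape as a runtime tensor whose rank must still be static, so type inference assigns Any() for each output dimension.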