From 4de676172dd7a42849b5addde742b29908e46d41 Mon Sep 17 00:00:00 2001
From: Yizhi Liu
Date: Wed, 15 Jan 2020 22:07:40 -0800
Subject: [PATCH] [Arith] add SizeVar representing non-neg valued variable in a tensor shape (#4684)

* [arith] add ShapeVar representing non-neg valued variable in a tensor shape
* bounder remover; deal with div in int_set differently
* fix bounder_remover
* migrate unittest to use shape_var
* use tvm.shape_var in integration & relay tests
* add test case; fix Var register
* fix lint
* fix lint again
* add default ShapeVar visitor in Relay
* fix override
* fix ShapeVar visit bug
* revert IntervalSet for shape_var
* remove bound_remover
* remove is_var; use constructor for shapevar/var instead
* ShapeVar -> SizeVar; add constructor comments
* shape_var -> size_var in doc
* tindex -> size
---
 docs/api/python/tvm.rst | 2 +
 include/tvm/expr.h | 59 ++++++++++++++++++-
 include/tvm/ir.h | 3 +-
 include/tvm/ir_functor_ext.h | 6 ++
 python/tvm/api.py | 19 ++++++
 python/tvm/expr.py | 19 ++++++
 python/tvm/hybrid/preprocessor.py | 2 +-
 src/api/api_ir.cc | 7 ++-
 src/arithmetic/bound_deducer.cc | 3 +-
 src/arithmetic/const_int_bound.cc | 10 ++++
 src/arithmetic/int_set.cc | 1 +
 src/ir/attr_functor.h | 4 ++
 src/ir/attrs.cc | 2 +-
 src/lang/expr.cc | 16 +++--
 src/lang/ir.cc | 5 ++
 src/pass/ir_functor.cc | 8 +++
 src/pass/ssa.cc | 8 +--
 src/pass/tensor_core.cc | 2 +-
 src/schedule/bound.cc | 2 +-
 tests/python/contrib/test_sparse.py | 8 +--
 tests/python/integration/test_ewise.py | 6 +-
 tests/python/integration/test_gemm.py | 1 -
 tests/python/integration/test_reduce.py | 8 +--
 tests/python/integration/test_scan.py | 4 +-
 tests/python/relay/test_ir_text_printer.py | 6 +-
 tests/python/relay/test_op_level1.py | 18 +++---
 tests/python/relay/test_op_level10.py | 2 +-
 tests/python/relay/test_op_level2.py | 49 +++++++--------
 tests/python/relay/test_op_level3.py | 10 ++--
 tests/python/relay/test_op_level4.py | 2 +-
 tests/python/relay/test_op_level5.py | 14 ++---
 .../unittest/test_arith_const_int_bound.py | 9 +++
 tests/python/unittest/test_arith_intset.py | 1 +
 .../unittest/test_arith_stmt_simplify.py | 9 +--
 tests/python/unittest/test_build_lower.py | 6 +-
 tests/python/unittest/test_codegen_arm.py | 4 +-
 tests/python/unittest/test_codegen_c_host.py | 2 +-
 tests/python/unittest/test_codegen_device.py | 2 +-
 tests/python/unittest/test_codegen_llvm.py | 4 +-
 tests/python/unittest/test_codegen_rocm.py | 4 +-
 .../unittest/test_codegen_static_init.py | 8 +--
 .../python/unittest/test_codegen_vm_basic.py | 12 ++--
 tests/python/unittest/test_hybrid_script.py | 6 +-
 tests/python/unittest/test_ir_builder.py | 8 +--
 tests/python/unittest/test_lang_buffer.py | 46 +++++++--------
 tests/python/unittest/test_lang_group.py | 12 ++--
 tests/python/unittest/test_lang_schedule.py | 30 +++++-----
 tests/python/unittest/test_lang_tag.py | 30 +++++-----
 tests/python/unittest/test_lang_tensor.py | 50 ++++++++--------
 .../unittest/test_lang_tensor_overload_op.py | 2 +-
 .../unittest/test_lang_verify_compute.py | 4 +-
 tests/python/unittest/test_module_load.py | 2 +-
 .../unittest/test_pass_bound_checkers.py | 44 +++++++-------
 .../test_pass_decorate_device_scope.py | 4 +-
 tests/python/unittest/test_pass_inline.py | 4 +-
 .../unittest/test_pass_loop_partition.py | 37 ++++++------
 tests/python/unittest/test_pass_makeapi.py | 2 +-
 .../unittest/test_pass_split_host_device.py | 2 +-
 .../unittest/test_pass_storage_flatten.py | 8 +--
 .../python/unittest/test_pass_storage_sync.py | 10 ++--
 tests/python/unittest/test_pass_unroll.py | 6 +-
topi/python/topi/nn/conv2d.py | 10 ++-- 62 files changed, 417 insertions(+), 267 deletions(-) diff --git a/docs/api/python/tvm.rst b/docs/api/python/tvm.rst index b517195db9e4b..19762fb20d973 100644 --- a/docs/api/python/tvm.rst +++ b/docs/api/python/tvm.rst @@ -24,6 +24,7 @@ The user facing API for computation declaration. tvm.load_json tvm.save_json tvm.var + tvm.size_var tvm.const tvm.convert tvm.placeholder @@ -49,6 +50,7 @@ The user facing API for computation declaration. .. autofunction:: tvm.load_json .. autofunction:: tvm.save_json .. autofunction:: tvm.var +.. autofunction:: tvm.size_var .. autofunction:: tvm.const .. autofunction:: tvm.convert .. autofunction:: tvm.placeholder diff --git a/include/tvm/expr.h b/include/tvm/expr.h index d3686270a293f..bdd0b8f542024 100644 --- a/include/tvm/expr.h +++ b/include/tvm/expr.h @@ -65,27 +65,33 @@ class Var; */ class VarNode : public PrimExprNode { public: + /*! \brief constructor */ + VarNode() {} + VarNode(DataType dtype, std::string name_hint); + /*! * \brief The hint to the variable name. * \note Each variable is uniquely identified by its address. */ std::string name_hint; - static Var make(DataType dtype, std::string name_hint); - void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &dtype); v->Visit("name", &name_hint); } static constexpr const char* _type_key = "Variable"; - TVM_DECLARE_FINAL_OBJECT_INFO(VarNode, PrimExprNode); + TVM_DECLARE_BASE_OBJECT_INFO(VarNode, PrimExprNode); }; /*! \brief a named variable in TVM */ class Var : public PrimExpr { public: explicit Var(ObjectPtr n) : PrimExpr(n) {} + /*! \brief constructor + * \param name_hint variable name + * \param t data type + */ TVM_DLL explicit Var(std::string name_hint = "v", DataType t = DataType::Int(32)); /*! @@ -114,6 +120,53 @@ class Var : public PrimExpr { using ContainerType = VarNode; }; +class SizeVar; +/*! + * \brief A variable node represent a tensor index size, + * whose value must be non-negative. + */ +class SizeVarNode : public VarNode { + public: + /*! \brief constructor */ + SizeVarNode() {} + /*! \brief constructor + * \param dtype data type + * \param name_hint variable name + */ + SizeVarNode(DataType dtype, std::string name_hint); + + static constexpr const char* _type_key = "SizeVar"; + TVM_DECLARE_FINAL_OBJECT_INFO(SizeVarNode, VarNode); +}; + +/*! \brief a named variable represents a tensor index size */ +class SizeVar : public Var { + public: + explicit SizeVar(ObjectPtr n) : Var(n) {} + /*! \brief constructor + * \param name_hint variable name + * \param t data type + */ + TVM_DLL explicit SizeVar(std::string name_hint = "s", + DataType t = DataType::Int(32)); + /*! + * \brief Get pointer to the internal value. + * \return the corresponding Variable. + */ + const SizeVarNode* operator->() const { + return get(); + } + /*! + * \brief Get pointer to the internal value. + * \return the corresponding Variable. + */ + const SizeVarNode* get() const { + return static_cast(data_.get()); + } + /*! \brief type indicate the container type */ + using ContainerType = SizeVarNode; +}; + /*! * \brief Container of constant int that adds more constructors. * diff --git a/include/tvm/ir.h b/include/tvm/ir.h index 46645d75f0c29..4e36332e004e2 100644 --- a/include/tvm/ir.h +++ b/include/tvm/ir.h @@ -38,6 +38,7 @@ namespace ir { using IntImmNode = tvm::IntImmNode; using FloatImmNode = tvm::FloatImmNode; using VarNode = tvm::VarNode; +using SizeVarNode = tvm::SizeVarNode; /*! \brief String constants, only used in asserts. 
*/ class StringImmNode : public PrimExprNode { @@ -679,7 +680,7 @@ class AnyNode : public PrimExprNode { void VisitAttrs(AttrVisitor* v) {} /*! \brief Convert to var. */ Var ToVar() const { - return VarNode::make(DataType::Int(32), "any_dim"); + return Var("any_dim", DataType::Int(32)); } TVM_DLL static PrimExpr make(); diff --git a/include/tvm/ir_functor_ext.h b/include/tvm/ir_functor_ext.h index 37a1fe4bffb2c..11e8e3836053b 100644 --- a/include/tvm/ir_functor_ext.h +++ b/include/tvm/ir_functor_ext.h @@ -133,6 +133,9 @@ class ExprFunctor { } // Functions that can be overriden by subclass virtual R VisitExpr_(const VarNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; + virtual R VisitExpr_(const SizeVarNode* op, Args... args) { + return VisitExpr_(static_cast(op), std::forward(args)...); + } virtual R VisitExpr_(const LoadNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const LetNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const CallNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; @@ -174,6 +177,7 @@ class ExprFunctor { FType vtable; // Set dispatch IR_EXPR_FUNCTOR_DISPATCH(VarNode); + IR_EXPR_FUNCTOR_DISPATCH(SizeVarNode); IR_EXPR_FUNCTOR_DISPATCH(LoadNode); IR_EXPR_FUNCTOR_DISPATCH(LetNode); IR_EXPR_FUNCTOR_DISPATCH(CallNode); @@ -297,6 +301,7 @@ class TVM_DLL ExprVisitor : using ExprFunctor::VisitExpr; // list of functions to override. void VisitExpr_(const VarNode* op) override; + void VisitExpr_(const SizeVarNode* op) override; void VisitExpr_(const LoadNode* op) override; void VisitExpr_(const LetNode* op) override; void VisitExpr_(const CallNode* op) override; @@ -341,6 +346,7 @@ class TVM_DLL ExprMutator : using ExprFunctor::VisitExpr; // list of functions to override. PrimExpr VisitExpr_(const VarNode* op) override; + PrimExpr VisitExpr_(const SizeVarNode* op) override; PrimExpr VisitExpr_(const LoadNode* op) override; PrimExpr VisitExpr_(const LetNode* op) override; PrimExpr VisitExpr_(const CallNode* op) override; diff --git a/python/tvm/api.py b/python/tvm/api.py index 4bfe794c14d34..4338b5564980a 100644 --- a/python/tvm/api.py +++ b/python/tvm/api.py @@ -192,6 +192,25 @@ def var(name="tindex", dtype=int32): return _api_internal._Var(name, dtype) +def size_var(name="size", dtype=int32): + """Create a new variable represents a tensor shape size, which is non-negative. + + Parameters + ---------- + name : str + The name + + dtype : str + The data type + + Returns + ------- + var : SizeVar + The result symbolic shape variable. + """ + return _api_internal._SizeVar(name, dtype) + + def any(*args): """Create a new experssion of the union of all conditions in the arguments diff --git a/python/tvm/expr.py b/python/tvm/expr.py index 2fd7b78d9d66e..20d9d89cd2ac2 100644 --- a/python/tvm/expr.py +++ b/python/tvm/expr.py @@ -278,6 +278,25 @@ def __init__(self, name, dtype): _api_internal._Var, name, dtype) +@register_object +class SizeVar(Var): + """Symbolic variable to represent a tensor index size + which is greater or equal to zero + + Parameters + ---------- + name : str + The name + + dtype : int + The data type + """ + # pylint: disable=super-init-not-called + def __init__(self, name, dtype): + self.__init_handle_by_constructor__( + _api_internal._SizeVar, name, dtype) + + @register_object class Reduce(PrimExpr): """Reduce node. 
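
For reference, the new Python entry points added above (tvm.size_var and the SizeVar expression class) are used exactly like tvm.var, with the extra guarantee that the value is non-negative, which the arithmetic analyzer exploits. A minimal usage sketch, assuming the pre-unified-IR tvm.* API that this patch targets; the tensor names and expected bounds below are illustrative (the bound behaviour mirrors test_size_var_bound further down in this patch):

    import tvm

    n = tvm.size_var("n")   # shape extent, known to be >= 0
    x = tvm.var("x")        # plain Var, sign unknown to the analyzer

    A = tvm.placeholder((n,), name="A")
    B = tvm.compute(A.shape, lambda i: A[i] + 1.0, name="B")
    s = tvm.create_schedule(B.op)
    print(tvm.lower(s, [A, B], simple_mode=True))

    # const_int_bound sees the non-negativity of a SizeVar
    analyzer = tvm.arith.Analyzer()
    print(analyzer.const_int_bound(n))   # expected bound: [0, +inf]
    print(analyzer.const_int_bound(x))   # expected bound: [-inf, +inf]
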
diff --git a/python/tvm/hybrid/preprocessor.py b/python/tvm/hybrid/preprocessor.py index 1a9de4e3f8015..035e8a40f2453 100644 --- a/python/tvm/hybrid/preprocessor.py +++ b/python/tvm/hybrid/preprocessor.py @@ -63,7 +63,7 @@ def visit_Call(self, node): _internal_assert(func_id in list(HYBRID_GLOBALS.keys()) + \ ['range', 'max', 'min', 'len'] + \ list(self.symbols.keys()), \ - "Function call id not in intrinsics' list") + "Function call id " + func_id + " not in intrinsics' list") for elem in node.args: self.visit(elem) diff --git a/src/api/api_ir.cc b/src/api/api_ir.cc index 261b94eb022e6..3b29ee4c85e6d 100644 --- a/src/api/api_ir.cc +++ b/src/api/api_ir.cc @@ -33,7 +33,12 @@ namespace ir { TVM_REGISTER_GLOBAL("_Var") .set_body_typed([](std::string s, DataType t) { - return VarNode::make(t, s); + return Var(s, t); + }); + +TVM_REGISTER_GLOBAL("_SizeVar") +.set_body_typed([](std::string s, DataType t) { + return SizeVar(s, t); }); TVM_REGISTER_GLOBAL("make.abs") diff --git a/src/arithmetic/bound_deducer.cc b/src/arithmetic/bound_deducer.cc index 1ba0293fca8a9..5128096eb40f7 100644 --- a/src/arithmetic/bound_deducer.cc +++ b/src/arithmetic/bound_deducer.cc @@ -86,7 +86,7 @@ class BoundDeducer: public ExprVisitor { void VisitExpr(const PrimExpr& e) final { if (!success_) return; - if (e.get() == path_[iter_++]) { + if (iter_ < path_.size() && e.get() == path_[iter_++]) { ExprVisitor::VisitExpr(e); } else { success_ = false; @@ -297,6 +297,7 @@ void BoundDeducer::Transform() { void BoundDeducer::Deduce() { Init(); if (!success_) return; + Relax(); if (!success_) return; // get the path diff --git a/src/arithmetic/const_int_bound.cc b/src/arithmetic/const_int_bound.cc index 25d88d3429b6f..c1cf1e08d2c3a 100644 --- a/src/arithmetic/const_int_bound.cc +++ b/src/arithmetic/const_int_bound.cc @@ -284,6 +284,16 @@ class ConstIntBoundAnalyzer::Impl : } } + Entry VisitExpr_(const SizeVarNode* op) final { + SizeVar v = GetRef(op); + auto it = var_map_.find(v); + if (it != var_map_.end()) { + return it->second; + } else { + return MakeBound(0, kPosInf); + } + } + Entry VisitRightShift(const CallNode* op) { Entry a = VisitExpr(op->args[0]); Entry b = VisitExpr(op->args[1]); diff --git a/src/arithmetic/int_set.cc b/src/arithmetic/int_set.cc index 37d5e9eb5e57e..3ea6ee5cc608e 100644 --- a/src/arithmetic/int_set.cc +++ b/src/arithmetic/int_set.cc @@ -401,6 +401,7 @@ class IntervalSetEvaluator : } } + IntervalSet VisitExpr_(const AddNode* op) final { return VisitBinaryExpr_(op); } diff --git a/src/ir/attr_functor.h b/src/ir/attr_functor.h index c1401233d8a1e..68725689db134 100644 --- a/src/ir/attr_functor.h +++ b/src/ir/attr_functor.h @@ -81,6 +81,9 @@ class AttrFunctor { virtual R VisitAttr_(const ir::StringImmNode* op, Args... args) ATTR_FUNCTOR_DEFAULT; // deep comparison of symbolic integer expressions. virtual R VisitAttr_(const VarNode* op, Args... args) ATTR_FUNCTOR_DEFAULT; + virtual R VisitAttr_(const SizeVarNode* op, Args... args) { + return VisitAttr_(static_cast(op), std::forward(args)...); + } virtual R VisitAttr_(const ir::AddNode* op, Args... args) ATTR_FUNCTOR_DEFAULT; virtual R VisitAttr_(const ir::SubNode* op, Args... args) ATTR_FUNCTOR_DEFAULT; virtual R VisitAttr_(const ir::MulNode* op, Args... 
args) ATTR_FUNCTOR_DEFAULT; @@ -115,6 +118,7 @@ class AttrFunctor { ATTR_FUNCTOR_DISPATCH(FloatImmNode); ATTR_FUNCTOR_DISPATCH(StringImmNode); ATTR_FUNCTOR_DISPATCH(VarNode); + ATTR_FUNCTOR_DISPATCH(SizeVarNode); ATTR_FUNCTOR_DISPATCH(AddNode); ATTR_FUNCTOR_DISPATCH(SubNode); ATTR_FUNCTOR_DISPATCH(MulNode); diff --git a/src/ir/attrs.cc b/src/ir/attrs.cc index f2f923359efae..d9063fbf0e6fa 100644 --- a/src/ir/attrs.cc +++ b/src/ir/attrs.cc @@ -114,7 +114,7 @@ bool AttrsEqualHandler::VisitAttr_(const StringImmNode* lhs, const ObjectRef& ot bool AttrsEqualHandler::VisitAttr_(const ArrayNode* lhs, const ObjectRef& other) { if (const auto* rhs = other.as()) { if (rhs->data.size() != lhs->data.size()) return false; - for (size_t i = 0; i < lhs->data.size(); ++i) { + for (size_t i = 0; i < lhs->data.size(); ++i) { if (!Equal(lhs->data[i], rhs->data[i])) return false; } } diff --git a/src/lang/expr.cc b/src/lang/expr.cc index a95bd40a09e11..1dd88b5d0bbba 100644 --- a/src/lang/expr.cc +++ b/src/lang/expr.cc @@ -39,15 +39,19 @@ PrimExpr::PrimExpr(std::string str) : PrimExpr(ir::StringImmNode::make(str)) {} Var::Var(std::string name_hint, DataType t) - : Var(VarNode::make(t, name_hint)) {} + : Var(make_object(t, name_hint)) {} -Var VarNode::make(DataType t, std::string name_hint) { - ObjectPtr node = make_object(); - node->dtype = t; - node->name_hint = std::move(name_hint); - return Var(node); +VarNode::VarNode(DataType t, std::string name_hint) { + this->dtype = t; + this->name_hint = std::move(name_hint); } +SizeVar::SizeVar(std::string name_hint, DataType t) + : SizeVar(make_object(t, name_hint)) {} + +SizeVarNode::SizeVarNode(DataType t, std::string name_hint) + : VarNode(t, std::move(name_hint)) {} + Range::Range(PrimExpr begin, PrimExpr end) : Range(make_object( begin, diff --git a/src/lang/ir.cc b/src/lang/ir.cc index 274459ec088c5..d513075370380 100644 --- a/src/lang/ir.cc +++ b/src/lang/ir.cc @@ -592,6 +592,10 @@ TVM_STATIC_IR_FUNCTOR(NodePrinter, vtable) // stream << op->name << "." 
<< op->type; p->stream << op->name_hint; }) +.set_dispatch([](const ObjectRef& node, NodePrinter* p) { + auto* op = static_cast(node.get()); + p->stream << "{" << op->name_hint << "|" << op->name_hint << ">=0}"; + }) .set_dispatch([](const ObjectRef& node, NodePrinter* p) { auto* op = static_cast(node.get()); p->stream << '('; @@ -1143,6 +1147,7 @@ TVM_REGISTER_NODE_TYPE(IntImmNode); TVM_REGISTER_NODE_TYPE(StringImmNode); TVM_REGISTER_NODE_TYPE(CastNode); TVM_REGISTER_NODE_TYPE(VarNode); +TVM_REGISTER_NODE_TYPE(SizeVarNode); TVM_REGISTER_NODE_TYPE(AddNode); TVM_REGISTER_NODE_TYPE(SubNode); TVM_REGISTER_NODE_TYPE(MulNode); diff --git a/src/pass/ir_functor.cc b/src/pass/ir_functor.cc index 857206f8dd9f7..2c996745fa76f 100644 --- a/src/pass/ir_functor.cc +++ b/src/pass/ir_functor.cc @@ -221,6 +221,10 @@ void StmtVisitor::VisitStmt_(const EvaluateNode* op) { void ExprVisitor::VisitExpr_(const VarNode* op) {} +void ExprVisitor::VisitExpr_(const SizeVarNode* op) { + this->VisitExpr_(static_cast(op)); +} + void ExprVisitor::VisitExpr_(const LoadNode* op) { this->VisitExpr(op->index); this->VisitExpr(op->predicate); @@ -596,6 +600,10 @@ PrimExpr ExprMutator::VisitExpr_(const VarNode* op) { return GetRef(op); } +PrimExpr ExprMutator::VisitExpr_(const SizeVarNode* op) { + return this->VisitExpr_(static_cast(op)); +} + PrimExpr ExprMutator::VisitExpr_(const LoadNode* op) { PrimExpr index = this->VisitExpr(op->index); PrimExpr predicate = this->VisitExpr(op->predicate); diff --git a/src/pass/ssa.cc b/src/pass/ssa.cc index 8375e806a006a..50cdc528f2078 100644 --- a/src/pass/ssa.cc +++ b/src/pass/ssa.cc @@ -87,7 +87,7 @@ class IRConvertSSA final : public StmtExprMutator { const Var& v = op->var; if (defined_.count(v.get())) { PrimExpr value = this->VisitExpr(op->value); - Var new_var = VarNode::make(v.dtype(), v->name_hint); + Var new_var(v->name_hint, v.dtype()); scope_[v.get()].push_back(new_var); PrimExpr body = this->VisitExpr(op->body); scope_[v.get()].pop_back(); @@ -123,7 +123,7 @@ class IRConvertSSA final : public StmtExprMutator { const Var& v = op->var; if (defined_.count(v.get())) { PrimExpr value = this->VisitExpr(op->value); - Var new_var = VarNode::make(v.dtype(), v->name_hint); + Var new_var(v->name_hint, v.dtype()); scope_[v.get()].push_back(new_var); Stmt body = this->VisitStmt(op->body); scope_[v.get()].pop_back(); @@ -136,7 +136,7 @@ class IRConvertSSA final : public StmtExprMutator { Stmt VisitStmt_(const ForNode* op) final { const Var& v = op->loop_var; if (defined_.count(v.get())) { - Var new_var = VarNode::make(v.dtype(), v->name_hint); + Var new_var(v->name_hint, v.dtype()); scope_[v.get()].push_back(new_var); Stmt stmt = StmtExprMutator::VisitStmt_(op); scope_[v.get()].pop_back(); @@ -151,7 +151,7 @@ class IRConvertSSA final : public StmtExprMutator { Stmt VisitStmt_(const AllocateNode* op) final { const Var& v = op->buffer_var; if (defined_.count(v.get())) { - Var new_var = VarNode::make(v.dtype(), v->name_hint); + Var new_var(v->name_hint, v.dtype()); scope_[v.get()].push_back(new_var); Stmt stmt = StmtExprMutator::VisitStmt_(op); scope_[v.get()].pop_back(); diff --git a/src/pass/tensor_core.cc b/src/pass/tensor_core.cc index 002e42297faa4..dad2780be425f 100644 --- a/src/pass/tensor_core.cc +++ b/src/pass/tensor_core.cc @@ -1108,7 +1108,7 @@ class TensorCoreIRMutator : public StmtExprMutator { auto it2 = matrix_abc_.find(simplify_name(call->name)); CHECK(it2 != matrix_abc_.end()) << "Cannot find matrix info for " << call->name; - buffer_node->data = 
VarNode::make(DataType::Handle(), call->name); + buffer_node->data = Var(call->name, DataType::Handle()); buffer_node->name = call->name; buffer_node->scope = "wmma." + it2->second; buffer_node->dtype = datatype; diff --git a/src/schedule/bound.cc b/src/schedule/bound.cc index ce2397b1d4f70..5f363dbc126ec 100644 --- a/src/schedule/bound.cc +++ b/src/schedule/bound.cc @@ -237,7 +237,7 @@ Map InferBound(const Schedule& sch) { InferRootBound(stage, ctx, &ret); // bind bound of root iter vars. - for (auto iv : stage->op->root_iter_vars()) { + for (auto iv : stage->op->root_iter_vars()) { auto it = ret.find(iv); if (it != ret.end()) { analyzer.Bind(iv->var, it->second); diff --git a/tests/python/contrib/test_sparse.py b/tests/python/contrib/test_sparse.py index bc815f6a07188..5425b196574e5 100644 --- a/tests/python/contrib/test_sparse.py +++ b/tests/python/contrib/test_sparse.py @@ -25,8 +25,8 @@ def test_static_tensor(): stype = 'csr' target = 'llvm' ctx = tvm.context(target, 0) - m = tvm.var('m') - n = tvm.var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') A = tvmsp.placeholder(shape=(m, n), name='A', dtype=dtype) assert(A.stype == 'csr') n = 3 @@ -50,7 +50,7 @@ def test_dynamic_tensor(): stype = 'csr' target = 'llvm' ctx = tvm.context(target, 0) - nr, nc, n = tvm.var('nr'), tvm.var('nc'), tvm.var('n') + nr, nc, n = tvm.size_var('nr'), tvm.size_var('nc'), tvm.size_var('n') A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, name='A', dtype=dtype) assert(A.stype == 'csr') C = tvm.compute(A.data.shape, lambda i: A.data[i] * 2., tag='cs_scatter') @@ -76,7 +76,7 @@ def test_sparse_array_tuple(): stype = 'csr' target = 'llvm' ctx = tvm.context(target, 0) - nr, nc, n = tvm.var('nr'), tvm.var('nc'), tvm.var('n') + nr, nc, n = tvm.size_var('nr'), tvm.size_var('nc'), tvm.size_var('n') A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, name='A', dtype=dtype) assert(A.stype == 'csr') C = tvm.compute(A.data.shape, lambda i: A.data[i] * 2., tag='cs_scatter') diff --git a/tests/python/integration/test_ewise.py b/tests/python/integration/test_ewise.py index 31a39c40fa1fa..e3a1087acea3f 100644 --- a/tests/python/integration/test_ewise.py +++ b/tests/python/integration/test_ewise.py @@ -57,7 +57,7 @@ def check_device(device, host="stackvm"): def test_fmod(): # graph def run(dtype): - n = tvm.var('n') + n = tvm.size_var('n') A = tvm.placeholder((n,), name='A', dtype=dtype) B = tvm.placeholder((n,), name='B', dtype=dtype) C = tvm.compute(A.shape, lambda *i: tvm.fmod(A(*i), B(*i)), name='C') @@ -140,7 +140,7 @@ def check_device(device, host="stackvm"): def test_log_pow_llvm(): # graph - n = tvm.var('n') + n = tvm.size_var('n') A = tvm.placeholder((n,), name='A') B = tvm.compute(A.shape, lambda *i: tvm.power(tvm.log(A(*i)), 2.0), name='B') s = tvm.create_schedule(B.op) @@ -207,7 +207,7 @@ def check_device(device): def test_add(): def run(dtype): # graph - n = tvm.var('n') + n = tvm.size_var('n') A = tvm.placeholder((n,), name='A', dtype=dtype) B = tvm.placeholder((n,), name='B', dtype=dtype) bias = tvm.var("bias", dtype=dtype) diff --git a/tests/python/integration/test_gemm.py b/tests/python/integration/test_gemm.py index 9b1a4bf10e198..d61335f68924c 100644 --- a/tests/python/integration/test_gemm.py +++ b/tests/python/integration/test_gemm.py @@ -22,7 +22,6 @@ def test_gemm(): # graph nn = 1024 - n = tvm.var('n') n = tvm.convert(nn) m = n l = n diff --git a/tests/python/integration/test_reduce.py b/tests/python/integration/test_reduce.py index acbec36c510eb..1f094c274d01b 100644 --- 
a/tests/python/integration/test_reduce.py +++ b/tests/python/integration/test_reduce.py @@ -21,8 +21,8 @@ def test_reduce_prims(): def test_prim(reducer, np_reducer): # graph - n = tvm.var('n') - m = tvm.var('m') + n = tvm.size_var('n') + m = tvm.size_var('m') A = tvm.placeholder((n, m), name='A') R = tvm.compute((n, ), lambda i: tvm.expr.Select((i > 1), 1, 0), name='R') k = tvm.reduce_axis((0, m)) @@ -242,8 +242,8 @@ def fidentity(t0, t1): argmax = tvm.comm_reducer(fcombine, fidentity, name='argmax') - m = tvm.var('m') - n = tvm.var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') idx = tvm.placeholder((m, n), name='idx', dtype='int32') val = tvm.placeholder((m, n), name='val', dtype='float32') k = tvm.reduce_axis((0, n), 'k') diff --git a/tests/python/integration/test_scan.py b/tests/python/integration/test_scan.py index 61a78090be65b..366ed3d4f1a56 100644 --- a/tests/python/integration/test_scan.py +++ b/tests/python/integration/test_scan.py @@ -18,8 +18,8 @@ import numpy as np def test_scan(): - m = tvm.var("m") - n = tvm.var("n") + m = tvm.size_var("m") + n = tvm.size_var("n") X = tvm.placeholder((m, n), name="X") s_state = tvm.placeholder((m, n)) s_init = tvm.compute((1, n), lambda _, i: X[0, i]) diff --git a/tests/python/relay/test_ir_text_printer.py b/tests/python/relay/test_ir_text_printer.py index 6426bf3410c86..e84de67651778 100644 --- a/tests/python/relay/test_ir_text_printer.py +++ b/tests/python/relay/test_ir_text_printer.py @@ -70,7 +70,7 @@ def test_env(): def test_meta_data(): - n, c, h, w = tvm.var("n"), 10, 224, 224 + n, c, h, w = tvm.size_var("n"), 10, 224, 224 x = relay.var("x", shape=(n, c, h, w)) w = relay.var("w") z = relay.nn.conv2d(x, w, @@ -82,8 +82,8 @@ def test_meta_data(): text_no_meta = str(f) assert "channels=2" in text assert "channels=2" in text_no_meta - assert "meta[Variable][0]" in text - assert "meta[Variable][0]" in text_no_meta + assert "meta[SizeVar][0]" in text + assert "meta[SizeVar][0]" in text_no_meta assert "type_key" in text assert "type_key" not in text_no_meta diff --git a/tests/python/relay/test_op_level1.py b/tests/python/relay/test_op_level1.py index 6723369f3886d..adfcbb193de75 100644 --- a/tests/python/relay/test_op_level1.py +++ b/tests/python/relay/test_op_level1.py @@ -177,7 +177,7 @@ def test_bias_add(): def test_expand_dims_infer_type(): for dtype in ['float16', 'float32']: - n, t, d = tvm.var("n"), tvm.var("t"), 100 + n, t, d = tvm.size_var("n"), tvm.size_var("t"), 100 x = relay.var("x", shape=(n, t, d), dtype=dtype) y = relay.expand_dims(x, axis=2) assert "axis=2" in y.astext() @@ -227,7 +227,7 @@ def test_log_softmax(): def test_concatenate(): for dtype in ['float16', 'float32']: - n, t, d = tvm.var("n"), tvm.var("t"), 100 + n, t, d = tvm.size_var("n"), tvm.size_var("t"), 100 x = relay.var("x", shape=(n, t, d)) y = relay.var("y", shape=(n, t, d)) z = relay.concatenate((x, y), axis=-1) @@ -280,7 +280,7 @@ def test_concatenate(): def test_dropout(): for dtype in ['float16', 'float32']: - n, t, d = tvm.var("n"), tvm.var("t"), tvm.var("d") + n, t, d = tvm.size_var("n"), tvm.size_var("t"), tvm.size_var("d") input_ty = relay.TensorType((n, t, d), dtype) x = relay.var("x", input_ty) y = relay.nn.dropout(x, rate=0.75) @@ -342,7 +342,7 @@ def test_dense(): # Dense accuracy for float16 is poor if dtype == 'float16': return - n, c , h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w") + n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") x = relay.var("x", relay.TensorType((n, c, h, w), 
dtype)) w = relay.var("w", relay.TensorType((2, w), dtype)) y = relay.nn.dense(x, w, units=2) @@ -350,15 +350,15 @@ def test_dense(): yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype) - n, c , h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), 2 + n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), 2 x = relay.var("x", relay.TensorType((n, c, h, w), dtype)) - wh, ww = tvm.var("wh"), tvm.var("ww") + wh, ww = tvm.size_var("wh"), tvm.size_var("ww") w = relay.var("w", relay.TensorType((ww, wh), dtype)) y = relay.nn.dense(x, w) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, h, ww), dtype) - n, c , h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), 2 + n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), 2 x = relay.var("x", relay.TensorType((n, c, h, w), dtype)) w = relay.var("w", relay.IncompleteType()) y = relay.nn.dense(x, w, units=2) @@ -388,7 +388,7 @@ def test_dense_dtype(): data_dtype = 'uint8' weight_dtype = 'int8' out_dtype = 'uint8' - n, c , h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w") + n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") x = relay.var("x", relay.TensorType((n, c, h, w), data_dtype)) w = relay.var("w", relay.TensorType((2, w), weight_dtype)) y = relay.nn.dense(x, w, units=2, out_dtype=out_dtype) @@ -400,7 +400,7 @@ def test_dense_dtype(): def test_bitserial_dense(): - m, k = tvm.var("m"), tvm.var("k") + m, k = tvm.size_var("m"), tvm.size_var("k") x = relay.var("x", relay.TensorType((m, k), "int16")) w = relay.var("w", relay.TensorType((k, 32), "int16")) y = relay.nn.bitserial_dense(x, w, units=32) diff --git a/tests/python/relay/test_op_level10.py b/tests/python/relay/test_op_level10.py index d9e29d8bbd9f5..bb1d346ac6e01 100644 --- a/tests/python/relay/test_op_level10.py +++ b/tests/python/relay/test_op_level10.py @@ -309,7 +309,7 @@ def verify_batch_matmul(x_shape, y_shape, out_shape, dtype="float32"): tvm.testing.assert_allclose(z.asnumpy(), z_np, rtol=1e-5) def test_batch_matmul(): - b, m, n, k = tvm.var("b"), tvm.var("m"), tvm.var("n"), tvm.var("k") + b, m, n, k = tvm.size_var("b"), tvm.size_var("m"), tvm.size_var("n"), tvm.size_var("k") x = relay.var("x", relay.TensorType((b, m, k), "float32")) y = relay.var("y", relay.TensorType((b, n, k), "float32")) z = relay.nn.batch_matmul(x, y) diff --git a/tests/python/relay/test_op_level2.py b/tests/python/relay/test_op_level2.py index 4b914ee11fee8..a098b5c5cdfad 100644 --- a/tests/python/relay/test_op_level2.py +++ b/tests/python/relay/test_op_level2.py @@ -128,7 +128,7 @@ def run_test_conv1d(dtype, out_dtype, scale, dshape, kshape, def test_conv2d_infer_type(): # symbolic in batch dimension - n, c, h, w = tvm.var("n"), 10, 224, 224 + n, c, h, w = tvm.size_var("n"), 10, 224, 224 x = relay.var("x", relay.ty.TensorType((n, c, h, w), "float32")) w = relay.var("w") y = relay.nn.conv2d(x, w, @@ -142,7 +142,7 @@ def test_conv2d_infer_type(): (2, 10, 3, 3), "float32") # infer by shape of w, mixed precision - n, c, h, w = tvm.var("n"), 10, 224, 224 + n, c, h, w = tvm.size_var("n"), 10, 224, 224 x = relay.var("x", relay.TensorType((n, c, h, w), "int8")) w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8")) y = relay.nn.conv2d(x, w, out_dtype="int32") @@ -152,7 +152,7 @@ def test_conv2d_infer_type(): (n, 2, 222, 222), "int32") # infer shape in case of different dtypes for input and weight. 
- n, c, h, w = tvm.var("n"), 10, 224, 224 + n, c, h, w = tvm.size_var("n"), 10, 224, 224 x = relay.var("x", relay.TensorType((n, c, h, w), "uint8")) w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8")) y = relay.nn.conv2d(x, w, out_dtype="int32") @@ -391,7 +391,7 @@ def run_test_conv2d_cuda(dtype, out_dtype, scale, dshape, kshape, def test_conv3d_infer_type(): # symbolic in batch dimension - n, c, d, h, w = tvm.var("n"), 10, 224, 224, 224 + n, c, d, h, w = tvm.size_var("n"), 10, 224, 224, 224 x = relay.var("x", relay.ty.TensorType((n, c, d, h, w), "float32")) w = relay.var("w") y = relay.nn.conv3d(x, w, @@ -405,7 +405,7 @@ def test_conv3d_infer_type(): (2, 10, 3, 3, 3), "float32") # infer by shape of w, mixed precision - n, c, d, h, w = tvm.var("n"), 10, 224, 224, 224 + n, c, d, h, w = tvm.size_var("n"), 10, 224, 224, 224 x = relay.var("x", relay.TensorType((n, c, d, h, w), "int8")) w = relay.var("w", relay.TensorType((2, 10, 3, 3, 3), "int8")) y = relay.nn.conv3d(x, w, out_dtype="int32") @@ -415,7 +415,7 @@ def test_conv3d_infer_type(): (n, 2, 222, 222, 222), "int32") # infer shape in case of different dtypes for input and weight. - n, c, d, h, w = tvm.var("n"), 10, 224, 224, 224 + n, c, d, h, w = tvm.size_var("n"), 10, 224, 224, 224 x = relay.var("x", relay.TensorType((n, c, d, h, w), "uint8")) w = relay.var("w", relay.TensorType((2, 10, 3, 3, 3), "int8")) y = relay.nn.conv3d(x, w, out_dtype="int32") @@ -530,7 +530,7 @@ def run_test_conv3d(dtype, out_dtype, scale, dshape, kshape, def test_conv2d_transpose_infer_type(): # symbolic in batch dimension - n, c, h, w = tvm.var("n"), 10, 10, 12 + n, c, h, w = tvm.size_var("n"), 10, 10, 12 x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) w = relay.var("w", relay.IncompleteType()) y = relay.nn.conv2d_transpose(x, w, @@ -545,7 +545,7 @@ def test_conv2d_transpose_infer_type(): (10, 15, 3, 3), "float32") # infer by shape of w, mixed precision - n, h, w, c = tvm.var("n"), 10, 10, 12 + n, h, w, c = tvm.size_var("n"), 10, 10, 12 x = relay.var("x", relay.TensorType((n, h, w, c), "float32")) w = relay.var("w", relay.TensorType((12, 11, 5, 5), "float32")) y = relay.nn.conv2d_transpose(x, w, @@ -630,7 +630,7 @@ def test_conv1d_transpose_ncw_run(): def test_upsampling_infer_type(): - n, c , h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w") + n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") scale = tvm.const(2.0, "float64") x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout="NCHW", method="bilinear") @@ -639,14 +639,15 @@ def test_upsampling_infer_type(): assert yy.checked_type == relay.TensorType((n, c, tvm.expr.Cast("int32", tvm.round(h*scale)), tvm.expr.Cast("int32", tvm.round(w*scale))), "float32") - n, c = tvm.var("n"), tvm.var("c") + n, c = tvm.size_var("n"), tvm.size_var("c") x = relay.var("x", relay.TensorType((n, c, 100, 200), "float32")) y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout="NCHW", method="bilinear") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, 200, 400), "float32") def test_upsampling3d_infer_type(): - n, c, d, h, w = tvm.var("n"), tvm.var("c"), tvm.var("d"), tvm.var("h"), tvm.var("w") + n, c, d, h, w = tvm.size_var("n"), tvm.size_var("c"),\ + tvm.size_var("d"), tvm.size_var("h"), tvm.size_var("w") scale = tvm.const(2.0, "float64") x = relay.var("x", relay.TensorType((n, c, d, h, w), "float32")) y = relay.nn.upsampling3d(x, scale_d=2, scale_h=2, scale_w=2, 
layout="NCDHW", method="trilinear") @@ -656,14 +657,14 @@ def test_upsampling3d_infer_type(): tvm.expr.Cast("int32", tvm.round(h*scale)), tvm.expr.Cast("int32", tvm.round(w*scale))), "float32") - n, c = tvm.var("n"), tvm.var("c") + n, c = tvm.size_var("n"), tvm.size_var("c") x = relay.var("x", relay.TensorType((n, c, 100, 100, 200), "float32")) y = relay.nn.upsampling3d(x, scale_d=2, scale_h=2, scale_w=2, layout="NCDHW", method="trilinear") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, 200, 200, 400), "float32") def _test_pool2d(opfunc, reffunc): - n, c, h, w = tvm.var("n"), 10, 224, 224 + n, c, h, w = tvm.size_var("n"), 10, 224, 224 x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = opfunc(x, pool_size=(1, 1)) assert "pool_size=" in y.astext() @@ -683,7 +684,7 @@ def _test_pool2d(opfunc, reffunc): tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def _test_pool2d_int(opfunc, reffunc, dtype): - n, c, h, w = tvm.var("n"), 10, 224, 224 + n, c, h, w = tvm.size_var("n"), 10, 224, 224 x = relay.var("x", relay.TensorType((n, c, h, w), dtype)) y = opfunc(x, pool_size=(1, 1)) assert "pool_size=" in y.astext() @@ -703,13 +704,13 @@ def _test_pool2d_int(opfunc, reffunc, dtype): tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def _test_global_pool2d(opfunc, reffunc): - n, c, h, w = tvm.var("n"), tvm.var("c"), 224, 224 + n, c, h, w = tvm.size_var("n"), tvm.size_var("c"), 224, 224 x = relay.var("x", relay.TensorType((n, h, w, c), "float32")) y = opfunc(x, layout="NHWC") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, 1, 1, c), "float32") - n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w") + n, c, h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = opfunc(x) yy = run_infer_type(y) @@ -768,7 +769,7 @@ def _test_pool1d(opfunc): def test_pool3d(): def _test_pool3d(opfunc): - n, c, d, h, w = tvm.var("n"), 10, 5, 224, 224 + n, c, d, h, w = tvm.size_var("n"), 10, 5, 224, 224 x = relay.var("x", relay.TensorType((n, c, d, h, w), "float32")) y = opfunc(x, pool_size=(1, 1, 1)) assert "pool_size=" in y.astext() @@ -828,7 +829,7 @@ def test_avg_pool2d_no_count_pad(): tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def test_flatten_infer_type(): - d1, d2, d3, d4 = tvm.var("d1"), tvm.var("d2"), tvm.var("d3"), tvm.var("d4") + d1, d2, d3, d4 = tvm.size_var("d1"), tvm.size_var("d2"), tvm.size_var("d3"), tvm.size_var("d4") x = relay.var("x", relay.TensorType((d1, d2, d3, d4), "float32")) y = relay.nn.batch_flatten(x) yy = run_infer_type(y) @@ -873,7 +874,7 @@ def test_pad_infer_type(): assert yy.checked_type == relay.TensorType((3, 6, 9, 12), "float32") # some symbolic values - n, c, h, w = tvm.var("n"), 2, 3, tvm.var("w") + n, c, h, w = tvm.size_var("n"), 2, 3, tvm.size_var("w") t = relay.var("t", relay.TensorType((n, c, h, w), "float32")) y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4))) yy = run_infer_type(y) @@ -896,7 +897,7 @@ def _test_run(dtype): _test_run('int32') def test_lrn(): - n, c , h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w") + n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") x = relay.var("x", shape=(n, c , h, w)) y = relay.nn.lrn(x, size=10, axis=2, bias=0.5, alpha=.00001, beta=0.75) "alpha=" in y.astext() @@ -927,7 +928,7 @@ def test_lrn(): 
tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) def test_l2_normalize(): - n, c , h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w") + n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") x = relay.var("x", shape=(n, c , h, w)) y = relay.nn.l2_normalize(x, eps=0.001, axis=[1]) "axis=" in y.astext() @@ -977,7 +978,7 @@ def test_batch_flatten(): def _test_upsampling(layout, method, align_corners=False): - n, c, h, w = tvm.var("n"), 16, 32, 32 + n, c, h, w = tvm.size_var("n"), 16, 32, 32 scale_h = 2.0 scale_w = 2.0 dtype = "float32" @@ -1016,7 +1017,7 @@ def test_upsampling(): _test_upsampling("NHWC", "bilinear", True) def _test_upsampling3d(layout, method, coordinate_transformation_mode="half_pixel"): - n, c, d, h, w = tvm.var("n"), 8, 16, 16, 16 + n, c, d, h, w = tvm.size_var("n"), 8, 16, 16, 16 scale_d = 2.0 scale_h = 2.0 scale_w = 2.0 @@ -1183,7 +1184,7 @@ def _has_fast_int8_instructions(asm, target): def test_bitserial_conv2d_infer_type(): # Basic shape test with ambiguous batch. - n, c, h, w = tvm.var("n"), 32, 224, 224 + n, c, h, w = tvm.size_var("n"), 32, 224, 224 x = relay.var("x", relay.ty.TensorType((n, c, h, w), "int16")) w = relay.var("w", relay.ty.TensorType((32, 32, 3, 3), "int16")) y = relay.nn.bitserial_conv2d( diff --git a/tests/python/relay/test_op_level3.py b/tests/python/relay/test_op_level3.py index 2d92489328af8..13f17ca6713be 100644 --- a/tests/python/relay/test_op_level3.py +++ b/tests/python/relay/test_op_level3.py @@ -171,7 +171,7 @@ def verify_squeeze(shape, dtype, axis): def test_transpose_infer_type(): - n, t, d = tvm.var("n"), tvm.var("t"), 100 + n, t, d = tvm.size_var("n"), tvm.size_var("t"), 100 x = relay.var("x", relay.TensorType((n, t, d), "float32")) y = relay.transpose(x, axes=(1, 0, 2)) assert "axes=" in y.astext() @@ -279,7 +279,7 @@ def test_reshape_like_infer_type(): assert zz.checked_type == relay.TensorType((1, 6), "float32") # symbolic shape - n, c, h, w = tvm.var("n"), 2, 3, tvm.var("w") + n, c, h, w = tvm.size_var("n"), 2, 3, tvm.size_var("w") x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = relay.var("y", relay.TensorType((1, 8, 8), "float32")) z = relay.reshape_like(x, y) @@ -452,7 +452,7 @@ def test_full_like_infer_type(): assert yy.checked_type == relay.TensorType((1, 2, 3), "float32") # symbolic shape - n, c, h, w = tvm.var("n"), 2, 3, tvm.var("w") + n, c, h, w = tvm.size_var("n"), 2, 3, tvm.size_var("w") base = relay.var("base", relay.TensorType((n, c, h, w), "float32")) fill = relay.var("fill", relay.TensorType((), "float32")) y = relay.full_like(base, fill) @@ -480,7 +480,7 @@ def verify_full_like(base, fill_value, dtype): def test_infer_type_leaky_relu(): - n, c , h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w") + n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = relay.nn.leaky_relu(x, alpha=0.1) "alpha=0.1" in y.astext() @@ -544,7 +544,7 @@ def verify_infer_type_prelu(data, alpha, axis, output, dtype="float32"): def test_infer_type_prelu(): - n, c , h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w") + n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") verify_infer_type_prelu((n, c, h, w), (c,), 1, (n, c, h, w)) verify_infer_type_prelu((n, h, w, c), (c,), 3, (n, h, w, c)) verify_infer_type_prelu((n, c, h, w), None, 1, (n, c, h, w)) diff --git a/tests/python/relay/test_op_level4.py 
b/tests/python/relay/test_op_level4.py index 431f014c31a06..2b25d6a90af6f 100644 --- a/tests/python/relay/test_op_level4.py +++ b/tests/python/relay/test_op_level4.py @@ -29,7 +29,7 @@ def run_infer_type(expr): def test_binary_op(): def check_binary_op(opfunc, ref): - n = tvm.var("n") + n = tvm.size_var("n") t1 = relay.TensorType((5, n, 5)) t2 = relay.TensorType((n, 1)) x = relay.var("x", t1) diff --git a/tests/python/relay/test_op_level5.py b/tests/python/relay/test_op_level5.py index 808fc49c29bbb..d4abf3d82cedf 100644 --- a/tests/python/relay/test_op_level5.py +++ b/tests/python/relay/test_op_level5.py @@ -31,7 +31,7 @@ def run_infer_type(expr): return entry if isinstance(expr, relay.Function) else entry.body def test_resize_infer_type(): - n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w") + n, c, h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") x = relay.var("x", relay.TensorType((n, c, h, w), "int8")) th, tw = tvm.var("th"), tvm.var("tw") z = relay.image.resize(x, (th, tw)) @@ -187,7 +187,7 @@ def verify_multibox_prior(x, dshape, ref_res, sizes=(1.0,), x = relay.var("x", relay.TensorType(dshape, "float32")) verify_multibox_prior(x, dshape, ref_res, sizes, ratios, steps, offsets, check_size=True) - y = relay.var("y", relay.TensorType((tvm.var("n"), 3, 56, 56), "float32")) + y = relay.var("y", relay.TensorType((tvm.size_var("n"), 3, 56, 56), "float32")) verify_multibox_prior(x, dshape, ref_res, sizes, ratios, steps, offsets, check_size=True, check_type_only=True) @@ -195,7 +195,7 @@ def verify_multibox_prior(x, dshape, ref_res, sizes=(1.0,), ref_res = get_ref_result(dshape, clip=False) x = relay.var("x", relay.TensorType(dshape, "float32")) verify_multibox_prior(x, dshape, ref_res, clip=False) - y = relay.var("y", relay.TensorType((tvm.var("n"), 24, 32, 32), "float32")) + y = relay.var("y", relay.TensorType((tvm.size_var("n"), 24, 32, 32), "float32")) verify_multibox_prior(x, dshape, ref_res, clip=False, check_type_only=True) @@ -287,7 +287,7 @@ def verify_nms(x0_data, x1_data, dshape, ref_res, ref_indices_res, np_indices_result = np.array([[3, 0, -1, -1, -1]]) num_anchors = 5 - dshape = (tvm.var("n"), num_anchors, 6) + dshape = (tvm.size_var("n"), num_anchors, 6) verify_nms(np_data, np_valid_count, dshape, np_result, np_indices_result, force_suppress=True, top_k=2, check_type_only=True) dshape = (1, num_anchors, 6) @@ -298,7 +298,7 @@ def verify_nms(x0_data, x1_data, dshape, ref_res, ref_indices_res, [1, 0.7, 30, 60, 50, 80], [-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1]]]) np_indices_result = np.array([[3, 0, 1, -1, -1]]) - dshape = (tvm.var("n"), num_anchors, 6) + dshape = (tvm.size_var("n"), num_anchors, 6) verify_nms(np_data, np_valid_count, dshape, np_result, np_indices_result, check_type_only=True) dshape = (1, num_anchors, 6) @@ -361,7 +361,7 @@ def test_default_value(): def test_threshold(): num_anchors = 5 num_classes = 5 - n = tvm.var("n") + n = tvm.size_var("n") cls_prob = relay.var( "cls_prob", relay.ty.TensorType((n, num_anchors, num_classes), "float32")) @@ -527,7 +527,7 @@ def verify_yolo_reorg(shape, stride, out_shape): assert "stride=" in z.astext() assert zz.checked_type == relay.ty.TensorType(out_shape, "float32") - n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w") + n, c, h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") idxd = tvm.indexdiv verify_yolo_reorg((n, c, 20, 20), 10, (n, c*10*10, 2, 2)) verify_yolo_reorg((n, c, h, w), 2, (n, c*2*2, idxd(h, 2), idxd(w, 
2))) diff --git a/tests/python/unittest/test_arith_const_int_bound.py b/tests/python/unittest/test_arith_const_int_bound.py index 49448616a0faa..ae2837d6446f4 100644 --- a/tests/python/unittest/test_arith_const_int_bound.py +++ b/tests/python/unittest/test_arith_const_int_bound.py @@ -275,6 +275,14 @@ def test_mix_index_bound(): assert bd.max_value == (23 // 7) * 7 + 6 +def test_size_var_bound(): + analyzer = tvm.arith.Analyzer() + x = tvm.size_var("x") + bd = analyzer.const_int_bound(x) + assert bd.min_value == 0 + assert bd.max_value == bd.POS_INF + + if __name__ == "__main__": test_dtype_bound() test_cast_bound() @@ -288,3 +296,4 @@ def test_mix_index_bound(): test_select_bound() test_shift_and_bound() test_mix_index_bound() + test_size_var_bound() diff --git a/tests/python/unittest/test_arith_intset.py b/tests/python/unittest/test_arith_intset.py index 17cc6f1a712b3..20e3f573776e1 100644 --- a/tests/python/unittest/test_arith_intset.py +++ b/tests/python/unittest/test_arith_intset.py @@ -60,6 +60,7 @@ def test_add_sub(): def test_mul_div(): ck = IntSetChecker() x, y = tvm.var("x"), tvm.var("y") + tdiv = tvm.truncdiv ck.analyzer.update(y, tvm.arith.ConstIntBound(1, 100), override=True) ck.verify(x * y, {x : tvm.arith.IntervalSet(0, 10)}, (0, 10 * y)) diff --git a/tests/python/unittest/test_arith_stmt_simplify.py b/tests/python/unittest/test_arith_stmt_simplify.py index 272893e20c127..9e0b47749fee2 100644 --- a/tests/python/unittest/test_arith_stmt_simplify.py +++ b/tests/python/unittest/test_arith_stmt_simplify.py @@ -20,7 +20,7 @@ def test_stmt_simplify(): ib = tvm.ir_builder.create() A = ib.pointer("float32", name="A") C = ib.pointer("float32", name="C") - n = tvm.var("n") + n = tvm.size_var("n") with ib.for_range(0, n, name="i") as i: with ib.if_scope(i < 12): A[i] = C[i] @@ -34,7 +34,7 @@ def test_thread_extent_simplify(): ib = tvm.ir_builder.create() A = ib.pointer("float32", name="A") C = ib.pointer("float32", name="C") - n = tvm.var("n") + n = tvm.size_var("n") tx = tvm.thread_axis("threadIdx.x") ty = tvm.thread_axis("threadIdx.y") ib.scope_attr(tx, "thread_extent", n) @@ -48,7 +48,7 @@ def test_thread_extent_simplify(): def test_basic_likely_elimination(): - n = tvm.var('n') + n = tvm.size_var('n') X = tvm.placeholder(shape=(n,), name="x") W = tvm.placeholder(shape=(n + 1,), dtype="int32", name="w") @@ -87,7 +87,8 @@ def sls(n, d): return tvm.compute(oshape, sls) - m, n, d, i, l = tvm.var('m'), tvm.var('n'), tvm.var('d'), tvm.var('i'), tvm.var('l') + m, n, d, i, l = tvm.size_var('m'), tvm.size_var('n'), tvm.size_var('d'),\ + tvm.size_var('i'), tvm.size_var('l') data_ph = tvm.placeholder((m, d * 32), name="data") indices_ph = tvm.placeholder((i,), name="indices", dtype="int32") lengths_ph = tvm.placeholder((n,), name="lengths", dtype="int32") diff --git a/tests/python/unittest/test_build_lower.py b/tests/python/unittest/test_build_lower.py index 090120c1c921a..58312dc83932d 100644 --- a/tests/python/unittest/test_build_lower.py +++ b/tests/python/unittest/test_build_lower.py @@ -17,8 +17,8 @@ import tvm def test_lower_rfactor(): - n = tvm.var("n") - m = tvm.var("m") + n = tvm.size_var("n") + m = tvm.size_var("m") A = tvm.placeholder((n, m), name='A') k = tvm.reduce_axis((0, m), "k") B = tvm.compute((n,), lambda i: tvm.sum(A[i, k], axis=k), name="B") @@ -33,7 +33,7 @@ def test_lower_rfactor(): fapi = tvm.lower(s, [A, B]) def test_dependent_output_shape(): - n, m, x = tvm.var('n'), tvm.var('m'), tvm.var('x') + n, m, x = tvm.size_var('n'), tvm.size_var('m'), tvm.size_var('x') A = 
tvm.placeholder((n, m)) B = tvm.compute((m, n//x), lambda i, j: A[i,j] , name='B') s = tvm.create_schedule(B.op) diff --git a/tests/python/unittest/test_codegen_arm.py b/tests/python/unittest/test_codegen_arm.py index 2385f2ffb59a6..8e2ad7aa76e03 100644 --- a/tests/python/unittest/test_codegen_arm.py +++ b/tests/python/unittest/test_codegen_arm.py @@ -47,7 +47,7 @@ def test_vmlal_s16(): target = 'llvm -target=armv7l-none-linux-gnueabihf -mcpu=cortex-a53 -mattr=+neon' def check_correct_assembly(N): - K = tvm.var("K") + K = tvm.size_var("K") A = tvm.placeholder((K, N), dtype="int8", name='A') B = tvm.placeholder((K, N), dtype="int8", name='B') k = tvm.reduce_axis((0, K)) @@ -67,7 +67,7 @@ def check_correct_assembly(N): check_correct_assembly(64) def check_broadcast_correct_assembly(N): - K = tvm.var("K") + K = tvm.size_var("K") A = tvm.placeholder((K, N), dtype="int8", name='A') B = tvm.placeholder((K,), dtype="int8", name='B') k = tvm.reduce_axis((0, K)) diff --git a/tests/python/unittest/test_codegen_c_host.py b/tests/python/unittest/test_codegen_c_host.py index 92baca25bf11e..c08fcd6afbc17 100644 --- a/tests/python/unittest/test_codegen_c_host.py +++ b/tests/python/unittest/test_codegen_c_host.py @@ -67,7 +67,7 @@ def check_c(): # Specifically allow offset to test codepath when offset is available Ab = tvm.decl_buffer( A.shape, A.dtype, - elem_offset=tvm.var('Aoffset'), + elem_offset=tvm.size_var('Aoffset'), offset_factor=8, name='A') binds = {A : Ab} diff --git a/tests/python/unittest/test_codegen_device.py b/tests/python/unittest/test_codegen_device.py index 5a10618fb2692..1c3ece2c42a04 100644 --- a/tests/python/unittest/test_codegen_device.py +++ b/tests/python/unittest/test_codegen_device.py @@ -45,7 +45,7 @@ def check_target(device): def test_add_pipeline(): - n = tvm.var('n') + n = tvm.size_var('n') A = tvm.placeholder((n,), name='A') B = tvm.placeholder((), name='B') C = tvm.compute(A.shape, lambda *i: A(*i) + B(), name='C') diff --git a/tests/python/unittest/test_codegen_llvm.py b/tests/python/unittest/test_codegen_llvm.py index 4920206ee019f..42e85816f0d81 100644 --- a/tests/python/unittest/test_codegen_llvm.py +++ b/tests/python/unittest/test_codegen_llvm.py @@ -79,7 +79,7 @@ def check_llvm(use_file): def test_llvm_lookup_intrin(): ib = tvm.ir_builder.create() - m = tvm.var("m") + m = tvm.size_var("m") A = ib.pointer("uint8x8", name="A") x = tvm.call_llvm_intrin("uint8x8", "llvm.ctpop.i8", tvm.const(1, 'uint32'), A) ib.emit(x) @@ -131,7 +131,7 @@ def check_llvm(): # Specifically allow offset to test codepath when offset is available Ab = tvm.decl_buffer( A.shape, A.dtype, - elem_offset=tvm.var('Aoffset'), + elem_offset=tvm.size_var('Aoffset'), offset_factor=8, name='A') binds = {A : Ab} diff --git a/tests/python/unittest/test_codegen_rocm.py b/tests/python/unittest/test_codegen_rocm.py index bba72e0531422..9f8ab772c5f79 100644 --- a/tests/python/unittest/test_codegen_rocm.py +++ b/tests/python/unittest/test_codegen_rocm.py @@ -26,8 +26,8 @@ @unittest.skipIf(not tvm.rocm(0).exist or not tvm.module.enabled("rocm"), "skip because rocm is not enabled..") def test_rocm_cross_thread_reduction(): # based on the reduction tutorial - n = tvm.var("n") - m = tvm.var("m") + n = tvm.size_var("n") + m = tvm.size_var("m") A = tvm.placeholder((n, m), name='A') k = tvm.reduce_axis((0, m), "k") B = tvm.compute((n,), lambda i: tvm.sum(A[i, k], axis=k), name="B") diff --git a/tests/python/unittest/test_codegen_static_init.py b/tests/python/unittest/test_codegen_static_init.py index 
b1092309e70af..80c4fa4df0e88 100644 --- a/tests/python/unittest/test_codegen_static_init.py +++ b/tests/python/unittest/test_codegen_static_init.py @@ -20,9 +20,9 @@ def test_static_callback(): dtype = 'int64' - n = tvm.var('n') + n = tvm.size_var('n') Ab = tvm.decl_buffer((n, ), dtype) - i = tvm.var('i') + i = tvm.size_var('i') ib = tvm.ir_builder.create() A = ib.buffer_ptr(Ab) cp = tvm.thread_axis((0, 1), "cop") @@ -41,9 +41,9 @@ def test_static_callback(): def test_static_init(): dtype = 'int64' - n = tvm.var('n') + n = tvm.size_var('n') Ab = tvm.decl_buffer((n, ), dtype) - i = tvm.var('i') + i = tvm.size_var('i') ib = tvm.ir_builder.create() handle = tvm.call_intrin("handle", "tvm_static_handle") ib.emit( diff --git a/tests/python/unittest/test_codegen_vm_basic.py b/tests/python/unittest/test_codegen_vm_basic.py index 7ff2177280343..eebcb2e716538 100644 --- a/tests/python/unittest/test_codegen_vm_basic.py +++ b/tests/python/unittest/test_codegen_vm_basic.py @@ -32,7 +32,7 @@ def tvm_call_back_get_shape(shape0): print(shape0) assert shape0 == a.shape[0] - n = tvm.var('n') + n = tvm.size_var('n') Ab = tvm.decl_buffer((n, ), tvm.float32) stmt = tvm.make.Evaluate(tvm.call_packed("tvm_call_back_get_shape", Ab.shape[0])) fapi = tvm.ir_pass.MakeAPI(stmt, "print_shape", [Ab], 0, True) @@ -47,9 +47,9 @@ def tvm_stack_vm_print(*x): def test_stack_vm_loop(): dtype = 'int64' - n = tvm.var('n') + n = tvm.size_var('n') Ab = tvm.decl_buffer((n, ), dtype) - i = tvm.var('i') + i = tvm.size_var('i') ib = tvm.ir_builder.create() A = ib.buffer_ptr(Ab) @@ -69,7 +69,7 @@ def check(f): def test_stack_vm_cond(): dtype = 'int64' - n = tvm.var('n') + n = tvm.size_var('n') Ab = tvm.decl_buffer((n, ), dtype) ib = tvm.ir_builder.create() @@ -93,9 +93,9 @@ def check(f): def test_vm_parallel(): dtype = 'int64' - n = tvm.var('n') + n = tvm.size_var('n') Ab = tvm.decl_buffer((n, ), dtype) - i = tvm.var('i') + i = tvm.size_var('i') ib = tvm.ir_builder.create() A = ib.buffer_ptr(Ab) with ib.for_range(0, n, "i", for_type="parallel") as i: diff --git a/tests/python/unittest/test_hybrid_script.py b/tests/python/unittest/test_hybrid_script.py index 5f1facb2b45fc..9eca902ec9ec3 100644 --- a/tests/python/unittest/test_hybrid_script.py +++ b/tests/python/unittest/test_hybrid_script.py @@ -98,8 +98,8 @@ def outer_product(n, m, a, b): #Test global function #Test bridge between frontend and backend def test_outer_product(): - n = tvm.var('n') - m = tvm.var('m') + n = tvm.size_var('n') + m = tvm.size_var('m') a = tvm.placeholder((n, ), name='a') b = tvm.placeholder((m, ), name='b') @@ -167,7 +167,7 @@ def fanout(n, a): b[i] = sigma return b - n = tvm.var('n') + n = tvm.size_var('n') a = tvm.placeholder((n, ), 'float32', name='a') try: b = fanout(n, a) diff --git a/tests/python/unittest/test_ir_builder.py b/tests/python/unittest/test_ir_builder.py index 8b9da90c914cc..527f68669281c 100644 --- a/tests/python/unittest/test_ir_builder.py +++ b/tests/python/unittest/test_ir_builder.py @@ -19,7 +19,7 @@ def test_for(): ib = tvm.ir_builder.create() - n = tvm.var("n") + n = tvm.size_var("n") A = ib.allocate("float32", n, name="A", scope="global") with ib.for_range(0, n, name="i") as i: A[i] = A[i] + 1 @@ -39,7 +39,7 @@ def test_for(): def test_if(): ib = tvm.ir_builder.create() - n = tvm.var("n") + n = tvm.size_var("n") A = ib.pointer("float32", name="A") tmod = tvm.truncmod with ib.for_range(0, n, name="i") as i: @@ -60,7 +60,7 @@ def test_if(): def test_prefetch(): A = tvm.placeholder((10, 20), name="A") ib = tvm.ir_builder.create() - n 
= tvm.var("n") + n = tvm.size_var("n") with ib.for_range(0, n, name="i") as i: ib.emit( @@ -105,7 +105,7 @@ def check_target(target): check_target("llvm") def test_gpu(): - n = tvm.var('n') + n = tvm.size_var('n') dtype = "float32" A = tvm.placeholder((n,), name='A') B = tvm.placeholder((n,), name='B') diff --git a/tests/python/unittest/test_lang_buffer.py b/tests/python/unittest/test_lang_buffer.py index f0f246139bebb..e681bd9a5230a 100644 --- a/tests/python/unittest/test_lang_buffer.py +++ b/tests/python/unittest/test_lang_buffer.py @@ -19,9 +19,9 @@ import numpy as np def test_buffer(): - m = tvm.var('m') - n = tvm.var('n') - l = tvm.var('l') + m = tvm.size_var('m') + n = tvm.size_var('n') + l = tvm.size_var('l') Ab = tvm.decl_buffer((m, n), tvm.float32) Bb = tvm.decl_buffer((n, l), tvm.float32) @@ -31,8 +31,8 @@ def test_buffer(): def test_buffer_access_ptr(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') Ab = tvm.decl_buffer((m, n), tvm.float32, strides=[n + 1 , 1]) aptr = Ab.access_ptr("rw") assert tvm.ir_pass.Equal(aptr.args[3], Ab.strides[0] * m) @@ -43,14 +43,14 @@ def test_buffer_access_ptr(): def test_buffer_access_ptr_offset(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') Ab = tvm.decl_buffer((m, n), tvm.float32) aptr = Ab.access_ptr("rw", offset=100) offset = tvm.ir_pass.Simplify(aptr.args[2]) assert tvm.ir_pass.Equal(offset, 100) assert aptr.args[4].value == Buffer.READ | Buffer.WRITE - v = tvm.var('int32') + v = tvm.size_var('int32') aptr = Ab.access_ptr("rw", offset=100 + 100 + v) offset = tvm.ir_pass.Simplify(aptr.args[2]) assert tvm.ir_pass.Equal(offset, 200 + v) @@ -62,8 +62,8 @@ def test_buffer_access_ptr_offset(): def test_buffer_access_ptr_extent(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') Ab = tvm.decl_buffer((m, n), tvm.float32) aptr = Ab.access_ptr("rw") assert tvm.ir_pass.Equal(aptr.args[3], m * n) @@ -75,8 +75,8 @@ def test_buffer_access_ptr_extent(): def test_buffer_vload(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') Ab = tvm.decl_buffer((m, n), tvm.float32, elem_offset=100) load = Ab.vload([2, 3]) offset = tvm.ir_pass.Simplify(load.index) @@ -84,11 +84,11 @@ def test_buffer_vload(): def test_buffer_index_merge_mult_mod(): - m = tvm.var('m') - n = tvm.var('n') - s = tvm.var('s') - k0 = tvm.var('k0') - k1 = tvm.var('k1') + m = tvm.size_var('m') + n = tvm.size_var('n') + s = tvm.size_var('s') + k0 = tvm.size_var('k0') + k1 = tvm.size_var('k1') A = tvm.decl_buffer((m, n), tvm.float32) A_stride = tvm.decl_buffer((m, n), tvm.float32, strides=(s, 1)) def assert_simplified_equal(index_simplified, index_direct): @@ -123,9 +123,9 @@ def assert_simplified_equal(index_simplified, index_direct): def test_buffer_broadcast(): - m0, m1, m2 = tvm.var("m0"), tvm.var("m1"), tvm.var("m2") - n0, n1, n2 = tvm.var("n0"), tvm.var("n1"), tvm.var("n2") - o0, o1, o2 = tvm.var("o0"), tvm.var("o1"), tvm.var("o2") + m0, m1, m2 = tvm.size_var("m0"), tvm.size_var("m1"), tvm.size_var("m2") + n0, n1, n2 = tvm.size_var("n0"), tvm.size_var("n1"), tvm.size_var("n2") + o0, o1, o2 = tvm.size_var("o0"), tvm.size_var("o1"), tvm.size_var("o2") A = tvm.placeholder((m0, m1, m2), name='A') B = tvm.placeholder((n0, n1, n2), name='B') @@ -151,9 +151,9 @@ def check(): def test_buffer_broadcast_expr(): - n0, m0, x = tvm.var('n0'), tvm.var('m0'), tvm.var('x') - n1, m1 = tvm.var('n1'), tvm.var('m1') - o0, o1 = tvm.var('o0'), tvm.var('o1') + n0, 
m0, x = tvm.size_var('n0'), tvm.size_var('m0'), tvm.size_var('x') + n1, m1 = tvm.size_var('n1'), tvm.size_var('m1') + o0, o1 = tvm.size_var('o0'), tvm.size_var('o1') A = tvm.placeholder((m0, n0), name='A') B = tvm.placeholder((m1, n1), name='B') diff --git a/tests/python/unittest/test_lang_group.py b/tests/python/unittest/test_lang_group.py index dc6837e2be46c..3efc9bc5096b0 100644 --- a/tests/python/unittest/test_lang_group.py +++ b/tests/python/unittest/test_lang_group.py @@ -18,8 +18,8 @@ import tvm def test_scan_group(): - m = tvm.var("m") - n = tvm.var("n") + m = tvm.size_var("m") + n = tvm.size_var("n") x = tvm.compute((m, n), lambda i, j: tvm.const(1, "float32"), name="x") s_state = tvm.placeholder((m, n)) s_init = tvm.compute((1, n), lambda _, i: x[0, i]) @@ -50,8 +50,8 @@ def test_scan_group(): pass def test_compute_group(): - m = tvm.var("m") - n = tvm.var("n") + m = tvm.size_var("m") + n = tvm.size_var("n") x = tvm.compute((m, n), lambda i, j: tvm.const(1, "float32"), name="x") x1 = tvm.compute(x.shape, lambda *i: x(*i) + 1, name="x1") x2 = tvm.compute(x.shape, lambda *i: x1(*i) + 2, name="x2") @@ -64,8 +64,8 @@ def test_compute_group(): assert g.num_child_stages == 2 def test_nest_group(): - m = tvm.var("m") - n = tvm.var("n") + m = tvm.size_var("m") + n = tvm.size_var("n") x = tvm.compute((m, n), lambda i, j: tvm.const(1, "float32"), name="x") x1 = tvm.compute(x.shape, lambda *i: x(*i) + 1, name="x1") x2 = tvm.compute(x.shape, lambda *i: x1(*i) + 2, name="x2") diff --git a/tests/python/unittest/test_lang_schedule.py b/tests/python/unittest/test_lang_schedule.py index 0a653066bff71..eeab81b965b4e 100644 --- a/tests/python/unittest/test_lang_schedule.py +++ b/tests/python/unittest/test_lang_schedule.py @@ -19,9 +19,9 @@ import pickle as pkl def test_schedule_create(): - m = tvm.var('m') - n = tvm.var('n') - l = tvm.var('l') + m = tvm.size_var('m') + n = tvm.size_var('n') + l = tvm.size_var('l') A = tvm.placeholder((m, l), name='A') B = tvm.placeholder((n, l), name='B') AA = tvm.compute((m, l), lambda i, j: A[i, j]) @@ -49,7 +49,7 @@ def test_schedule_create(): def test_reorder(): - m = tvm.var('m') + m = tvm.size_var('m') A = tvm.placeholder((m,), name='A') T = tvm.compute(m, lambda i: A[i+1]) @@ -69,7 +69,7 @@ def test_reorder(): pass def test_split(): - m = tvm.var('m') + m = tvm.size_var('m') A = tvm.placeholder((m,), name='A') T = tvm.compute((m,), lambda i: A[i]) @@ -79,8 +79,8 @@ def test_split(): def test_tile(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') A = tvm.placeholder((m, n), name='A') T = tvm.compute((m, n), lambda i, j: A[i, j]) @@ -90,8 +90,8 @@ def test_tile(): def test_fuse(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') A = tvm.placeholder((m, n), name='A') T = tvm.compute((m, n), lambda i, j: A[i, j]) @@ -119,8 +119,8 @@ def test_singleton(): print("test singleton fin") def test_vectorize(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') A = tvm.placeholder((m, n), name='A') T = tvm.compute((m, n), lambda i, j: A[i, j]) @@ -156,7 +156,7 @@ def test_pragma(): def test_rfactor(): - n = tvm.var('n') + n = tvm.size_var('n') k1 = tvm.reduce_axis((0, n), name="k1") k2 = tvm.reduce_axis((0, n), name="k2") A = tvm.placeholder((n, n, n), name='A') @@ -214,10 +214,10 @@ def intrin_func(ins, outs): assert(s[z].iter_var_attrs[xi].iter_type == tvm.schedule.IterVar.Tensorized) def test_tensor_intrin_scalar_params(): - n = tvm.var("n") + n = 
tvm.size_var("n") x = tvm.placeholder((n,), name='x') - v = tvm.var("v") - w = tvm.var("w") + v = tvm.size_var("v") + w = tvm.size_var("w") z = tvm.compute((n,), lambda i: x[i]*v + w, name='z') def intrin_func(ins, outs, sp): diff --git a/tests/python/unittest/test_lang_tag.py b/tests/python/unittest/test_lang_tag.py index a87971657a3f8..fc884ea5bc921 100644 --- a/tests/python/unittest/test_lang_tag.py +++ b/tests/python/unittest/test_lang_tag.py @@ -33,9 +33,9 @@ def compute_conv(data, weight): axis=[ic, dh, dw])) def test_with(): - n = tvm.var('n') - m = tvm.var('m') - l = tvm.var('l') + n = tvm.size_var('n') + m = tvm.size_var('m') + l = tvm.size_var('l') A = tvm.placeholder((n, l), name='A') B = tvm.placeholder((m, l), name='B') @@ -56,12 +56,12 @@ def test_with(): def test_decorator(): - n = tvm.var('n') - c = tvm.var('c') - h = tvm.var('h') - w = tvm.var('w') - kh = tvm.var('kh') - kw = tvm.var('kw') + n = tvm.size_var('n') + c = tvm.size_var('c') + h = tvm.size_var('h') + w = tvm.size_var('w') + kh = tvm.size_var('kh') + kw = tvm.size_var('kw') A = tvm.placeholder((n, c, h, w), name='A') B = tvm.placeholder((c, c, kh, kw), name='B') @@ -70,12 +70,12 @@ def test_decorator(): assert len(C.op.attrs) == 0 def test_nested(): - n = tvm.var('n') - c = tvm.var('c') - h = tvm.var('h') - w = tvm.var('w') - kh = tvm.var('kh') - kw = tvm.var('kw') + n = tvm.size_var('n') + c = tvm.size_var('c') + h = tvm.size_var('h') + w = tvm.size_var('w') + kh = tvm.size_var('kh') + kw = tvm.size_var('kw') A = tvm.placeholder((n, c, h, w), name='A') B = tvm.placeholder((c, c, kh, kw), name='B') diff --git a/tests/python/unittest/test_lang_tensor.py b/tests/python/unittest/test_lang_tensor.py index 7e9f59bf348da..e363a2cf11bec 100644 --- a/tests/python/unittest/test_lang_tensor.py +++ b/tests/python/unittest/test_lang_tensor.py @@ -18,9 +18,9 @@ from topi.nn.pooling import pool def test_tensor(): - m = tvm.var('m') - n = tvm.var('n') - l = tvm.var('l') + m = tvm.size_var('m') + n = tvm.size_var('n') + l = tvm.size_var('l') A = tvm.placeholder((m, l), name='A') B = tvm.placeholder((n, l), name='B') T = tvm.compute((m, n, l), lambda i, j, k: A[i, k] * B[j, k]) @@ -37,7 +37,7 @@ def test_tensor(): def test_rank_zero(): - m = tvm.var('m') + m = tvm.size_var('m') A = tvm.placeholder((m,), name='A') scale = tvm.placeholder((), name='s') k = tvm.reduce_axis((0, m), name="k") @@ -48,7 +48,7 @@ def test_rank_zero(): def test_conv1d(): - n = tvm.var('n') + n = tvm.size_var('n') A = tvm.placeholder((n+2), name='A') def computeB(ii): i = ii + 1 @@ -57,14 +57,14 @@ def computeB(ii): def test_tensor_slice(): - n = tvm.var('n') + n = tvm.size_var('n') A = tvm.compute((n, n), lambda i, j: 1) B = tvm.compute((n,), lambda i: A[0][i] + A[0][i]) def test_tensor_reduce_multi_axis(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') A = tvm.placeholder((m, n), name='A') k1 = tvm.reduce_axis((0, n), "k") k2 = tvm.reduce_axis((0, m), "k") @@ -73,23 +73,23 @@ def test_tensor_reduce_multi_axis(): def test_tensor_comm_reducer(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') A = tvm.placeholder((m, n), name='A') k = tvm.reduce_axis((0, n), "k") mysum = tvm.comm_reducer(lambda x, y: x+y, lambda t: tvm.const(0, dtype=t)) C = tvm.compute((m,), lambda i: mysum(A[i, k], axis=k)) def test_tensor_comm_reducer_overload(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') mysum = tvm.comm_reducer(lambda x, y: x+y, lambda t: 
tvm.const(0, dtype=t)) sum_res = mysum(m, n) def test_tensor_reduce(): - m = tvm.var('m') - n = tvm.var('n') - l = tvm.var('l') + m = tvm.size_var('m') + n = tvm.size_var('n') + l = tvm.size_var('l') A = tvm.placeholder((m, l), name='A') B = tvm.placeholder((n, l), name='B') T = tvm.compute((m, n, l), lambda i, j, k: A[i, k] * B[j, k]) @@ -175,8 +175,8 @@ def intrin_func(ins, outs): assert isinstance(stmt.body.body.body[1].body, tvm.stmt.Evaluate) def test_tensor_scan(): - m = tvm.var("m") - n = tvm.var("n") + m = tvm.size_var("m") + n = tvm.size_var("n") x = tvm.placeholder((m, n)) s = tvm.placeholder((m, n)) res = tvm.scan(tvm.compute((1, n), lambda _, i: x[0, i]), @@ -185,8 +185,8 @@ def test_tensor_scan(): assert tuple(res.shape) == (m, n) def test_scan_multi_out(): - m = tvm.var("m") - n = tvm.var("n") + m = tvm.size_var("m") + n = tvm.size_var("n") x1 = tvm.placeholder((m, n)) s1 = tvm.placeholder((m, n)) x2 = tvm.placeholder((m, n)) @@ -206,7 +206,7 @@ def test_scan_multi_out(): assert isinstance(zz, tvm.tensor.ScanOp) def test_extern(): - m = tvm.var('m') + m = tvm.size_var('m') A = tvm.placeholder((m,), name='A') def extern_func(ins, outs): @@ -217,7 +217,7 @@ def extern_func(ins, outs): def test_extern_multi_out(): - m = tvm.var('m') + m = tvm.size_var('m') A = tvm.placeholder((m,), name='A') B = tvm.compute((m,), lambda i: A[i] * 10) @@ -230,8 +230,8 @@ def extern_func(ins, outs): assert(res[1].value_index == 1) def test_tuple_inputs(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') A0 = tvm.placeholder((m, n), name='A0') A1 = tvm.placeholder((m, n), name='A1') T0, T1 = tvm.compute((m, n), lambda i, j: (A0[i, j] * 2, A1[i, j] * 3), name='T') @@ -244,8 +244,8 @@ def test_tuple_inputs(): assert(T1.value_index == 1) def test_tuple_with_different_deps(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') A0 = tvm.placeholder((m, n), name='A1') A1 = tvm.placeholder((m, n), name='A2') B0, B1 = tvm.compute((m, n), lambda i, j: (A0[i, j] * 2, A1[i, j] * 3), name='B') diff --git a/tests/python/unittest/test_lang_tensor_overload_op.py b/tests/python/unittest/test_lang_tensor_overload_op.py index 16d67715da450..d205b55a5156d 100644 --- a/tests/python/unittest/test_lang_tensor_overload_op.py +++ b/tests/python/unittest/test_lang_tensor_overload_op.py @@ -87,7 +87,7 @@ def test_combination(): def verify_tensor_scalar_bop(shape, typ="add"): """Verify non-constant Tensor and scalar binary operations.""" - sh = [tvm.var('n%d' % i) for i in range(0, len(shape))] + sh = [tvm.size_var('n%d' % i) for i in range(0, len(shape))] k = tvm.var('k') A = tvm.placeholder(sh, name='A') if typ == "add": diff --git a/tests/python/unittest/test_lang_verify_compute.py b/tests/python/unittest/test_lang_verify_compute.py index f06131ceb8cde..6d17a0ce23722 100644 --- a/tests/python/unittest/test_lang_verify_compute.py +++ b/tests/python/unittest/test_lang_verify_compute.py @@ -17,8 +17,8 @@ import tvm def test_verify_compute(): - n = tvm.var("n") - m = tvm.var("m") + n = tvm.size_var("n") + m = tvm.size_var("m") A = tvm.placeholder((n, m), name='A') k = tvm.reduce_axis((0, m), "k") k_ = tvm.reduce_axis((0, m-1), "k_") diff --git a/tests/python/unittest/test_module_load.py b/tests/python/unittest/test_module_load.py index ba50448253089..e8e43352987ec 100644 --- a/tests/python/unittest/test_module_load.py +++ b/tests/python/unittest/test_module_load.py @@ -46,7 +46,7 @@ def test_dso_module_load(): temp = util.tempdir() def save_object(names): 
- n = tvm.var('n') + n = tvm.size_var('n') Ab = tvm.decl_buffer((n, ), dtype) i = tvm.var('i') # for i in 0 to n-1: diff --git a/tests/python/unittest/test_pass_bound_checkers.py b/tests/python/unittest/test_pass_bound_checkers.py index ada81690d05d5..2cefe22432b0d 100644 --- a/tests/python/unittest/test_pass_bound_checkers.py +++ b/tests/python/unittest/test_pass_bound_checkers.py @@ -46,7 +46,7 @@ def lower(sch, args): @pytest.mark.xfail def test_out_of_bounds_llvm(index_a, index_b): - n = tvm.var("n") + n = tvm.size_var("n") A = tvm.placeholder ((n,), name='A') B = tvm.placeholder ((n,), name='B') C = tvm.compute(A.shape, lambda i: A[i + index_a] + B[i + index_b], name='C') @@ -63,7 +63,7 @@ def test_out_of_bounds_llvm(index_a, index_b): fadd (a, b, c) def test_in_bounds_llvm(): - n = tvm.var("n") + n = tvm.size_var("n") A = tvm.placeholder ((n,), name='A') B = tvm.placeholder ((n,), name='B') C = tvm.compute(A.shape, lambda i: A[i] + B[i], name='C') @@ -128,7 +128,7 @@ def test_in_bounds_vectorize_llvm(): tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + 1) def test_in_bounds_loop_partition_basic_llvm(): - n = tvm.var('n') + n = tvm.size_var('n') A = tvm.placeholder((n, ), name='A') B = tvm.placeholder((n, ), name='B') @@ -147,7 +147,7 @@ def test_in_bounds_loop_partition_basic_llvm(): @pytest.mark.xfail def test_out_of_bounds_loop_partition_basic_llvm(index_a, index_b): - n = tvm.var('n') + n = tvm.size_var('n') A = tvm.placeholder((n, ), name='A') B = tvm.placeholder((n, ), name='B') @@ -331,9 +331,9 @@ def test_out_of_bounds_conv_llvm(data_offsets, kernel_offsets, loop_tiling=False f(data_input, kernel_input, conv_out) def test_in_bounds_tensors_with_same_shapes1D_llvm(): - n = tvm.var('n') - k = tvm.var('k') - m = tvm.var('m') + n = tvm.size_var('n') + k = tvm.size_var('k') + m = tvm.size_var('m') A = tvm.placeholder((n, ), name='A') B = tvm.placeholder((k, ), name='B') @@ -351,9 +351,9 @@ def test_in_bounds_tensors_with_same_shapes1D_llvm(): @pytest.mark.xfail def test_out_of_bounds_tensors_with_diff_shapes1D_llvm(a_shape, b_shape, c_shape): - n = tvm.var('n') - k = tvm.var('k') - m = tvm.var('m') + n = tvm.size_var('n') + k = tvm.size_var('k') + m = tvm.size_var('m') A = tvm.placeholder((n, ), name='A') B = tvm.placeholder((k, ), name='B') @@ -370,9 +370,9 @@ def test_out_of_bounds_tensors_with_diff_shapes1D_llvm(a_shape, b_shape, c_shape f(a, b, t) def test_in_bounds_tensors_with_same_shapes2D_llvm(): - n = tvm.var('n') - k = tvm.var('k') - m = tvm.var('m') + n = tvm.size_var('n') + k = tvm.size_var('k') + m = tvm.size_var('m') A = tvm.placeholder((n, n), name='A') B = tvm.placeholder((k, k), name='B') @@ -390,9 +390,9 @@ def test_in_bounds_tensors_with_same_shapes2D_llvm(): @pytest.mark.xfail def test_out_of_bounds_tensors_with_diff_shapes2D_llvm(a_shape, b_shape, c_shape): - n = tvm.var('n') - k = tvm.var('k') - m = tvm.var('m') + n = tvm.size_var('n') + k = tvm.size_var('k') + m = tvm.size_var('m') A = tvm.placeholder((n, n), name='A') B = tvm.placeholder((k, k), name='B') @@ -409,9 +409,9 @@ def test_out_of_bounds_tensors_with_diff_shapes2D_llvm(a_shape, b_shape, c_shape f(a, b, t) def test_in_bounds_tensors_with_same_shapes3D_llvm(): - n = tvm.var('n') - k = tvm.var('k') - m = tvm.var('m') + n = tvm.size_var('n') + k = tvm.size_var('k') + m = tvm.size_var('m') A = tvm.placeholder((n, n, n), name='A') B = tvm.placeholder((k, k, k), name='B') @@ -429,9 +429,9 @@ def test_in_bounds_tensors_with_same_shapes3D_llvm(): @pytest.mark.xfail def 
test_out_of_bounds_tensors_with_diff_shapes3D_llvm(a_shape, b_shape, c_shape): - n = tvm.var('n') - k = tvm.var('k') - m = tvm.var('m') + n = tvm.size_var('n') + k = tvm.size_var('k') + m = tvm.size_var('m') A = tvm.placeholder((n, n, n), name='A') B = tvm.placeholder((k, k, k), name='B') diff --git a/tests/python/unittest/test_pass_decorate_device_scope.py b/tests/python/unittest/test_pass_decorate_device_scope.py index d36fe8d37964c..9ffd56544ebc7 100644 --- a/tests/python/unittest/test_pass_decorate_device_scope.py +++ b/tests/python/unittest/test_pass_decorate_device_scope.py @@ -17,8 +17,8 @@ import tvm def test_decorate_device(): - m = tvm.var('m') - l = tvm.var('l') + m = tvm.size_var('m') + l = tvm.size_var('l') A = tvm.placeholder((m, l), name='A') A1 = tvm.compute((m, l), lambda i, j: A[i, j], name='A1') diff --git a/tests/python/unittest/test_pass_inline.py b/tests/python/unittest/test_pass_inline.py index e8b86fc75d7e4..511a1438f4bec 100644 --- a/tests/python/unittest/test_pass_inline.py +++ b/tests/python/unittest/test_pass_inline.py @@ -17,7 +17,7 @@ import tvm def test_inline(): - m = tvm.var('m') + m = tvm.size_var('m') A = tvm.placeholder((m,), name='A') T = tvm.compute((m,), lambda i,: A[i] + 10, name='T') stmt = tvm.make.Evaluate(T[10] + 11 * T[100]) @@ -36,7 +36,7 @@ def test_inline(): pass def test_inline2(): - m = tvm.var('m') + m = tvm.size_var('m') A = tvm.placeholder((m,), name='A') T = tvm.compute((m,), lambda i,: A[i] + 10, name='T') stmt = tvm.make.Evaluate(tvm.exp(T[10]) + 11 * T[100]) diff --git a/tests/python/unittest/test_pass_loop_partition.py b/tests/python/unittest/test_pass_loop_partition.py index c58b2f6dd2988..9812660d2ad16 100644 --- a/tests/python/unittest/test_pass_loop_partition.py +++ b/tests/python/unittest/test_pass_loop_partition.py @@ -52,7 +52,7 @@ def lower(sch, args): return stmt def test_basic(): - n = tvm.var('n') + n = tvm.size_var('n') A = tvm.placeholder((n, ), name='A') B = tvm.placeholder((n, ), name='B') @@ -65,6 +65,7 @@ def test_basic(): stmt = tvm.ir_pass.LoopPartition(stmt, False) stmt = tvm.ir_pass.Simplify(stmt) assert('if' not in str(stmt.body.body.body[0])) + assert('if' in str(stmt.body.body.body[1])) def test_const_loop(): n = 21 @@ -83,8 +84,8 @@ def test_const_loop(): def test_multi_loop(): ib = tvm.ir_builder.create() - m = tvm.var('m') - n = tvm.var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') with ib.for_range(0, 4, "i") as i: with ib.for_range(0, n, "j") as j: with ib.for_range(0, m, "k") as k: @@ -99,8 +100,8 @@ def test_multi_loop(): def test_multi_if(): ib = tvm.ir_builder.create() - m = tvm.var('m') - n = tvm.var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') with ib.for_range(0, 4, 'i') as i: with ib.for_range(0, n, 'j') as j: with ib.for_range(0, m, 'k') as k: @@ -118,8 +119,8 @@ def test_multi_if(): assert('if' not in str(stmt.body[0])) def test_thread_axis(): - m = tvm.var('m') - l = tvm.var('l') + m = tvm.size_var('m') + l = tvm.size_var('l') A = tvm.placeholder((m, l), name='A') B = tvm.compute((m, l), lambda i, j: A[i, j] + 3, name='B') s = tvm.create_schedule(B.op) @@ -137,11 +138,11 @@ def test_thread_axis(): assert('if' not in str(stmt.body.body.body[0])) def test_vectorize(): - n = tvm.var('n') + n = tvm.size_var('n') A = tvm.placeholder((n,), name='A') B = tvm.placeholder((n,), name='B') - bias = tvm.var("bias", dtype="float32") - scale = tvm.var("scale", dtype="float32") + bias = tvm.size_var("bias", dtype="float32") + scale = tvm.size_var("scale", dtype="float32") C = 
tvm.compute(A.shape, lambda *i: A(*i) + B(*i) * scale + bias, name='C') # schedule s = tvm.create_schedule(C.op) @@ -160,8 +161,8 @@ def test_vectorize(): def test_condition(): ib = tvm.ir_builder.create() - m = tvm.var('m') - n = tvm.var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') with ib.for_range(0, tvm.truncdiv(n+3,4), 'i') as i: with ib.for_range(0, 4, 'j') as j: ib.emit(tvm.make.Evaluate( @@ -173,8 +174,8 @@ def test_condition(): def test_condition_EQ(): ib = tvm.ir_builder.create() - m = tvm.var('m') - n = tvm.var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') with ib.for_range(0, 10, 'i') as i: ib.emit(tvm.make.Evaluate( tvm.make.Select(ib.likely(tvm.expr.EQ(i, 5)), m, n))) @@ -185,7 +186,7 @@ def test_condition_EQ(): def test_thread_axis2(): n = tvm.convert(4096) - m = tvm.var('m') + m = tvm.size_var('m') A = tvm.placeholder((n,), name='A') B = tvm.placeholder((n,), name='B') C = tvm.compute(A.shape, lambda i: A[i] + B[i], name='C') @@ -201,8 +202,8 @@ def test_thread_axis2(): assert('threadIdx' not in str(for_body.extent)) def test_everything_during_deduction(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') ib = tvm.ir_builder.create() with ib.for_range(0, n, 'i') as i: with ib.for_range(0, 32, 'j') as j: @@ -252,7 +253,7 @@ def test_multi_likely(): assert(not any(collect_visit(stmt, lambda x: isinstance(x, tvm.stmt.IfThenElse)))) def test_oneD_pool(): - m = tvm.var('m') + m = tvm.size_var('m') ib = tvm.ir_builder.create() #data = tvm.placeholder((16,), name = 'data') data = ib.pointer("float32", name="A") diff --git a/tests/python/unittest/test_pass_makeapi.py b/tests/python/unittest/test_pass_makeapi.py index 77a97d8bffa85..34f32ef01c7c2 100644 --- a/tests/python/unittest/test_pass_makeapi.py +++ b/tests/python/unittest/test_pass_makeapi.py @@ -19,7 +19,7 @@ def test_makeapi(): """Not yet working, mock design""" - n = tvm.var('n') + n = tvm.size_var('n') A = tvm.placeholder((n,), name='A') B = tvm.placeholder((n,), name='B') C = tvm.compute(A.shape, lambda *i: A(*i) + B(*i), name='C') diff --git a/tests/python/unittest/test_pass_split_host_device.py b/tests/python/unittest/test_pass_split_host_device.py index c32485d3973b4..e8858b8aa41e0 100644 --- a/tests/python/unittest/test_pass_split_host_device.py +++ b/tests/python/unittest/test_pass_split_host_device.py @@ -19,7 +19,7 @@ @pytest.mark.xfail def test_loop_dependent_allocate(): - N = tvm.var("N") + N = tvm.size_var("N") A = tvm.placeholder((2*N,), "float32", "A") C = tvm.compute((N, ), lambda i: A[2*i] + A[i+1], name='C') s = tvm.create_schedule(C.op) diff --git a/tests/python/unittest/test_pass_storage_flatten.py b/tests/python/unittest/test_pass_storage_flatten.py index da32f60f69fba..2bee66c0a42e3 100644 --- a/tests/python/unittest/test_pass_storage_flatten.py +++ b/tests/python/unittest/test_pass_storage_flatten.py @@ -17,8 +17,8 @@ import tvm def test_flatten2(): - m = tvm.var('m') - l = tvm.var('l') + m = tvm.size_var('m') + l = tvm.size_var('l') A = tvm.placeholder((m, l), name='A') A1 = tvm.compute((m, l), lambda i, j: A[i, j], name='A1') A2 = tvm.compute((m, l), lambda i, j: A1[i, j] + 3, name='A2') @@ -38,8 +38,8 @@ def test_flatten2(): def test_flatten_prefetch(): A = tvm.placeholder((25, 100, 4), name = 'A') _A= tvm.decl_buffer(A.shape, A.dtype, name = 'A'); - i = tvm.var('i') - j = tvm.var('j') + i = tvm.size_var('i') + j = tvm.size_var('j') region = [tvm.make.range_by_min_extent(i[0], i[1]) for i in [(i, 2), (j, 8), (0, 4)]] stmt = tvm.make.Prefetch(A.op, 
0, A.dtype, region) stmt = tvm.ir_pass.StorageFlatten(stmt, {A: _A}, 64) diff --git a/tests/python/unittest/test_pass_storage_sync.py b/tests/python/unittest/test_pass_storage_sync.py index 3202d7b7d3a82..55596eea45796 100644 --- a/tests/python/unittest/test_pass_storage_sync.py +++ b/tests/python/unittest/test_pass_storage_sync.py @@ -17,8 +17,8 @@ import tvm def test_storage_sync(): - m = tvm.var('m') - l = tvm.var('l') + m = tvm.size_var('m') + l = tvm.size_var('l') A = tvm.placeholder((m, l), name='A') A1 = tvm.compute((m, l), lambda i, j: A[i, j], name='A1') @@ -54,7 +54,7 @@ def meminfo_cache(): max_num_bits=128, head_address=tvm.call_extern("handle", "global_cache")) ib = tvm.ir_builder.create() - n = tvm.var("n") + n = tvm.size_var("n") cp = tvm.thread_axis((0, 1), "cop") A = ib.allocate("float32", 128, name="A", scope="global.cache") with ib.for_range(0, n, name="i") as i: @@ -76,7 +76,7 @@ def meminfo_cache(): def test_coproc_sync2(): ib = tvm.ir_builder.create() - n = tvm.var("n") + n = tvm.size_var("n") cp = tvm.thread_axis((0, 1), "cop") ty = tvm.thread_axis("cthread") A = ib.allocate("float32", 128, name="A") @@ -102,7 +102,7 @@ def __check_list(tvm_array, py_list): return True ib = tvm.ir_builder.create() - n = tvm.var("n") + n = tvm.size_var("n") cp = tvm.thread_axis((0, 1), "cop") A = ib.allocate("float32", 128, name="A", scope="global.cache") with ib.for_range(0, n, name="i") as i: diff --git a/tests/python/unittest/test_pass_unroll.py b/tests/python/unittest/test_pass_unroll.py index c94ffe0bde14b..e5ef9d0aa2f40 100644 --- a/tests/python/unittest/test_pass_unroll.py +++ b/tests/python/unittest/test_pass_unroll.py @@ -21,7 +21,7 @@ def test_unroll_loop(): ib = tvm.ir_builder.create() dtype = 'int64' - n = tvm.var('n') + n = tvm.size_var('n') Ab = tvm.decl_buffer((n, ), dtype) Aptr = ib.buffer_ptr(Ab) # for i in 0 to n-1: @@ -54,7 +54,7 @@ def test_unroll_loop(): def test_unroll_fake_loop(): ib = tvm.ir_builder.create() dtype = 'int32' - n = tvm.var('n') + n = tvm.size_var('n') Ab = tvm.decl_buffer((n, ), dtype) Aptr = ib.buffer_ptr(Ab) # for i in 0 to n-1: @@ -68,7 +68,7 @@ def test_unroll_fake_loop(): assert isinstance(ret[0], tvm.stmt.Store) def test_unroll_single_count_loops(): - n = tvm.var('n') + n = tvm.size_var('n') A = tvm.placeholder((n,), name='A') B = tvm.compute((n,), lambda *i: A(*i), name='B') s = tvm.create_schedule(B.op) diff --git a/topi/python/topi/nn/conv2d.py b/topi/python/topi/nn/conv2d.py index 169878c11a85e..664a293d56ae7 100644 --- a/topi/python/topi/nn/conv2d.py +++ b/topi/python/topi/nn/conv2d.py @@ -142,18 +142,18 @@ def conv2d_infer_layout(workload, cfg): def _get_workload(data, kernel, stride, padding, out_dtype, data_layout='NCHW'): """ Get the workload structure. """ if data_layout == 'NCHW': - _, CI, IH, IW = [x.value for x in data.shape] + _, CI, IH, IW = get_const_tuple(data.shape) elif data_layout == 'NHWC': - _, IH, IW, CI = [x.value for x in data.shape] + _, IH, IW, CI = get_const_tuple(data.shape) elif data_layout == 'HWCN': - IH, IW, CI, _ = [x.value for x in data.shape] + IH, IW, CI, _ = get_const_tuple(data.shape) else: raise ValueError("not support this layout {} yet".format(data_layout)) if data_layout == 'NCHW': - CO, CIG, KH, KW = [x.value for x in kernel.shape] + CO, CIG, KH, KW = get_const_tuple(kernel.shape) else: - KH, KW, CIG, CO = [x.value for x in kernel.shape] + KH, KW, CIG, CO = get_const_tuple(kernel.shape) HPAD, WPAD, _, _ = get_pad_tuple(padding, (get_const_int(KH), get_const_int(KW))) GRPS = CI // CIG