diff --git a/docs/api/python/relay/index.rst b/docs/api/python/relay/index.rst index 39a68b6d1f5d..90746b8e5d4e 100644 --- a/docs/api/python/relay/index.rst +++ b/docs/api/python/relay/index.rst @@ -33,7 +33,8 @@ compiler stack. expr frontend image - ir_pass + analysis + transform module nn op diff --git a/include/tvm/relay/pass.h b/include/tvm/relay/analysis.h similarity index 71% rename from include/tvm/relay/pass.h rename to include/tvm/relay/analysis.h index 79172c374316..e3d16b6eda73 100644 --- a/include/tvm/relay/pass.h +++ b/include/tvm/relay/analysis.h @@ -18,42 +18,21 @@ */ /*! - * \file tvm/relay/pass.h - * \brief The set of Relay passes written in C++. - */ -#ifndef TVM_RELAY_PASS_H_ -#define TVM_RELAY_PASS_H_ + * \file tvm/relay/analysis.h + * \brief The set of Relay analysis passes written in C++. + */ +#ifndef TVM_RELAY_ANALYSIS_H_ +#define TVM_RELAY_ANALYSIS_H_ -#include -#include +#include #include #include -#include #include -#include -#include -#include #include -#include namespace tvm { namespace relay { -/*! - * \brief Infer the type of an expression. - * - * The result of type checking is a new expression with unambigous - * type information filled in, as well as it's checked type field - * populated with the result type. - * - * \param expr The expression to type check. - * \param mod The module used for referencing global functions, can be - * None. - * - * \return A type checked expression with its checked_type field populated. - */ -TVM_DLL Expr InferType(const Expr& expr, const Module& mod); - /*! * \brief Infer the type of a function as if it is mapped to var in the mod. * @@ -64,7 +43,8 @@ TVM_DLL Expr InferType(const Expr& expr, const Module& mod); * \return A type checked Function with its checked_type field populated. * \note this function mutates mod and is not thread-safe. */ -TVM_DLL Function InferType(const Function& f, const Module& mod, +TVM_DLL Function InferType(const Function& f, + const Module& mod, const GlobalVar& var); /*! @@ -271,58 +251,6 @@ TVM_DLL tvm::Array AllTypeVars(const Expr& expr, const Module& mod); */ TVM_DLL tvm::Array AllTypeVars(const Type& t, const Module& mod); -/*! - * \brief Fold constant expressions. - * - * \param expr the expression to be optimized. - * - * \return The optimized expression. - */ -TVM_DLL Expr FoldConstant(const Expr& expr); - -/*! - * \brief Fuse operations into expr into seperate functions. - * - * \param expr The expression. - * \param fuse_opt_level Optimization level. - * \param mod the module. - * - * \return The optimized expression. - */ -TVM_DLL Expr FuseOps(const Expr& expr, int fuse_opt_level, const Module& mod); - -/*! - * \brief Apply rewrite rules to rewrite the expr in post DFS order. - * - * \param expr The expression. - * \param rewrite_map_attr_name The Op's attr name which corresponds to the rewrite - * rule function. - * \param fcontext Additional callback to provide context argument for each call node. - * \param fmulti_ref_trigger Transformation function to be called when - * an Expr consumed by multiple callers. - * \return The rewritten expression. - */ -TVM_DLL Expr ForwardRewrite(const Expr& expr, - const std::string& rewrite_map_attr_name, - std::function fcontext = nullptr, - std::function fmulti_ref_trigger = nullptr); - -/*! - * \brief Apply rewrite rules to rewrite the expr in post DFS order. - * - * \param expr The expression. - * \param rewrite_func The rewrite func that will apply to all operators. 
- * \param fcontext Additional callback to provide context argument for each call node. - * \param fmulti_ref_trigger Transformation function to be called when - * an Expr consumed by multiple callers. - * - * \return The rewritten expression. - */ -TVM_DLL Expr ForwardRewrite(const Expr& expr, - const FForwardRewrite& rewrite_func, - std::function fcontext = nullptr, - std::function fmulti_ref_trigger = nullptr); - /*! * \brief Rewrite the annotated program. * @@ -364,19 +292,6 @@ TVM_DLL Map CollectDeviceAnnotationOps(const Expr& expr); */ TVM_DLL Array UnmatchedCases(const Match& match, const Module& mod); -/*! - * \brief Bind the free variables to a Relay expression. - * - * Parameter binding can only happen if expr is a Function. - * binds cannot change internal arguments of internal functions. - * - * \param expr The function to be binded. - * \param binds The map of arguments to - * - * \return The expression with all free vars bound. - */ -TVM_DLL Expr Bind(const Expr& expr, const tvm::Map& binds); - /*! \brief A hashing structure in the style of std::hash. */ struct StructuralHash { /*! \brief Hash a Relay type. @@ -388,7 +303,6 @@ struct StructuralHash { * \return the hash value. */ size_t operator()(const Type& type) const; - /*! \brief Hash a Relay expression. * * Implements structural hashing of a Relay expression. @@ -400,20 +314,7 @@ struct StructuralHash { size_t operator()(const Expr& expr) const; }; -namespace vm { - -/*! - * \brief Compile a module, and construct the virtual machine. - * - * \param mod The module to compile. - * - * \return The constructed virtual machine. - */ -runtime::vm::VirtualMachine CompileModule(const Module& mod); - -} // namespace vm - } // namespace relay } // namespace tvm -#endif // TVM_RELAY_PASS_H_ +#endif // TVM_RELAY_ANALYSIS_H_ diff --git a/include/tvm/relay/transform.h b/include/tvm/relay/transform.h index 9ae71d824f94..bb8638abbabf 100644 --- a/include/tvm/relay/transform.h +++ b/include/tvm/relay/transform.h @@ -378,36 +378,6 @@ TVM_DLL Pass FoldConstant(); */ TVM_DLL Pass FuseOps(int fuse_opt_level = -1); -/*! - * \brief Apply rewrite rules to rewrite the expr in post DFS order. - * - * \param rewrite_map_attr_name The Op's attr name which corresponds to the rewrite - * rule function. - * \param fcontext Additional callback to provide context argument for each call node. - * \param fmulti_ref_trigger Transformation function to be called when - * an Expr consumed by multiple callers. - * - * \return The pass. - */ -TVM_DLL Pass ForwardRewrite(const std::string& rewrite_map_attr_name, - std::function fcontext = nullptr, - std::function - fmulti_ref_trigger = nullptr); - -/*! - * \brief Apply rewrite rules to rewrite the expr in post DFS order. - * - * \param rewrite_func The rewrite func that will apply to all operators. - * \param fcontext Additional callback to provide context argument for each call node. - * \param fmulti_ref_trigger Transformation function to be called when - * an Expr consumed by multiple callers. - * - * \return The pass. - */ -TVM_DLL Pass ForwardRewrite(const FForwardRewrite& rewrite_func, - std::function fcontext = nullptr, - std::function fmulti_ref_trigger = nullptr); - /*! * \brief Rewrite the annotated program. * @@ -554,21 +524,68 @@ TVM_DLL Pass CanonicalizeCast(); */ TVM_DLL Pass EtaExpand(); +} // namespace transform + /*! - * \brief This is a helper function that runs a some optimization passes on - * a certain expression and returns the optimized version. 
With the help of this
- * function, users don't need to manually construct a module, then perform
- * passes, and finally and extract the target function/expression from the
- * returned module frequently.
+ * \brief Bind the free variables to a Relay expression. This is a helper
+ * function usually called by other pass functions to help optimizations.
 *
- * \param expr The expression to be optimized.
- * \param passes The passses that will be applied on the given expression.
+ * \param expr The input expression.
+ * \param binds The variable to expression map that will be used to help the
+ * binding.
 *
- * \return The optimized expression.
+ * \return The updated expression.
 */
-TVM_DLL Expr OptimizeOnExpr(const Expr& expr, const Array& passes);
+TVM_DLL Expr Bind(const Expr& expr, const tvm::Map& binds);
+
+/*!
+ * \brief Infer the type of a function as if it is mapped to var in the mod.
+ *
+ * \param f the function.
+ * \param mod The module used for referencing global functions.
+ * \param var The global variable corresponding to the function.
+ *
+ * \return A type checked Function with its checked_type field populated.
+ * \note this function mutates mod and is not thread-safe.
+ */
+TVM_DLL Function InferType(const Function& f,
+                           const Module& mod,
+                           const GlobalVar& var);
+
+/*!
+ * \brief Apply rewrite rules to rewrite the expr in post DFS order. This
+ * function is used as a helper function to rewrite an expression in a pass.
+ *
+ * \param expr The expression.
+ * \param rewrite_map_attr_name The Op's attr name which corresponds to the rewrite
+ * rule function.
+ * \param fcontext Additional callback to provide context argument for each call node.
+ * \param fmulti_ref_trigger Transformation function to be called when
+ * an Expr consumed by multiple callers.
+ * \return The rewritten expression.
+ */
+TVM_DLL Expr ForwardRewrite(const Expr& expr,
+                            const std::string& rewrite_map_attr_name,
+                            std::function fcontext = nullptr,
+                            std::function fmulti_ref_trigger = nullptr);
+
+/*!
+ * \brief Apply rewrite rules to rewrite the expr in post DFS order. This
+ * function is used as a helper function to rewrite an expression in a pass.
+ *
+ * \param expr The expression.
+ * \param rewrite_func The rewrite func that will apply to all operators.
+ * \param fcontext Additional callback to provide context argument for each call node.
+ * \param fmulti_ref_trigger Transformation function to be called when
+ * an Expr consumed by multiple callers.
+ *
+ * \return The rewritten expression.
+ */ +TVM_DLL Expr ForwardRewrite(const Expr& expr, + const FForwardRewrite& rewrite_func, + std::function fcontext = nullptr, + std::function fmulti_ref_trigger = nullptr); -} // namespace transform } // namespace relay } // namespace tvm diff --git a/nnvm/tests/python/compiler/test_to_relay.py b/nnvm/tests/python/compiler/test_to_relay.py index e79831d06cf2..dac14a8c1f22 100644 --- a/nnvm/tests/python/compiler/test_to_relay.py +++ b/nnvm/tests/python/compiler/test_to_relay.py @@ -18,7 +18,7 @@ from nnvm import testing from nnvm import to_relay import tvm -from tvm.relay import ir_pass +from tvm.relay import transform from tvm.relay import create_executor from tvm.contrib import graph_runtime import numpy as np @@ -41,10 +41,11 @@ def check_model(sym, shapes, dtypes, params): nnvm_rts.run(**inputs) nnvm_out = nnvm_rts.get_output(0) relay_model, params = to_relay.to_relay(net, shapes, dtypes, params) - relay_model = ir_pass.infer_type(relay_model) - relay_rts = create_executor(kind='graph', ctx=tvm.cpu(0), target='llvm') + mod = tvm.relay.Module.from_expr(relay_model) + mod = transform.InferType()(mod) + relay_rts = create_executor(kind='graph', mod=mod, ctx=tvm.cpu(0), target='llvm') inputs.update(params) - relay_out = relay_rts.evaluate(relay_model)(*list(inputs.values())) + relay_out = relay_rts.evaluate()(*list(inputs.values())) np.testing.assert_allclose(nnvm_out.asnumpy(), relay_out.asnumpy()) # def test_mlp(): diff --git a/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py b/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py index 7e7f1749eae7..62e409fec1a0 100644 --- a/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py +++ b/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py @@ -21,6 +21,7 @@ import topi from tvm import relay, autotvm +from tvm.relay import transform from tvm.relay.expr import Call, Function, TupleGetItem, Var, Constant, Tuple from tvm.relay.ty import TupleType, TensorType from tvm.autotvm.task import TaskExtractEnv @@ -80,6 +81,14 @@ def expr2graph(expr, target_ops, node_dict, node_list): task_pos += 1 +def _infer_type(node): + """A method to infer the type of a relay expression.""" + mod = relay.Module.from_expr(node) + mod = transform.InferType()(mod) + entry = mod[mod.entry_func] + return entry if isinstance(node, relay.Function) else entry.body + + def _expr2graph_impl(expr, target_ops, node_dict, node_list): """Implementation to convert relay expr to graph data structure """ @@ -99,7 +108,7 @@ def _traverse_expr(node): node_entry["inputs"] += node_list[in_node_idx]["inputs"] else: node_entry["inputs"].append([in_node_idx, 0, 0]) - infer_out = relay.ir_pass.infer_type(node) + infer_out = _infer_type(node) out_type = infer_out._checked_type_ if isinstance(out_type, TensorType): node_entry["types"].append(out_type) @@ -168,7 +177,7 @@ def _traverse_expr(node): node_dict[node] = node_index node_list.append(node_entry) - relay.ir_pass.post_order_visit(expr, _traverse_expr) + relay.analysis.post_order_visit(expr, _traverse_expr) def get_direct_ancestor(node_list, visited_dict, target_ops, node_idx, input_names): diff --git a/python/tvm/autotvm/graph_tuner/utils/utils.py b/python/tvm/autotvm/graph_tuner/utils/utils.py index 6151734299af..797a38ae3698 100644 --- a/python/tvm/autotvm/graph_tuner/utils/utils.py +++ b/python/tvm/autotvm/graph_tuner/utils/utils.py @@ -17,6 +17,7 @@ # pylint: disable=eval-used,invalid-name,too-many-arguments """Utility functions""" from tvm import relay +from tvm.relay import transform def has_multiple_inputs(node_list, 
node_idx, input_names): @@ -107,4 +108,7 @@ def bind_inputs(expr, input_shapes=None, input_dtypes="float32"): rebind_dict[var] = updated_input_dict[var.name_hint] updated_expr = relay.expr.bind(expr, rebind_dict) - return relay.ir_pass.infer_type(updated_expr) + mod = relay.Module.from_expr(updated_expr) + mod = transform.InferType()(mod) + entry = mod[mod.entry_func] + return entry if isinstance(updated_expr, relay.Function) else entry.body diff --git a/python/tvm/relay/__init__.py b/python/tvm/relay/__init__.py index 5536e503e6b6..dfac85bb1ed2 100644 --- a/python/tvm/relay/__init__.py +++ b/python/tvm/relay/__init__.py @@ -24,7 +24,7 @@ from . import expr_functor from . import module from . import adt -from . import ir_pass +from . import analysis from . import transform from .build_module import build, create_executor from .transform import build_config @@ -32,6 +32,7 @@ from . import parser from . import debug from . import param_dict +from . import feature # Root operators from .op import Op @@ -101,7 +102,7 @@ bind = expr.bind module_pass = transform.module_pass function_pass = transform.function_pass -alpha_equal = ir_pass.alpha_equal +alpha_equal = analysis.alpha_equal # ExprFunctor ExprFunctor = expr_functor.ExprFunctor @@ -122,3 +123,6 @@ ModulePass = transform.ModulePass FunctionPass = transform.FunctionPass Sequential = transform.Sequential + +# Feature +Feature = feature.Feature diff --git a/python/tvm/relay/_ir_pass.py b/python/tvm/relay/_analysis.py similarity index 89% rename from python/tvm/relay/_ir_pass.py rename to python/tvm/relay/_analysis.py index 3a0e0ac846b9..32a7324ae29f 100644 --- a/python/tvm/relay/_ir_pass.py +++ b/python/tvm/relay/_analysis.py @@ -14,8 +14,8 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -"""FFI exposing the Relay type inference and checking.""" +"""FFI exposing the passes for Relay program analysis.""" from tvm._ffi.function import _init_api -_init_api("relay._ir_pass", __name__) +_init_api("relay._analysis", __name__) diff --git a/python/tvm/relay/ir_pass.py b/python/tvm/relay/analysis.py similarity index 53% rename from python/tvm/relay/ir_pass.py rename to python/tvm/relay/analysis.py index 52dc34d7aac9..ee8ce985fcbc 100644 --- a/python/tvm/relay/ir_pass.py +++ b/python/tvm/relay/analysis.py @@ -20,7 +20,7 @@ This file contains the set of passes for Relay, which exposes an interface for configuring the passes and scripting them in Python. """ -from . import _ir_pass +from . import _analysis from . import _make from .expr import Expr from .ty import Type @@ -41,71 +41,7 @@ def post_order_visit(expr, fvisit): fvisit : function The visitor function to be applied. """ - return _ir_pass.post_order_visit(expr, fvisit) - -def infer_type(expr, mod=None): - """Infer the type of expr under the context of mod. - - Parameters - ---------- - expr: tvm.relay.Expr - The input expression. - - mod: Optional[tvm.relay.Module] - The global module. - - Returns - ------- - checked_expr : tvm.relay.Expr - The checked expression. - """ - return _ir_pass.infer_type(expr, mod) - - -def backward_fold_scale_axis(expr): - """Backward fold axis scaling into weights of conv2d/dense. - - Parameters - ---------- - expr : tvm.relay.Expr - The input expression, we expect that expr's types - should be fully inferred by infer_type. - - Returns - ------- - folded_expr : tvm.relay.Expr - The folded expression after transformation. 
- - Note - ---- - It is recommended to call backward_fold_scale_axis - before using forward_fold_scale_axis. - As backward folding targets common conv-bn pattern. - """ - return _ir_pass.backward_fold_scale_axis(expr) - - -def forward_fold_scale_axis(expr): - """Fold the scaling of axis into weights of conv2d/dense. - - Parameters - ---------- - expr : tvm.relay.Expr - The input expression, we expect that expr's types - should be fully inferred by infer_type. - - Returns - ------- - folded_expr : tvm.relay.Expr - The folded expression after transformation. - - Note - ---- - It is recommended to call backward_fold_scale_axis - before using forward_fold_scale_axis. - As backward folding targets common conv-bn pattern. - """ - return _ir_pass.forward_fold_scale_axis(expr) + return _analysis.post_order_visit(expr, fvisit) def well_formed(expr): @@ -121,12 +57,13 @@ def well_formed(expr): well_form : bool Whether the input expression is well formed """ - return _ir_pass.well_formed(expr) + return _analysis.well_formed(expr) def check_kind(t, mod=None): """Check that the type is well kinded and return the kind. - For example, this mean type cannot has tensor of tensor, or is a tuple type of 2 shapes. + For example, this mean type cannot has tensor of tensor, or is a tuple type + of 2 shapes. Parameters ---------- @@ -149,9 +86,9 @@ def check_kind(t, mod=None): assert check_kind(relay.TupleType([relay.TypeParam('tp1', relay.Kind.Type)])) == Type """ if mod is not None: - return _ir_pass.check_kind(t, mod) + return _analysis.check_kind(t, mod) else: - return _ir_pass.check_kind(t) + return _analysis.check_kind(t) def free_vars(expr): @@ -173,7 +110,7 @@ def free_vars(expr): neural networks: usually this means weights of previous are ordered first. """ - return _ir_pass.free_vars(expr) + return _analysis.free_vars(expr) def bound_vars(expr): @@ -189,7 +126,7 @@ def bound_vars(expr): free : List[tvm.relay.Var] The list of bound variables in post-DFS order. """ - return _ir_pass.bound_vars(expr) + return _analysis.bound_vars(expr) def all_vars(expr): @@ -205,7 +142,7 @@ def all_vars(expr): free : List[tvm.relay.Var] The list of all variables in post-DFS order. """ - return _ir_pass.all_vars(expr) + return _analysis.all_vars(expr) def free_type_vars(expr, mod=None): @@ -225,7 +162,7 @@ def free_type_vars(expr, mod=None): The list of free type variables in post-DFS order """ use_mod = mod if mod is not None else Module() - return _ir_pass.free_type_vars(expr, use_mod) + return _analysis.free_type_vars(expr, use_mod) def bound_type_vars(expr, mod=None): @@ -245,7 +182,7 @@ def bound_type_vars(expr, mod=None): The list of bound type variables in post-DFS order """ use_mod = mod if mod is not None else Module() - return _ir_pass.bound_type_vars(expr, use_mod) + return _analysis.bound_type_vars(expr, use_mod) def all_type_vars(expr, mod=None): @@ -255,6 +192,7 @@ def all_type_vars(expr, mod=None): ---------- expr : Union[tvm.relay.Expr,tvm.relay.Type] The input expression/type + mod : Optional[tvm.relay.Module] The global module @@ -264,41 +202,7 @@ def all_type_vars(expr, mod=None): The list of all type variables in post-DFS order """ use_mod = mod if mod is not None else Module() - return _ir_pass.all_type_vars(expr, use_mod) - - -def simplify_inference(expr): - """ Simplify the data-flow graph for inference phase. 
- - Parameters - ---------- - expr : tvm.relay.Expr - The input Expression - - Returns - ------- - result : tvm.relay.Expr - An expression which is semantically equal to the input expression, - but with some simplification - """ - return _ir_pass.simplify_inference(expr) - - -def canonicalize_ops(expr): - """ Canonicalize special operators to basic operators. - This can simplify latter analysis. (e.g. Expand bias_add to expand_dims and broadcast_add.) - - Parameters - ---------- - expr : tvm.relay.Expr - The input Expression - - Returns - ------- - result : tvm.relay.Expr - An expression without bias_add - """ - return _ir_pass.canonicalize_ops(expr) + return _analysis.all_type_vars(expr, use_mod) def alpha_equal(lhs, rhs): @@ -342,128 +246,6 @@ def graph_equal(lhs, rhs): return bool(_make._graph_equal(lhs, rhs)) -def structural_hash(value): - """Hash a Relay expression structurally. - - Parameters - ---------- - expr : Union[tvm.relay.Expr, tvm.relay.Type] - The expression to hash. - - Returns - ------- - result : int - The hash value - """ - if isinstance(value, Expr): - return int(_ir_pass._expr_hash(value)) - elif isinstance(value, Type): - return int(_ir_pass._type_hash(value)) - else: - msg = ("found value of type {0} expected" + - "relay.Expr or relay.Type").format(type(value)) - raise TypeError(msg) - - -def fold_constant(expr): - """Fold the constant expression in expr. - - Parameters - ---------- - expr : tvm.relay.Expr - The input expression. - - Returns - ------- - transformed_expr : tvm.relay.Expr - The transformed expression. - """ - return _ir_pass.FoldConstant(expr) - - -def fuse_ops(expr, opt_level=1, mod=None): - """Fuse operators in expr together. - - Parameters - ---------- - expr : tvm.relay.Expr - The input expression. - - opt_level : int - The level of fuse optimization. - - mod : tvm.relay.Module - The module to perform fusion over. - - Returns - ------- - transformed_expr : tvm.relay.Expr - Transformed expression, containing fused result. - """ - return _ir_pass.FuseOps(expr, opt_level, mod) - - -def combine_parallel_conv2d(expr, min_num_branches=3): - """Combine multiple conv2d into one. - - Parameters - ---------- - expr : tvm.relay.Expr - The input expression. - - min_num_branches : int - The minimum number of parallel branches when the transformation should be applied. - - Returns - ------- - transformed_expr : tvm.relay.Expr - Transformed expression - """ - return _ir_pass.CombineParallelConv2D(expr, min_num_branches) - - -def alter_op_layout(expr): - """Alternate the layouts of operators or replace primitive operators with - other expressions. - This pass can be used for computing convolution in custom layouts or - other general weight pre-transformation. - - Parameters - ---------- - expr : tvm.relay.Expr - The input expression. - - Returns - ------- - transformed_expr : tvm.relay.Expr - Transformed expression with alternated layout. - """ - return _ir_pass.AlterOpLayout(expr) - - -def rewrite_annotated_ops(expr, fallback_device): - """Rewrite the annotated program where annotation operators, e.g. - `on_deivce`, mark which device an expression should be scheduled to. - This pass helps heterogeneous execution where different operators may need - to be allocated on various devices. - - Parameters - ---------- - expr : tvm.relay.Expr - The input expression. - - fallback_device : int - The fallback device type. It is also used as the default device for - operators with no annotated device. 
- - Returns - ------- - transformed_expr : tvm.relay.Expr - Transformed expression with cross device data copy operators. - """ - return _ir_pass.RewriteDeviceAnnotation(expr, fallback_device) - - def collect_device_info(expr): """Collect the device allocation map for the given expression. The device ids are propagated from the `device_copy` operators. @@ -478,7 +260,7 @@ def collect_device_info(expr): ret : Dict[tvm.relay.expr, int] A dictionary mapping tvm.relay.Expr to device type. """ - return _ir_pass.CollectDeviceInfo(expr) + return _analysis.CollectDeviceInfo(expr) def collect_device_annotation_ops(expr): @@ -495,38 +277,7 @@ def collect_device_annotation_ops(expr): A dictionary mapping tvm.relay.Expr to device type where the keys are annotation expressions. """ - return _ir_pass.CollectDeviceAnnotationOps(expr) - - -def gradient(expr, mod=None, mode='higher_order'): - """ - Transform the input function, - returning a function that calculate the original result, - paired with gradient of the input. - - Parameters - ---------- - expr : tvm.relay.Expr - The input expression, which is a Function or a GlobalVar. - - mod : Optional[tvm.relay.Module] - - mode : Optional[String] - The mode of the automatic differentiation algorithm. - 'first_order' only work on first order code, but will not produce reference nor closure. - 'higher_order' work on all code using reference and closure. - - Returns - ------- - expr : tvm.relay.Expr - The transformed expression. - """ - if mode == 'first_order': - return _ir_pass.first_order_gradient(expr, mod) - elif mode == 'higher_order': - return _ir_pass.gradient(expr, mod) - else: - raise Exception('unknown mode') + return _analysis.CollectDeviceAnnotationOps(expr) def get_total_mac_number(expr): @@ -543,27 +294,7 @@ def get_total_mac_number(expr): result : int64 The number of MACs (multiply-accumulate) of a model """ - return _ir_pass.GetTotalMacNumber(expr) - - -def eliminate_common_subexpr(expr, fskip=None): - """ - Eliminate common subexpressions. - - Parameters - ---------- - expr : tvm.relay.Expr - The input expression. - - fskip : function - The callback function that decides whether an expression should be skipped. - - Returns - ------- - result : tvm.relay.Expr - The output expression. - """ - return _ir_pass.eliminate_common_subexpr(expr, fskip) + return _analysis.GetTotalMacNumber(expr) def unmatched_cases(match, mod=None): @@ -574,15 +305,16 @@ def unmatched_cases(match, mod=None): ---------- match : tvm.relay.Match The match expression + mod : Optional[tvm.relay.Module] The module (defaults to an empty module) Returns ------- missing_patterns : [tvm.relay.Pattern] - Patterns that the match expression does not catch. + Patterns that the match expression does not catch. """ - return _ir_pass.unmatched_cases(match, mod) + return _analysis.unmatched_cases(match, mod) def detect_feature(a, b=None): @@ -605,4 +337,27 @@ def detect_feature(a, b=None): """ if isinstance(a, Module): a, b = b, a - return set([Feature(int(x)) for x in _ir_pass.detect_feature(a, b)]) + return set([Feature(int(x)) for x in _analysis.detect_feature(a, b)]) + + +def structural_hash(value): + """Hash a Relay expression structurally. + + Parameters + ---------- + expr : Union[tvm.relay.Expr, tvm.relay.Type] + The expression to hash. 
+ + Returns + ------- + result : int + The hash value + """ + if isinstance(value, Expr): + return int(_analysis._expr_hash(value)) + elif isinstance(value, Type): + return int(_analysis._type_hash(value)) + else: + msg = ("found value of type {0} expected" + + "relay.Expr or relay.Type").format(type(value)) + raise TypeError(msg) diff --git a/python/tvm/relay/backend/interpreter.py b/python/tvm/relay/backend/interpreter.py index cf643f61243c..5b7d9eda46b4 100644 --- a/python/tvm/relay/backend/interpreter.py +++ b/python/tvm/relay/backend/interpreter.py @@ -21,7 +21,7 @@ import numpy as np from . import _backend -from .. import _make, ir_pass, transform +from .. import _make, analysis, transform from .. import module from ... import register_func, nd from ..base import NodeBase, register_relay_node @@ -239,7 +239,7 @@ def evaluate(self, expr=None, binds=None): return self._make_executor() if isinstance(expr, Function): - assert not ir_pass.free_vars(expr) + assert not analysis.free_vars(expr) if isinstance(expr, (Function, GlobalVar)): return self._make_executor(expr) diff --git a/python/tvm/relay/expr.pyi b/python/tvm/relay/expr.pyi index b7395c365390..d264e99e0577 100644 --- a/python/tvm/relay/expr.pyi +++ b/python/tvm/relay/expr.pyi @@ -19,7 +19,7 @@ from typing import List import tvm from .base import Span, NodeBase from .ty import Type, TypeParam -from ._ir_pass import _get_checked_type +from ._analysis import _get_checked_type class Expr(NodeBase): @@ -128,4 +128,4 @@ class If(Expr): def __init__(self, cond, true_value, false_value): # type: (Expr, Expr, Expr) -> None - ... \ No newline at end of file + ... diff --git a/python/tvm/relay/frontend/caffe2.py b/python/tvm/relay/frontend/caffe2.py index 18489b380ee7..91f0409b39d5 100644 --- a/python/tvm/relay/frontend/caffe2.py +++ b/python/tvm/relay/frontend/caffe2.py @@ -18,7 +18,7 @@ """Caffe2 frontend""" from __future__ import absolute_import as _abs import tvm -from .. import ir_pass +from .. import analysis from .. import expr as _expr from .. import module as _module from .. import op as _op @@ -450,7 +450,7 @@ def from_caffe2(self, init_net, predict_net): else: outputs = out[0] - func = _expr.Function(ir_pass.free_vars(outputs), outputs) + func = _expr.Function(analysis.free_vars(outputs), outputs) self._mod[self._mod.entry_func] = func return self._mod, self._params diff --git a/python/tvm/relay/frontend/common.py b/python/tvm/relay/frontend/common.py index efd198803c2b..6d8e14569e73 100644 --- a/python/tvm/relay/frontend/common.py +++ b/python/tvm/relay/frontend/common.py @@ -19,8 +19,8 @@ import logging from topi.util import get_const_tuple from .. import expr as _expr -from .. import expr as _expr -from .. import ir_pass +from .. import module as _module +from .. import transform as _transform from .. 
import op as _op @@ -407,9 +407,17 @@ def get_name(node): name = node.name_hint return name + +def infer_type(node): + """A method to infer the type of an intermediate node in the relay graph.""" + mod = _module.Module.from_expr(node) + mod = _transform.InferType()(mod) + entry = mod[mod.entry_func] + return entry if isinstance(node, _expr.Function) else entry.body + def infer_shape(inputs): """A method to get the output shape of an intermediate node in the graph.""" - out_type = ir_pass.infer_type(inputs) + out_type = infer_type(inputs) out_shapes = get_const_tuple(out_type.checked_type.shape) return out_shapes @@ -417,7 +425,7 @@ def infer_channels(inputs, transpose=False): """A hack for getting 'channels' or 'units' since caffe2 does not provide these attributes. We check the shape of weights provided to get the number. """ - out_type = ir_pass.infer_type(inputs) + out_type = infer_type(inputs) out_shapes = [get_const_tuple(out_type.checked_type.shape)] channels = out_shapes[0][0] if not transpose else out_shapes[0][1] return channels diff --git a/python/tvm/relay/frontend/coreml.py b/python/tvm/relay/frontend/coreml.py index 1cac547d07c9..e7b129e66724 100644 --- a/python/tvm/relay/frontend/coreml.py +++ b/python/tvm/relay/frontend/coreml.py @@ -19,7 +19,7 @@ from __future__ import absolute_import as _abs import numpy as np import tvm -from .. import ir_pass +from .. import analysis from .. import expr as _expr from .. import module as _module from .. import op as _op @@ -462,6 +462,6 @@ def from_coreml(model, shape=None): for o in spec.description.output] # for now return first output outexpr = outexpr[0] - func = _expr.Function(ir_pass.free_vars(outexpr), outexpr) + func = _expr.Function(analysis.free_vars(outexpr), outexpr) params = {k:_nd.array(np.array(v, dtype=np.float32)) for k, v in etab.params.items()} return _module.Module.from_expr(func), params diff --git a/python/tvm/relay/frontend/darknet.py b/python/tvm/relay/frontend/darknet.py index 7b26ed5692df..f452146ae46c 100644 --- a/python/tvm/relay/frontend/darknet.py +++ b/python/tvm/relay/frontend/darknet.py @@ -23,7 +23,7 @@ from enum import Enum import numpy as np import tvm -from .. import ir_pass +from .. import analysis from .. import expr as _expr from .. import module as _module from .common import get_relay_op, new_var @@ -820,7 +820,7 @@ def from_darknet(self): outputs = _as_list(sym) + self._outs outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs) - sym = _expr.Function(ir_pass.free_vars(outputs), outputs) + sym = _expr.Function(analysis.free_vars(outputs), outputs) return _module.Module.from_expr(sym), self._tvmparams def from_darknet(net, diff --git a/python/tvm/relay/frontend/keras.py b/python/tvm/relay/frontend/keras.py index ad033f9bf326..91da87c84b80 100644 --- a/python/tvm/relay/frontend/keras.py +++ b/python/tvm/relay/frontend/keras.py @@ -20,7 +20,7 @@ import sys import numpy as np import tvm -from .. import ir_pass +from .. import analysis from .. import expr as _expr from .. import module as _module from .. 
import op as _op @@ -743,6 +743,6 @@ def _convert_input_layer(keras_layer): outexpr = [etab.get_expr(oc[0].name + ":" + str(oc[1]) + ":" + str(oc[2])) \ for oc in model._output_coordinates] outexpr = outexpr[0] if len(outexpr) == 1 else _expr.Tuple(outexpr) - func = _expr.Function(ir_pass.free_vars(outexpr), outexpr) + func = _expr.Function(analysis.free_vars(outexpr), outexpr) params = {k:_nd.array(np.array(v, dtype=np.float32)) for k, v in etab.params.items()} return _module.Module.from_expr(func), params diff --git a/python/tvm/relay/frontend/mxnet.py b/python/tvm/relay/frontend/mxnet.py index 0bcee63ad3e8..26c357e9c924 100644 --- a/python/tvm/relay/frontend/mxnet.py +++ b/python/tvm/relay/frontend/mxnet.py @@ -20,7 +20,7 @@ import json import tvm -from .. import ir_pass +from .. import analysis, transform from .. import expr as _expr from .. import op as _op from .. import module as _module @@ -41,6 +41,13 @@ "relu" : _op.nn.relu } +def _infer_type(node): + """A method to infer the type of an intermediate node in the relay graph.""" + mod = _module.Module.from_expr(node) + mod = transform.InferType()(mod) + entry = mod[mod.entry_func] + return entry if isinstance(node, _expr.Function) else entry.body + def _mx_fully_connected(inputs, attrs): import mxnet as mx units = attrs.get_int("num_hidden") @@ -89,7 +96,8 @@ def _stable_softrelu(x): def _mx_compare(new_op, wrapper): def impl(inputs, attrs): - dtype = ir_pass.infer_type(inputs[0]).checked_type.dtype + expr = _infer_type(inputs[0]) + dtype = expr.checked_type.dtype return wrapper(new_op)(inputs, attrs).astype(dtype) return impl @@ -258,7 +266,8 @@ def _mx_slice_like(inputs, attrs): def _mx_slice_axis(inputs, attrs): assert len(inputs) == 1 - shape = ir_pass.infer_type(inputs[0]).checked_type.shape + expr = _infer_type(inputs[0]) + shape = expr.checked_type.shape axis = attrs.get_int("axis") ax_beg = attrs.get_int("begin") ax_end = attrs.get_str("end") @@ -302,7 +311,8 @@ def _mx_crop_like(inputs, attrs): if offset == (0, 0): new_attrs["axes"] = (2, 3) return _op.slice_like(*inputs, **new_attrs) - like_shape = ir_pass.infer_type(inputs[1]).checked_type.shape + expr = _infer_type(inputs[1]) + like_shape = expr.checked_type.shape new_attrs['begin'] = [0, 0, offset[0], offset[1]] new_attrs['end'] = [like_shape[0], like_shape[1], offset[0]+like_shape[2], offset[1]+like_shape[3]] @@ -532,7 +542,8 @@ def _mx_resize(inputs, attrs): scale_width = attrs.get_float("scale_width", None) height = attrs.get_int("height", 1) width = attrs.get_int("width", 1) - shape = ir_pass.infer_type(inputs[0]).checked_type.shape + expr = _infer_type(inputs[0]) + shape = expr.checked_type.shape if scale_height is not None: height = (scale_height * shape[2]).astype("int32") if scale_width is not None: @@ -639,7 +650,8 @@ def _mx_broadcast_axis(inputs, attrs): assert len(axis) == len(size) if len(axis) == 0: return inputs[0] - src_shape = ir_pass.infer_type(inputs[0])._checked_type_.shape + expr = _infer_type(inputs[0]) + src_shape = expr.checked_type.shape tgt_shape = [] for i, dim in enumerate(src_shape): if i not in axis: @@ -734,7 +746,8 @@ def _rnn_cell(data, states, i2h_weight, h2h_weight, i2h_bias, h2h_bias, activati return out, [out] def _gru_cell(data, states, i2h_weight, h2h_weight, i2h_bias, h2h_bias): - dtype = ir_pass.infer_type(data).checked_type.dtype + expr = _infer_type(data) + dtype = expr.checked_type.dtype i2h = _op.nn.bias_add(_op.nn.dense(data, i2h_weight), i2h_bias, axis=-1) h2h = _op.nn.bias_add(_op.nn.dense(states[0], h2h_weight), 
h2h_bias, axis=-1) i2h_r, i2h_z, i2h = _op.split(i2h, indices_or_sections=3, axis=1) @@ -776,7 +789,8 @@ def _lstm_cell(data, states, i2h_weight, h2h_weight, i2h_bias, h2h_bias): seq_data = inputs[0] concat_weight = inputs[1] init_states = inputs[2:] - data_shape = ir_pass.infer_type(seq_data).checked_type.shape + expr = _infer_type(seq_data) + data_shape = expr.checked_type.shape seq_len = int(data_shape[0]) assert len(concat_weight) == num_layers * 4 * direct @@ -1099,7 +1113,7 @@ def _from_mxnet_impl(symbol, shape_dict, dtype_info, mod=None): outputs = [node_map[e[0]][e[1]] for e in jgraph["heads"]] outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs) - func = _expr.Function(ir_pass.free_vars(outputs), outputs) + func = _expr.Function(analysis.free_vars(outputs), outputs) return func diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index bb968ec0bea8..397ca90de55f 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -22,7 +22,7 @@ import numpy as np import tvm from ... import nd as _nd -from .. import ir_pass +from .. import analysis from .. import transform as _transform from .. import expr as _expr from .. import module as _module @@ -412,7 +412,7 @@ def _impl_v1(cls, inputs, attr, params): else: data, shape = inputs logging.warning("Constant evaluating Reshape's shape argument, may reduce performance") - shape_params = ir_pass.free_vars(shape) + shape_params = analysis.free_vars(shape) func = _expr.Function(shape_params, shape) mod = _module.Module.from_expr(func) seq = _transform.Sequential([_transform.InferType(), @@ -1106,7 +1106,7 @@ def from_onnx(self, graph, opset): # now return the outputs outputs = [self._nodes[self._parse_value_proto(i)] for i in graph.output] outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs) - func = _expr.Function(ir_pass.free_vars(outputs), outputs) + func = _expr.Function(analysis.free_vars(outputs), outputs) return _module.Module.from_expr(func), self._params def _parse_value_proto(self, value_proto): diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index d754e85ef78d..e14566f6ab33 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -27,7 +27,8 @@ import tvm from topi.util import get_const_tuple -from .. import ir_pass +from .. import analysis +from .. import transform as _transform from .. import expr as _expr from .. import op as _op from ..expr_functor import ExprMutator @@ -38,9 +39,9 @@ def _infer_value(input_val, params): from tvm.contrib import graph_runtime # Check that all free variables have associated parameters. - assert all(var.name_hint in params.keys() for var in ir_pass.free_vars( + assert all(var.name_hint in params.keys() for var in analysis.free_vars( input_val)), "All inputs to infer must be available in params." 
- func = _expr.Function(ir_pass.free_vars(input_val), input_val) + func = _expr.Function(analysis.free_vars(input_val), input_val) with tvm.relay.build_config(opt_level=0): graph, lib, params = tvm.relay.build(func, target="llvm", params=params) ctx = tvm.context("llvm", 0) @@ -235,9 +236,16 @@ def _infer_out_shapes(inputs, params): """A method to get the output shape of intermediate nodes in the relay graph.""" return [_infer_shape(inputs, params)] +def _infer_type(node): + """A method to infer the type of an intermediate node in the relay graph.""" + mod = _module.Module.from_expr(node) + mod = _transform.InferType()(mod) + entry = mod[mod.entry_func] + return entry if isinstance(node, _expr.Function) else entry.body + def _infer_shape(node, params=None): """A method to get the output shape of an intermediate node in the relay graph.""" - out_type = ir_pass.infer_type(node) + out_type = _infer_type(node) return get_const_tuple(out_type.checked_type.shape) def _get_param(params, input_node): @@ -1841,7 +1849,8 @@ def _while_loop(self): bind_map = {} for i, var in enumerate(self.loop_vars): if not isinstance(var, _expr.Var): - var_type = ir_pass.infer_type(var).checked_type + var_chk = _infer_type(var) + var_type = var_chk.checked_type else: var_type = var.type_annotation @@ -2112,7 +2121,7 @@ def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): out.append(out_rnn) out = out[0] if len(out) == 1 else _expr.Tuple(out) - func = _expr.Function(ir_pass.free_vars(out), out) + func = _expr.Function(analysis.free_vars(out), out) self._mod[self._mod.entry_func] = func return self._mod, self._params @@ -2329,7 +2338,8 @@ def _convert_control_flow_operator(self, node, inputs, attrs, control_flow_node_ else: if node_name_prefix not in self._branches: self._branches[node_name_prefix] = Branch() - self._branches[node_name_prefix].cond = ir_pass.infer_type(op[0]) + chk_op = _infer_type(op[0]) + self._branches[node_name_prefix].cond = chk_op elif node.op == "NextIteration": op = self._nodes[node.input[0]] assert len(op) == 1 diff --git a/python/tvm/relay/frontend/tflite.py b/python/tvm/relay/frontend/tflite.py index fe163871fa60..bf1938b1481e 100644 --- a/python/tvm/relay/frontend/tflite.py +++ b/python/tvm/relay/frontend/tflite.py @@ -20,7 +20,7 @@ import math import numpy as np import tvm -from .. import ir_pass +from .. import analysis from .. import expr as _expr from .. import module as _module from .. import op as _op @@ -914,5 +914,5 @@ def from_tflite(model, shape_dict, dtype_dict): params = {k:_nd.array(np.array(v)) for k, v in exp_tab.params.items()} outputs = [exp_tab.get_expr(get_tensor_name(subgraph, i)) for i in model_outputs] outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs) - func = _expr.Function(ir_pass.free_vars(outputs), outputs) + func = _expr.Function(analysis.free_vars(outputs), outputs) return _module.Module.from_expr(func), params diff --git a/python/tvm/relay/module.py b/python/tvm/relay/module.py index 1a5e82269a96..097dbbb8ecaf 100644 --- a/python/tvm/relay/module.py +++ b/python/tvm/relay/module.py @@ -79,15 +79,6 @@ def _add(self, var, val, update=False): if isinstance(val, _expr.Expr): if isinstance(var, _base.string_types): var = _expr.GlobalVar(var) - - # TODO(@jroesch): Port this logic to C++. 
- if not isinstance(val, _expr.Function): - if isinstance(val, _expr.GlobalVar): - val = ir_pass.eta_expand(val, self) - else: - val = _expr.Function([], val) - - _make.Module_Add(self, var, val, update) else: assert isinstance(val, _ty.Type) diff --git a/python/tvm/relay/quantize/quantize.py b/python/tvm/relay/quantize/quantize.py index fa70e1954467..b7994217e964 100644 --- a/python/tvm/relay/quantize/quantize.py +++ b/python/tvm/relay/quantize/quantize.py @@ -22,7 +22,7 @@ from . import _quantize from .. import expr as _expr from .. import module as _module -from .. import ir_pass as _ir_pass +from .. import analysis as _analysis from .. import transform as _transform from .. import op as _op from ... import make as _make @@ -250,7 +250,7 @@ def _make_const(val): const_params[nclip_min] = _make_const(- (valid_range - 1)) const_params[nclip_max] = _make_const((valid_range - 1)) - _ir_pass.post_order_visit(graph, visit_func) + _analysis.post_order_visit(graph, visit_func) return _expr.bind(graph, const_params) diff --git a/python/tvm/relay/testing/dcgan.py b/python/tvm/relay/testing/dcgan.py index 4ee0bd13a5a7..e9a914ecd69a 100644 --- a/python/tvm/relay/testing/dcgan.py +++ b/python/tvm/relay/testing/dcgan.py @@ -81,7 +81,7 @@ def get_net(batch_size, random_len=100, oshape=(3, 64, 64), ngf=128, code=None, dc32, ishape=(ngf, 32, 32), oshape=oshape[-3:], kshape=(4, 4), name="g5_deconv") tanh = relay.tanh(dc64) - args = relay.ir_pass.free_vars(tanh) + args = relay.analysis.free_vars(tanh) return relay.Function(args, tanh) diff --git a/python/tvm/relay/testing/densenet.py b/python/tvm/relay/testing/densenet.py index de3ebe36eb7b..573a4bc36794 100644 --- a/python/tvm/relay/testing/densenet.py +++ b/python/tvm/relay/testing/densenet.py @@ -79,7 +79,7 @@ def _make_dense_net(num_init_features, growth_rate, block_config, ret = layers.dense_add_bias(flat, units=classes, name='dense') - return relay.Function(relay.ir_pass.free_vars(ret), ret) + return relay.Function(relay.analysis.free_vars(ret), ret) def get_workload(densenet_size=121, classes=1000, batch_size=4, image_shape=(3, 224, 224), dtype='float32'): diff --git a/python/tvm/relay/testing/dqn.py b/python/tvm/relay/testing/dqn.py index 034ac0a6c2e5..fdf46fbc2f7c 100644 --- a/python/tvm/relay/testing/dqn.py +++ b/python/tvm/relay/testing/dqn.py @@ -54,7 +54,7 @@ def get_net(batch_size, num_actions=18, image_shape=(4, 84, 84), dtype="float32" relu4 = relay.nn.relu(dense1) dense2 = layers.dense_add_bias(relu4, units=num_actions, name="dense2") - args = relay.ir_pass.free_vars(dense2) + args = relay.analysis.free_vars(dense2) return relay.Function(args, dense2) diff --git a/python/tvm/relay/testing/inception_v3.py b/python/tvm/relay/testing/inception_v3.py index c9ec3293ed0a..c3f0181f2951 100644 --- a/python/tvm/relay/testing/inception_v3.py +++ b/python/tvm/relay/testing/inception_v3.py @@ -266,7 +266,7 @@ def get_net(batch_size, fc1 = relay.nn.dense(flatten, relay.var("fc1_weight"), units=num_classes) fc1 = relay.nn.bias_add(fc1, relay.var("fc2_bias"), axis=-1) inception_v3 = relay.nn.softmax(data=fc1) - args = relay.ir_pass.free_vars(inception_v3) + args = relay.analysis.free_vars(inception_v3) return relay.Function(args, inception_v3) def get_workload(batch_size=1, num_classes=1000, diff --git a/python/tvm/relay/testing/init.py b/python/tvm/relay/testing/init.py index b246b4617276..20b5156990a7 100644 --- a/python/tvm/relay/testing/init.py +++ b/python/tvm/relay/testing/init.py @@ -150,10 +150,11 @@ def create_workload(net, initializer=None, 
seed=0): params : dict of str to NDArray The parameters. """ - net = relay.ir_pass.infer_type(net) + mod = relay.Module.from_expr(net) + mod = relay.transform.InferType()(mod) + net = mod[mod.entry_func] shape_dict = { v.name_hint : v.checked_type for v in net.params} - net.astext() np.random.seed(seed) initializer = initializer if initializer else Xavier() params = {} diff --git a/python/tvm/relay/testing/lstm.py b/python/tvm/relay/testing/lstm.py index b0915e033ccb..9721c26f2a15 100644 --- a/python/tvm/relay/testing/lstm.py +++ b/python/tvm/relay/testing/lstm.py @@ -154,7 +154,7 @@ def get_net(iterations, num_hidden, batch_size=1, dtype="float32"): builder.ret(out) body = builder.get() - args = relay.ir_pass.free_vars(body) + args = relay.analysis.free_vars(body) return relay.Function(args, body, input_type) diff --git a/python/tvm/relay/testing/mlp.py b/python/tvm/relay/testing/mlp.py index 562ef21ba9f1..e178408a6a1b 100644 --- a/python/tvm/relay/testing/mlp.py +++ b/python/tvm/relay/testing/mlp.py @@ -58,7 +58,7 @@ def get_net(batch_size, fc3 = relay.nn.dense(act2, relay.var("fc3_weight"), units=num_classes) fc3 = relay.nn.bias_add(fc3, relay.var("fc3_bias"), axis=-1) mlp = relay.nn.softmax(data=fc3) - args = relay.ir_pass.free_vars(mlp) + args = relay.analysis.free_vars(mlp) return relay.Function(args, mlp) diff --git a/python/tvm/relay/testing/mobilenet.py b/python/tvm/relay/testing/mobilenet.py index 78e1d82456c8..dff103150ab0 100644 --- a/python/tvm/relay/testing/mobilenet.py +++ b/python/tvm/relay/testing/mobilenet.py @@ -108,7 +108,7 @@ def mobile_net(num_classes=1000, data_shape=(1, 3, 224, 224), weight = relay.var('fc_weight') fc = relay.nn.dense(data=flatten, weight=weight, units=num_classes) softmax = relay.nn.softmax(data=fc) - return relay.Function(relay.ir_pass.free_vars(softmax), softmax) + return relay.Function(relay.analysis.free_vars(softmax), softmax) def get_workload(batch_size=1, num_classes=1000, image_shape=(3, 224, 224), dtype='float32'): diff --git a/python/tvm/relay/testing/resnet.py b/python/tvm/relay/testing/resnet.py index 9ba57ae09ef5..f67785917384 100644 --- a/python/tvm/relay/testing/resnet.py +++ b/python/tvm/relay/testing/resnet.py @@ -169,7 +169,7 @@ def resnet(units, flat = relay.nn.batch_flatten(data=pool1) fc1 = layers.dense_add_bias(data=flat, units=num_classes, name='fc1') net = relay.nn.softmax(data=fc1) - return relay.Function(relay.ir_pass.free_vars(net), net) + return relay.Function(relay.analysis.free_vars(net), net) def get_net(batch_size, diff --git a/python/tvm/relay/testing/squeezenet.py b/python/tvm/relay/testing/squeezenet.py index c7b8e8db166b..5c90265183ff 100644 --- a/python/tvm/relay/testing/squeezenet.py +++ b/python/tvm/relay/testing/squeezenet.py @@ -119,7 +119,7 @@ def get_net(batch_size, image_shape, num_classes, version, dtype): net = relay.nn.global_avg_pool2d(net) net = relay.nn.batch_flatten(net) net = relay.nn.softmax(net) - args = relay.ir_pass.free_vars(net) + args = relay.analysis.free_vars(net) return relay.Function(args, net) diff --git a/python/tvm/relay/testing/vgg.py b/python/tvm/relay/testing/vgg.py index bec141f70ffd..06d9aa3d2d93 100644 --- a/python/tvm/relay/testing/vgg.py +++ b/python/tvm/relay/testing/vgg.py @@ -90,7 +90,7 @@ def get_net(batch_size, image_shape, num_classes, dtype, num_layers=11, batch_no feature = get_feature(data, layers, filters, batch_norm) classifier = get_classifier(feature, num_classes) symbol = relay.nn.softmax(data=classifier) - args = relay.ir_pass.free_vars(symbol) + args = 
relay.analysis.free_vars(symbol) return relay.Function(args, symbol) diff --git a/python/tvm/relay/transform.py b/python/tvm/relay/transform.py index ba4857dc4d36..255718c627f0 100644 --- a/python/tvm/relay/transform.py +++ b/python/tvm/relay/transform.py @@ -277,6 +277,40 @@ def FoldScaleAxis(): return _transform.FoldScaleAxis() +def BackwardFoldScaleAxis(): + """Backward fold axis scaling into weights of conv2d/dense. + + Returns + ------- + ret : tvm.relay.Pass + The registered pass to backward fold expressions. + + Note + ---- + It is recommended to call backward_fold_scale_axis + before using forward_fold_scale_axis. + As backward folding targets common conv-bn pattern. + """ + return _transform.BackwardFoldScaleAxis() + + +def ForwardFoldScaleAxis(): + """Fold the scaling of axis into weights of conv2d/dense. + + Returns + ------- + ret : tvm.relay.Pass + The registered pass to forward fold expressions. + + Note + ---- + It is recommended to call backward_fold_scale_axis + before using forward_fold_scale_axis. + As backward folding targets common conv-bn pattern. + """ + return _transform.ForwardFoldScaleAxis() + + def SimplifyInference(): """Simplify the data-flow graph for inference phase. An simplified expression which is semantically equal to the input expression will be returned. @@ -406,7 +440,7 @@ def ToANormalForm(): Returns ------- - ret: tvm.relay.Pass + ret: Union[tvm.relay.Pass, tvm.relay.Expr] The registered pass that transforms an expression into A Normal Form. """ return _transform.ToANormalForm() @@ -454,6 +488,21 @@ def EliminateCommonSubexpr(fskip=None): def PartialEvaluate(): """Evaluate the static fragment of the code. + Note + ---- + This transformation could be either `Module -> Module` or `Expr -> Expr`. + It will directly transform the input expression to a new one if the target + expression is provided. Otherwise, it will rely on the pass manager to + carry out transformation. + + Parameters + ---------- + expr : Optional[tvm.relay.Expr] + The input expression. + + mod : Optional[tvm.relay.Module] + The global module. + Returns ------- ret: tvm.relay.Pass @@ -461,6 +510,7 @@ def PartialEvaluate(): """ return _transform.PartialEvaluate() + def CanonicalizeCast(): """ Canonicalize cast expressions to make operator fusion more efficient. @@ -473,28 +523,35 @@ def CanonicalizeCast(): return _transform.CanonicalizeCast() -def OptimizeOnExpr(expr, passes): - """Perform optimization passes on an expressioin. +def gradient(expr, mod=None, mode='higher_order'): + """ + Transform the input function, + returning a function that calculate the original result, + paired with gradient of the input. Parameters ---------- - expr: tvm.relay.Expr - The expression for optimization. + expr : tvm.relay.Expr + The input expression, which is a Function or a GlobalVar. - passes: Union[Pass, List[Pass]] - The list of optimizations to be applied. + mod : Optional[tvm.relay.Module] + + mode : Optional[String] + The mode of the automatic differentiation algorithm. + 'first_order' only works on first order code, but will not produce + reference nor closure. + 'higher_order' works on all code using reference and closure. Returns ------- - ret: tvm.relay.Expr - The optimized expression. + expr : tvm.relay.Expr + The transformed expression. 
""" - if isinstance(passes, Pass): - passes = [passes] - if not isinstance(passes, (list, tuple)): - raise TypeError("passes must be a pass or a list of pass objects.") - - return _transform.OptimizeOnExpr(expr, passes) + if mode == 'first_order': + return _transform.first_order_gradient(expr, mod) + if mode == 'higher_order': + return _transform.gradient(expr, mod) + raise Exception('unknown mode') def _wrap_class_module_pass(pass_cls, pass_info): diff --git a/src/relay/backend/build_module.cc b/src/relay/backend/build_module.cc index 3feb7e4a4b54..3ab57f166d90 100644 --- a/src/relay/backend/build_module.cc +++ b/src/relay/backend/build_module.cc @@ -21,6 +21,7 @@ * \file relay/backend/build_module.cc * \brief Code generation for TVM's graph runtime. */ +#include #include #include #include diff --git a/src/relay/backend/compile_engine.cc b/src/relay/backend/compile_engine.cc index 7ae1befcfe89..83e4a36ff4f9 100644 --- a/src/relay/backend/compile_engine.cc +++ b/src/relay/backend/compile_engine.cc @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/relay/backend/compile_engine.h b/src/relay/backend/compile_engine.h index 9b510ad2fd29..9765cf90da18 100644 --- a/src/relay/backend/compile_engine.h +++ b/src/relay/backend/compile_engine.h @@ -27,8 +27,9 @@ #define TVM_RELAY_BACKEND_COMPILE_ENGINE_H_ #include +#include #include -#include +#include #include #include diff --git a/src/relay/backend/graph_plan_memory.cc b/src/relay/backend/graph_plan_memory.cc index 5c2e5c4c289a..91a597baceaf 100644 --- a/src/relay/backend/graph_plan_memory.cc +++ b/src/relay/backend/graph_plan_memory.cc @@ -25,7 +25,7 @@ */ #include #include -#include +#include #include "../../common/arena.h" namespace tvm { diff --git a/src/relay/backend/interpreter.cc b/src/relay/backend/interpreter.cc index ff2d9e6117ab..7c97befc55f9 100644 --- a/src/relay/backend/interpreter.cc +++ b/src/relay/backend/interpreter.cc @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include #include "compile_engine.h" diff --git a/src/relay/backend/utils.h b/src/relay/backend/utils.h index 65a7efd4c205..139dab21e973 100644 --- a/src/relay/backend/utils.h +++ b/src/relay/backend/utils.h @@ -27,7 +27,6 @@ #include #include -#include #include #include #include diff --git a/src/relay/backend/vm/lambda_lift.cc b/src/relay/backend/vm/lambda_lift.cc index 668c024a8d55..6290ef7c6e93 100644 --- a/src/relay/backend/vm/lambda_lift.cc +++ b/src/relay/backend/vm/lambda_lift.cc @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/relay/backend/vm/vm.cc b/src/relay/backend/vm/vm.cc index cf0b952005fc..4dbcda9abb6f 100644 --- a/src/relay/backend/vm/vm.cc +++ b/src/relay/backend/vm/vm.cc @@ -28,17 +28,18 @@ #include #include #include -#include +#include namespace tvm { namespace relay { namespace vm { +runtime::vm::VirtualMachine CompileModule(const Module& mod); + using tvm::runtime::Object; using tvm::runtime::ObjectTag; using tvm::runtime::vm::VirtualMachine; - VirtualMachine FromModule(const Module& module, const std::vector& ctxs) { auto vm = CompileModule(module); vm.Init(ctxs); diff --git a/src/relay/ir/alpha_equal.cc b/src/relay/ir/alpha_equal.cc index 81017d4fddfa..42e66261a553 100644 --- a/src/relay/ir/alpha_equal.cc +++ b/src/relay/ir/alpha_equal.cc @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include "type_functor.h" #include "../../lang/attr_functor.h" diff --git a/src/relay/ir/expr_functor.cc 
b/src/relay/ir/expr_functor.cc index e09d79082227..36692c5c571b 100644 --- a/src/relay/ir/expr_functor.cc +++ b/src/relay/ir/expr_functor.cc @@ -345,7 +345,7 @@ void PostOrderVisit(const Expr& e, std::function fvisit) { ExprApplyVisit(fvisit).VisitExpr(e); } -TVM_REGISTER_API("relay._ir_pass.post_order_visit") +TVM_REGISTER_API("relay._analysis.post_order_visit") .set_body_typed([](Expr expr, PackedFunc f) { PostOrderVisit(expr, [f](const Expr& n) { f(n); diff --git a/src/relay/ir/hash.cc b/src/relay/ir/hash.cc index c57475476e58..6039ba272ddc 100644 --- a/src/relay/ir/hash.cc +++ b/src/relay/ir/hash.cc @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include "type_functor.h" #include "../../lang/attr_functor.h" @@ -412,12 +412,12 @@ size_t StructuralHash::operator()(const Expr& expr) const { return RelayHashHandler().ExprHash(expr); } -TVM_REGISTER_API("relay._ir_pass._expr_hash") +TVM_REGISTER_API("relay._analysis._expr_hash") .set_body_typed([](NodeRef ref) { return static_cast(RelayHashHandler().Hash(ref)); }); -TVM_REGISTER_API("relay._ir_pass._type_hash") +TVM_REGISTER_API("relay._analysis._type_hash") .set_body_typed([](Type type) { return static_cast(RelayHashHandler().TypeHash(type)); }); diff --git a/src/relay/ir/module.cc b/src/relay/ir/module.cc index 58f614a3cc77..51a2aeeeb111 100644 --- a/src/relay/ir/module.cc +++ b/src/relay/ir/module.cc @@ -23,7 +23,8 @@ * \brief The global module in Relay. */ #include -#include +#include +#include #include namespace tvm { @@ -184,7 +185,26 @@ TVM_REGISTER_API("relay._make.Module") .set_body_typed(ModuleNode::make); TVM_REGISTER_API("relay._make.Module_Add") -.set_body_method(&ModuleNode::Add); +.set_body([](TVMArgs args, TVMRetValue* ret) { + Module mod = args[0]; + GlobalVar var = args[1]; + NodeRef val = args[2]; + bool update = args[3]; + CHECK(val->derived_from()); + if (val->derived_from()) { + mod->Add(var, Downcast(val), update); + } else if (val->derived_from()) { + GlobalVar gv = Downcast(val); + auto mod_copy = Module(make_node(*mod.operator->())); + mod_copy = transform::EtaExpand()(mod_copy); + auto func = mod_copy->Lookup(gv->name_hint); + mod->Add(var, Downcast(func), update); + } else { + auto func = FunctionNode::make({}, Downcast(val), Type(nullptr), {}); + mod->Add(var, func, update); + } + *ret = mod; +}); TVM_REGISTER_API("relay._module.Module_AddDef") .set_body_method(&ModuleNode::AddDef); @@ -197,39 +217,39 @@ TVM_REGISTER_API("relay._module.Module_GetGlobalTypeVar") TVM_REGISTER_API("relay._module.Module_Lookup") .set_body_typed([](Module mod, GlobalVar var) { - return mod->Lookup(var); - }); + return mod->Lookup(var); +}); TVM_REGISTER_API("relay._module.Module_Lookup_str") .set_body_typed([](Module mod, std::string var) { - return mod->Lookup(var); - }); + return mod->Lookup(var); +}); TVM_REGISTER_API("relay._module.Module_LookupDef") .set_body_typed([](Module mod, GlobalTypeVar var) { - return mod->LookupDef(var); - }); + return mod->LookupDef(var); +}); TVM_REGISTER_API("relay._module.Module_LookupDef_str") .set_body_typed([](Module mod, std::string var) { - return mod->LookupDef(var); - }); + return mod->LookupDef(var); +}); TVM_REGISTER_API("relay._module.Module_FromExpr") .set_body_typed([](Expr e) { - return ModuleNode::FromExpr(e); + return ModuleNode::FromExpr(e); }); TVM_REGISTER_API("relay._module.Module_Update") .set_body_typed([](Module mod, Module from) { - mod->Update(from); - }); + mod->Update(from); +}); TVM_STATIC_IR_FUNCTOR_REGISTER(IRPrinter, vtable) .set_dispatch( - 
[](const ModuleNode *node, tvm::IRPrinter *p) { - p->stream << "ModuleNode( " << node->functions << ")"; - }); + [](const ModuleNode *node, tvm::IRPrinter *p) { + p->stream << "ModuleNode( " << node->functions << ")"; +}); } // namespace relay } // namespace tvm diff --git a/src/relay/pass/alter_op_layout.cc b/src/relay/pass/alter_op_layout.cc index cc71968fba58..82424500ffc8 100644 --- a/src/relay/pass/alter_op_layout.cc +++ b/src/relay/pass/alter_op_layout.cc @@ -24,7 +24,8 @@ other expressions. This pass can be used for computing convolution in custom layouts or other general weight pre-transformation. */ -#include +#include +#include #include #include #include @@ -348,9 +349,6 @@ Expr AlterOpLayout(const Expr& expr) { return ForwardRewrite(expr, AlterOpLayoutRewrite, fcontext); } -TVM_REGISTER_API("relay._ir_pass.AlterOpLayout") -.set_body_typed(AlterOpLayout); - } // namespace alter_op_layout namespace transform { diff --git a/src/relay/pass/canonicalize_cast.cc b/src/relay/pass/canonicalize_cast.cc index 99f4a7f44e7e..04fec248f81c 100644 --- a/src/relay/pass/canonicalize_cast.cc +++ b/src/relay/pass/canonicalize_cast.cc @@ -22,7 +22,7 @@ * \file canonicalize_cast.cc * \brief Canonicalize cast expressions to make operator fusion more efficient. */ -#include +#include #include #include #include diff --git a/src/relay/pass/canonicalize_ops.cc b/src/relay/pass/canonicalize_ops.cc index ff9e2304a3bc..fc0c43d200e5 100644 --- a/src/relay/pass/canonicalize_ops.cc +++ b/src/relay/pass/canonicalize_ops.cc @@ -23,7 +23,7 @@ * \brief Canonicalize special operators to basic operators. This can simplify latter analysis. (e.g. Expand bias_add to expand_dims and broadcast_add.) */ -#include +#include #include #include #include @@ -61,9 +61,6 @@ Expr CanonicalizeOps(const Expr& e) { return BiasAddSimplifier().Mutate(e); } -TVM_REGISTER_API("relay._ir_pass.canonicalize_ops") -.set_body_typed(CanonicalizeOps); - namespace transform { Pass CanonicalizeOps() { diff --git a/src/relay/pass/combine_parallel_conv2d.cc b/src/relay/pass/combine_parallel_conv2d.cc index c95c1ddf8e16..d72705c8ce47 100644 --- a/src/relay/pass/combine_parallel_conv2d.cc +++ b/src/relay/pass/combine_parallel_conv2d.cc @@ -33,7 +33,7 @@ * convolution branches, such as Inception block. */ -#include +#include #include #include #include @@ -355,9 +355,6 @@ Expr CombineParallelConv2D(const Expr& expr, uint64_t min_num_branches) { return ParallelConv2DCombiner(min_num_branches).Combine(expr); } -TVM_REGISTER_API("relay._ir_pass.CombineParallelConv2D") -.set_body_typed(CombineParallelConv2D); - namespace transform { Pass CombineParallelConv2D(uint64_t min_num_branches) { diff --git a/src/relay/pass/dead_code.cc b/src/relay/pass/dead_code.cc index 8799bf403375..54075f0699e6 100644 --- a/src/relay/pass/dead_code.cc +++ b/src/relay/pass/dead_code.cc @@ -28,8 +28,9 @@ * CalcDep turn an expr into a dependency graph of expr, * GenLet turn the dependency graph into a let list, taking only the used value. 
*/ -#include +#include #include +#include #include "let_list.h" namespace tvm { diff --git a/src/relay/pass/device_annotation.cc b/src/relay/pass/device_annotation.cc index 8eeb493f1feb..aec974b184d3 100644 --- a/src/relay/pass/device_annotation.cc +++ b/src/relay/pass/device_annotation.cc @@ -34,7 +34,6 @@ #include #include #include -#include #include #include @@ -559,13 +558,13 @@ Map CollectDeviceAnnotationOps(const Expr& expr) { return AnnotatationVisitor::GetAnnotations(expr); } -TVM_REGISTER_API("relay._ir_pass.CollectDeviceInfo") +TVM_REGISTER_API("relay._analysis.CollectDeviceInfo") .set_body_typed(CollectDeviceInfo); -TVM_REGISTER_API("relay._ir_pass.RewriteDeviceAnnotation") +TVM_REGISTER_API("relay._analysis.RewriteDeviceAnnotation") .set_body_typed(RewriteAnnotatedOps); -TVM_REGISTER_API("relay._ir_pass.CollectDeviceAnnotationOps") +TVM_REGISTER_API("relay._analysis.CollectDeviceAnnotationOps") .set_body_typed(CollectDeviceAnnotationOps); namespace transform { diff --git a/src/relay/pass/eliminate_common_subexpr.cc b/src/relay/pass/eliminate_common_subexpr.cc index 883681adcaf4..33a791b2bd99 100644 --- a/src/relay/pass/eliminate_common_subexpr.cc +++ b/src/relay/pass/eliminate_common_subexpr.cc @@ -27,7 +27,7 @@ * to replace an expression with a previously appeared expression with the same input and * attributes. The fskip callback argument allows us to skip specific expressions. */ -#include +#include #include #include #include @@ -85,9 +85,6 @@ Expr EliminateCommonSubexpr(const Expr& expr, PackedFunc callback) { return CommonSubexprEliminator(callback)(expr); } -TVM_REGISTER_API("relay._ir_pass.eliminate_common_subexpr") -.set_body_typed(EliminateCommonSubexpr); - namespace transform { Pass EliminateCommonSubexpr(PackedFunc fskip) { diff --git a/src/relay/pass/eta_expand.cc b/src/relay/pass/eta_expand.cc index 3139d41d6393..e73e3778395e 100644 --- a/src/relay/pass/eta_expand.cc +++ b/src/relay/pass/eta_expand.cc @@ -25,7 +25,8 @@ * \brief Add abstraction over a function. For example, abs will become (fun x -> abs x). 
* */ -#include +#include +#include namespace tvm { namespace relay { @@ -44,10 +45,8 @@ Expr EtaExpand(const Expr& e, const Module& mod) { original_type_params = func->type_params; ret_type = func->ret_type; } else { - auto inferred = InferType(e, mod); - CHECK(inferred->is_type()); - - auto func = GetRef(inferred.as_derived()); + CHECK(e->is_type()); + auto func = GetRef(e.as_derived()); original_params = func->params; original_type_params = func->type_params; ret_type = func->ret_type; @@ -62,19 +61,18 @@ Expr EtaExpand(const Expr& e, const Module& mod) { auto new_func = FunctionNode::make(args, CallNode::make(e, params), ret_type, original_type_params); - return InferType(new_func, mod); + return new_func; } -TVM_REGISTER_API("relay._ir_pass.eta_expand").set_body_typed(EtaExpand); - namespace transform { Pass EtaExpand() { runtime::TypedPackedFunc pass_func = [=](Function f, Module m, PassContext pc) { - return Downcast(EtaExpand(f, m)); - }; - return CreateFunctionPass(pass_func, 1, "EtaExpand", {}); + return Downcast(EtaExpand(f, m)); + }; + Pass expanded = CreateFunctionPass(pass_func, 1, "EtaExpand", {}); + return Sequential({expanded, InferType()}); } TVM_REGISTER_API("relay._transform.EtaExpand") diff --git a/src/relay/pass/feature.cc b/src/relay/pass/feature.cc index e86ca0621112..df3a5d7ecec5 100644 --- a/src/relay/pass/feature.cc +++ b/src/relay/pass/feature.cc @@ -23,7 +23,7 @@ * \brief Detect features used in Expr/Module */ #include -#include +#include #include #include #include @@ -97,7 +97,7 @@ Array PyDetectFeature(const Expr& expr, const Module& mod) { return static_cast>(fs); } -TVM_REGISTER_API("relay._ir_pass.detect_feature") +TVM_REGISTER_API("relay._analysis.detect_feature") .set_body_typed(PyDetectFeature); } // namespace relay diff --git a/src/relay/pass/fold_constant.cc b/src/relay/pass/fold_constant.cc index 815407038b08..71d189b0800f 100644 --- a/src/relay/pass/fold_constant.cc +++ b/src/relay/pass/fold_constant.cc @@ -21,7 +21,7 @@ * Copyright (c) 2018 by Contributors * \file constant_folding.cc */ -#include +#include #include #include #include @@ -156,9 +156,13 @@ class ConstantFolder : public ExprMutator { } // Constant evaluate a expression. Expr ConstEvaluate(Expr expr) { - expr = InferType(expr, Module(nullptr)); - expr = FuseOps(expr, 0, Module(nullptr)); - expr = InferType(expr, Module(nullptr)); + std::vector passes = {transform::FuseOps(0), + transform::InferType()}; + auto mod = ModuleNode::FromExpr(expr); + auto seq = transform::Sequential(passes); + mod = seq(mod); + auto entry_func = mod->Lookup(mod->entry_func); + expr = expr.as() == nullptr ? entry_func->body : entry_func; return ValueToExpr(executor_(expr)); } // Evaluate shape_of op @@ -213,9 +217,6 @@ Expr FoldConstant(const Expr& expr) { Module(nullptr), ctx, target)).Mutate(expr); } -TVM_REGISTER_API("relay._ir_pass.FoldConstant") -.set_body_typed(FoldConstant); - namespace transform { Pass FoldConstant() { diff --git a/src/relay/pass/fold_scale_axis.cc b/src/relay/pass/fold_scale_axis.cc index 53089807ace5..868a08f8b576 100644 --- a/src/relay/pass/fold_scale_axis.cc +++ b/src/relay/pass/fold_scale_axis.cc @@ -26,7 +26,7 @@ * conv/dense operators. 
*/ #include -#include +#include #include #include #include @@ -545,10 +545,6 @@ Expr ForwardFoldScaleAxis(const Expr& data) { data, "FScaleAxisForwardRewrite", fcontext); } -// Expose the FoldScaleAxisFoward -TVM_REGISTER_API("relay._ir_pass.forward_fold_scale_axis") -.set_body_typed(ForwardFoldScaleAxis); - //---------------------------------------- // Implement backward transformations. //---------------------------------------- @@ -947,9 +943,6 @@ Expr BackwardFoldScaleAxis(const Expr& data) { return make_node()->Fold(data); } -TVM_REGISTER_API("relay._ir_pass.backward_fold_scale_axis") -.set_body_typed(BackwardFoldScaleAxis); - } // namespace fold_scale_axis namespace transform { @@ -964,6 +957,9 @@ Pass ForwardFoldScaleAxis() { {ir::StringImm::make("InferType")}); } +TVM_REGISTER_API("relay._transform.ForwardFoldScaleAxis") +.set_body_typed(ForwardFoldScaleAxis); + Pass BackwardFoldScaleAxis() { runtime::TypedPackedFunc pass_func = [=](Function f, Module m, PassContext pc) { @@ -974,6 +970,9 @@ Pass BackwardFoldScaleAxis() { {ir::StringImm::make("InferType")}); } +TVM_REGISTER_API("relay._transform.BackwardFoldScaleAxis") +.set_body_typed(BackwardFoldScaleAxis); + Pass FoldScaleAxis() { // FoldScaleAxis pass contains the following three passes. Therefore, we can // register it as a sequential pass. diff --git a/src/relay/pass/forward_rewrite.cc b/src/relay/pass/forward_rewrite.cc index 8ad61270e33a..6c66d6e982a7 100644 --- a/src/relay/pass/forward_rewrite.cc +++ b/src/relay/pass/forward_rewrite.cc @@ -23,9 +23,9 @@ * \file forward_rewrite.cc * \brief Apply rewriting rules in a forward fashion. */ -#include #include #include +#include #include "pass_util.h" namespace tvm { @@ -206,37 +206,5 @@ Expr ForwardRewrite(const Expr& expr, return ForwardRewriter(&rewrite_func, fcontext, fmulti_ref_trigger).Rewrite(expr); } -namespace transform { - -using std::function; - -Pass ForwardRewrite(const std::string& rewrite_map_attr_name, - function fcontext, - function fmulti_ref_trigger) { - runtime::TypedPackedFunc pass_func = - [=](Function f, Module m, PassContext pc) { - return Downcast(ForwardRewrite(f, - rewrite_map_attr_name, - fcontext, - fmulti_ref_trigger)); - }; - return CreateFunctionPass(pass_func, 1, "ForwardRewrite", {}); -} - -Pass ForwardRewrite(const FForwardRewrite& rewrite_func, - function fcontext, - function fmulti_ref_trigger) { - runtime::TypedPackedFunc pass_func = - [=](Function f, Module m, PassContext pc) { - return Downcast(ForwardRewrite(f, - rewrite_func, - fcontext, - fmulti_ref_trigger)); - }; - return CreateFunctionPass(pass_func, 1, "ForwardRewriteFunc", {}); -} - -} // namespace transform - } // namespace relay } // namespace tvm diff --git a/src/relay/pass/fuse_ops.cc b/src/relay/pass/fuse_ops.cc index 9f940e54953b..cdd283746365 100644 --- a/src/relay/pass/fuse_ops.cc +++ b/src/relay/pass/fuse_ops.cc @@ -26,7 +26,7 @@ * Fuse necessary ops into a single one. 
*/ #include -#include +#include #include #include #include @@ -963,9 +963,6 @@ Expr FuseOps(const Expr& expr, int fuse_opt_level, const Module& module) { } } -TVM_REGISTER_API("relay._ir_pass.FuseOps") -.set_body_typed(FuseOps); - namespace transform { Pass FuseOps(int fuse_opt_level) { diff --git a/src/relay/pass/gradient.cc b/src/relay/pass/gradient.cc index 5d26f7adcff7..1abe7a94b621 100644 --- a/src/relay/pass/gradient.cc +++ b/src/relay/pass/gradient.cc @@ -26,7 +26,8 @@ #include #include #include -#include +#include +#include #include "pattern_util.h" #include "let_list.h" #include "../ir/type_functor.h" @@ -246,7 +247,7 @@ Expr FirstOrderGradient(const Expr& re, const Module& mod) { return FunctionNode::make(f->params, body, GradRetType(GetRef(f)), {}); } -TVM_REGISTER_API("relay._ir_pass.first_order_gradient") +TVM_REGISTER_API("relay._analysis.first_order_gradient") .set_body_typed(FirstOrderGradient); struct ReverseADType : TypeMutator { @@ -351,7 +352,7 @@ Expr Gradient(const Expr& re, const Module& mod) { return FunctionNode::make(f->params, body, GradRetType(GetRef(f)), {}); } -TVM_REGISTER_API("relay._ir_pass.gradient") +TVM_REGISTER_API("relay._transform.gradient") .set_body_typed(Gradient); } // namespace relay diff --git a/src/relay/pass/kind_check.cc b/src/relay/pass/kind_check.cc index 976a2ef8ec54..c0f4a7c5967d 100644 --- a/src/relay/pass/kind_check.cc +++ b/src/relay/pass/kind_check.cc @@ -32,7 +32,7 @@ * We check this by ensuring the `dtype` field of a Tensor always * contains a data type such as `int`, `float`, `uint`. */ -#include +#include #include #include "../ir/type_functor.h" @@ -183,7 +183,7 @@ Kind KindCheck(const Type& t, const Module& mod) { return kc.Check(t); } -TVM_REGISTER_API("relay._ir_pass.check_kind") +TVM_REGISTER_API("relay._analysis.check_kind") .set_body([](TVMArgs args, TVMRetValue* ret) { if (args.size() == 1) { *ret = KindCheck(args[0], ModuleNode::make({}, {})); diff --git a/src/relay/pass/mac_count.cc b/src/relay/pass/mac_count.cc index ce70eb051214..48a0dfb84746 100644 --- a/src/relay/pass/mac_count.cc +++ b/src/relay/pass/mac_count.cc @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include "pattern_util.h" @@ -188,7 +188,7 @@ int64_t GetTotalMacNumber(const Expr& expr) { return MacCounter::GetTotalMacNumber(expr); } -TVM_REGISTER_API("relay._ir_pass.GetTotalMacNumber") +TVM_REGISTER_API("relay._analysis.GetTotalMacNumber") .set_body_typed(GetTotalMacNumber); } // namespace mac_count diff --git a/src/relay/pass/match_exhaustion.cc b/src/relay/pass/match_exhaustion.cc index 173d6eacf528..cc00a54cde0a 100644 --- a/src/relay/pass/match_exhaustion.cc +++ b/src/relay/pass/match_exhaustion.cc @@ -32,7 +32,6 @@ #include #include #include -#include #include namespace tvm { @@ -236,15 +235,15 @@ Array UnmatchedCases(const Match& match, const Module& mod) { } // expose for testing only -TVM_REGISTER_API("relay._ir_pass.unmatched_cases") -.set_body_typed(const Match&, - const Module&)>([](const Match& match, - const Module& mod_ref) { - Module call_mod = mod_ref; - if (!call_mod.defined()) { - call_mod = ModuleNode::make({}, {}); - } - return UnmatchedCases(match, call_mod); - }); +TVM_REGISTER_API("relay._analysis.unmatched_cases") +.set_body_typed(const Match&, const Module&)>( + [](const Match& match, const Module& mod_ref) { + Module call_mod = mod_ref; + if (!call_mod.defined()) { + call_mod = ModuleNode::make({}, {}); + } + return UnmatchedCases(match, call_mod); + }); + } // namespace relay } // namespace tvm diff 
--git a/src/relay/pass/partial_eval.cc b/src/relay/pass/partial_eval.cc index e7edbb3153d8..acc60982cff4 100644 --- a/src/relay/pass/partial_eval.cc +++ b/src/relay/pass/partial_eval.cc @@ -91,7 +91,8 @@ * * These assumptions do not affect the correctness of the algorithm, however. */ -#include +#include +#include #include #include #include @@ -740,9 +741,14 @@ class PartialEvaluator : public ExprFunctor // Constant evaluate a expression. PStatic ConstEvaluate(const Expr& expr, LetList* ll) { - Expr infered = InferType(expr, Module(nullptr)); - Expr fused = FuseOps(infered, 0, Module(nullptr)); - Expr fused_infered = InferType(fused, Module(nullptr)); + std::vector passes = {transform::FuseOps(0), + transform::InferType()}; + auto mod = ModuleNode::FromExpr(expr); + auto seq = transform::Sequential(passes); + mod = seq(mod); + auto entry_func = mod->Lookup(mod->entry_func); + auto fused_infered = + expr.as() == nullptr ? entry_func->body : entry_func; return Reify(executor_(fused_infered), ll); } diff --git a/src/relay/pass/pass_manager.cc b/src/relay/pass/pass_manager.cc index a620316035c7..d63d9121fe27 100644 --- a/src/relay/pass/pass_manager.cc +++ b/src/relay/pass/pass_manager.cc @@ -573,18 +573,6 @@ class PassContext::Internal { } }; -Expr OptimizeOnExpr(const Expr& expr, const Array& passes) { - auto mod = ModuleNode::FromExpr(expr); - Sequential seq(passes); - auto pass_ctx = PassContext::Create(); - pass_ctx->opt_level = 3; - tvm::With ctx_scope(pass_ctx); - mod = seq(mod); - CHECK(mod.defined()); - auto entry_func = mod->Lookup(mod->entry_func); - return expr.as() == nullptr ? entry_func->body : entry_func; -} - TVM_REGISTER_API("relay._transform.GetCurrentPassContext") .set_body_typed(PassContext::Current); @@ -594,9 +582,6 @@ TVM_REGISTER_API("relay._transform.EnterPassContext") TVM_REGISTER_API("relay._transform.ExitPassContext") .set_body_typed(PassContext::Internal::ExitScope); -TVM_REGISTER_API("relay._transform.OptimizeOnExpr") -.set_body_typed(OptimizeOnExpr); - } // namespace transform } // namespace relay } // namespace tvm diff --git a/src/relay/pass/quantize.cc b/src/relay/pass/quantize.cc index 1503d67feaf1..7527d2a21628 100644 --- a/src/relay/pass/quantize.cc +++ b/src/relay/pass/quantize.cc @@ -27,9 +27,10 @@ */ #include #include -#include +#include #include #include +#include #include #include #include @@ -259,6 +260,13 @@ Expr QuantizeRealize(const Call& ref_call, return QRealizeIntExprNode::make(round_data, dom_scale, Float(32)); } +Expr FoldConstantOpt(const Expr& expr) { + auto mod = ModuleNode::FromExpr(expr); + mod = transform::FoldConstant()(mod); + auto entry_func = mod->Lookup(mod->entry_func); + return expr.as() == nullptr ? 
entry_func->body : entry_func; +} + RELAY_REGISTER_OP("relay.op.annotation.simulated_quantize") .set_attr("FQRealizeRewrite", QuantizeRealize); @@ -290,7 +298,8 @@ Expr Conv2dRealize(const Call& ref_call, Expr ret = CallNode::make(ref_call->op, {ldata, rdata}, Attrs(attrs), ref_call->type_args); - Expr dom_scale = FoldConstant(Multiply(lhs->dom_scale, rhs->dom_scale)); + Expr mul = Multiply(lhs->dom_scale, rhs->dom_scale); + Expr dom_scale = FoldConstantOpt(mul); return QRealizeIntExprNode::make(ret, dom_scale, out_dtype); } @@ -323,7 +332,8 @@ Expr DenseRealize(const Call& ref_call, Expr ret = CallNode::make(ref_call->op, {ldata, rdata}, Attrs(attrs), ref_call->type_args); - Expr dom_scale = FoldConstant(Multiply(lhs->dom_scale, rhs->dom_scale)); + Expr mul = Multiply(lhs->dom_scale, rhs->dom_scale); + Expr dom_scale = FoldConstantOpt(mul); return QRealizeIntExprNode::make(ret, dom_scale, out_dtype); } @@ -356,7 +366,8 @@ Expr MulRealize(const Call& ref_call, } Expr ret = ForwardOp(ref_call, {ldata, rdata}); - Expr dom_scale = FoldConstant(Multiply(lhs->dom_scale, rhs->dom_scale)); + Expr mul = Multiply(lhs->dom_scale, rhs->dom_scale); + Expr dom_scale = FoldConstantOpt(mul); return QRealizeIntExprNode::make(ret, dom_scale, dtype); } CHECK(!new_args[0]->derived_from() && !new_args[1]->derived_from()); diff --git a/src/relay/pass/simplify_inference.cc b/src/relay/pass/simplify_inference.cc index 6d6b24abec20..daf48c44173e 100644 --- a/src/relay/pass/simplify_inference.cc +++ b/src/relay/pass/simplify_inference.cc @@ -21,7 +21,7 @@ * Copyright (c) 2018 by Contributors * \file simplify_inference.cc */ -#include +#include #include #include #include @@ -103,9 +103,6 @@ Expr SimplifyInference(const Expr& e) { return InferenceSimplifier().Mutate(e); } -TVM_REGISTER_API("relay._ir_pass.simplify_inference") -.set_body_typed(SimplifyInference); - namespace transform { Pass SimplifyInference() { diff --git a/src/relay/pass/to_a_normal_form.cc b/src/relay/pass/to_a_normal_form.cc index b5a3f8552d8d..1b4b642eea8c 100644 --- a/src/relay/pass/to_a_normal_form.cc +++ b/src/relay/pass/to_a_normal_form.cc @@ -24,7 +24,7 @@ * * \brief Turn implicit sharing into observable sharing. */ -#include +#include #include #include #include diff --git a/src/relay/pass/to_graph_normal_form.cc b/src/relay/pass/to_graph_normal_form.cc index c1ae19e92748..f6f2a07bc80f 100644 --- a/src/relay/pass/to_graph_normal_form.cc +++ b/src/relay/pass/to_graph_normal_form.cc @@ -24,6 +24,7 @@ * * \brief Turn A normal form into graph normal form. */ +#include #include #include #include "let_list.h" diff --git a/src/relay/pass/type_infer.cc b/src/relay/pass/type_infer.cc index 4b126e5299cf..ff356cb9c9ef 100644 --- a/src/relay/pass/type_infer.cc +++ b/src/relay/pass/type_infer.cc @@ -42,7 +42,7 @@ #include #include #include -#include +#include #include #include "./pass_util.h" #include "type_solver.h" @@ -813,11 +813,6 @@ Function InferType(const Function& func, return Downcast(func_ret); } -TVM_REGISTER_API("relay._ir_pass.infer_type") -.set_body_typed([](const Expr& expr, const Module& mod_ref) { - return InferType(expr, mod_ref); - }); - namespace transform { Pass InferType() { diff --git a/src/relay/pass/type_solver.cc b/src/relay/pass/type_solver.cc index 84f72e0d5a00..8289130f53d8 100644 --- a/src/relay/pass/type_solver.cc +++ b/src/relay/pass/type_solver.cc @@ -512,7 +512,7 @@ bool TypeSolver::Solve() { } // Expose type solver only for debugging purposes. 
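
The FoldConstantOpt helper added to quantize.cc above is the C++ side of a module round trip: wrap a bare expression in a module, run the pass, and read the rewritten body back out. A minimal Python sketch of the same idiom (my own toy example, not part of this patch, assuming an LLVM-enabled build for constant evaluation):

import numpy as np
from tvm import relay
from tvm.relay import transform

# Fold a constant product by wrapping the expression in a module,
# running the FoldConstant pass, and extracting the rewritten body.
a = relay.const(np.array([2.0], dtype="float32"))
b = relay.const(np.array([3.0], dtype="float32"))
expr = relay.multiply(a, b)

mod = relay.Module.from_expr(expr)
mod = transform.FoldConstant()(mod)
folded = mod[mod.entry_func].body   # expr was not a Function, so take the body
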
-TVM_REGISTER_API("relay._ir_pass._test_type_solver") +TVM_REGISTER_API("relay._analysis._test_type_solver") .set_body([](runtime::TVMArgs args, runtime::TVMRetValue* ret) { using runtime::PackedFunc; using runtime::TypedPackedFunc; diff --git a/src/relay/pass/type_solver.h b/src/relay/pass/type_solver.h index 8b24e8605f5f..002ccac356f0 100644 --- a/src/relay/pass/type_solver.h +++ b/src/relay/pass/type_solver.h @@ -27,7 +27,7 @@ #include #include -#include +#include #include #include #include diff --git a/src/relay/pass/util.cc b/src/relay/pass/util.cc index 3ec4f75cd1ad..2497197ffbe5 100644 --- a/src/relay/pass/util.cc +++ b/src/relay/pass/util.cc @@ -24,7 +24,7 @@ * * \brief Utility functions for Relay. */ -#include +#include #include #include #include "pass_util.h" @@ -274,10 +274,10 @@ tvm::Array AllVars(const Expr& expr) { return VarVisitor().All(expr); } -TVM_REGISTER_API("relay._ir_pass.free_vars") +TVM_REGISTER_API("relay._analysis.free_vars") .set_body_typed(FreeVars); -TVM_REGISTER_API("relay._ir_pass.bound_vars") +TVM_REGISTER_API("relay._analysis.bound_vars") .set_body([](TVMArgs args, TVMRetValue* ret) { NodeRef x = args[0]; if (x.as_derived()) { @@ -287,10 +287,10 @@ TVM_REGISTER_API("relay._ir_pass.bound_vars") } }); -TVM_REGISTER_API("relay._ir_pass.all_vars") +TVM_REGISTER_API("relay._analysis.all_vars") .set_body_typed(AllVars); -TVM_REGISTER_API("relay._ir_pass.free_type_vars") +TVM_REGISTER_API("relay._analysis.free_type_vars") .set_body([](TVMArgs args, TVMRetValue* ret) { NodeRef x = args[0]; Module mod = args[1]; @@ -301,7 +301,7 @@ TVM_REGISTER_API("relay._ir_pass.free_type_vars") } }); -TVM_REGISTER_API("relay._ir_pass.bound_type_vars") +TVM_REGISTER_API("relay._analysis.bound_type_vars") .set_body([](TVMArgs args, TVMRetValue* ret) { NodeRef x = args[0]; Module mod = args[1]; @@ -312,7 +312,7 @@ TVM_REGISTER_API("relay._ir_pass.bound_type_vars") } }); -TVM_REGISTER_API("relay._ir_pass.all_type_vars") +TVM_REGISTER_API("relay._analysis.all_type_vars") .set_body([](TVMArgs args, TVMRetValue* ret) { NodeRef x = args[0]; Module mod = args[1]; diff --git a/src/relay/pass/well_formed.cc b/src/relay/pass/well_formed.cc index dea937481289..bfe8865ab52f 100644 --- a/src/relay/pass/well_formed.cc +++ b/src/relay/pass/well_formed.cc @@ -22,7 +22,7 @@ * \file well_formed.cc * \brief check that expression is well formed. 
*/ -#include +#include #include #include #include @@ -78,7 +78,7 @@ bool WellFormed(const Expr& e) { return WellFormedChecker().CheckWellFormed(e); } -TVM_REGISTER_API("relay._ir_pass.well_formed") +TVM_REGISTER_API("relay._analysis.well_formed") .set_body_typed(WellFormed); } // namespace relay diff --git a/tests/cpp/relay_build_module_test.cc b/tests/cpp/relay_build_module_test.cc index 3f46eed9f10e..a8a63dd44ef9 100644 --- a/tests/cpp/relay_build_module_test.cc +++ b/tests/cpp/relay_build_module_test.cc @@ -22,7 +22,8 @@ #include #include #include -#include +#include +#include #include #include #include diff --git a/tests/cpp/relay_pass_type_infer_test.cc b/tests/cpp/relay_pass_type_infer_test.cc index ffd0f7c4a26f..8257e94db197 100644 --- a/tests/cpp/relay_pass_type_infer_test.cc +++ b/tests/cpp/relay_pass_type_infer_test.cc @@ -21,7 +21,8 @@ #include #include #include -#include +#include +#include TEST(Relay, SelfReference) { using namespace tvm; @@ -32,10 +33,9 @@ TEST(Relay, SelfReference) { auto y = relay::VarNode::make("y", tensor_type); auto call = relay::CallNode::make(f, Array{ y }); auto fx = relay::FunctionNode::make(tvm::Array{ y }, call, relay::Type(), {}); - auto empty_module = - relay::ModuleNode::make(Map{}, - Map{}); - auto type_fx = relay::InferType(fx, empty_module); + auto mod = relay::ModuleNode::FromExpr(fx); + mod = relay::transform::InferType()(mod); + auto type_fx = mod->Lookup(mod->entry_func); auto expected = relay::FuncTypeNode::make(tvm::Array{ tensor_type }, tensor_type, {}, {}); CHECK(AlphaEqual(type_fx->checked_type(), expected)); diff --git a/tests/cpp/relay_transform_sequential.cc b/tests/cpp/relay_transform_sequential.cc index b61a5cc0daad..a943ba29cc92 100644 --- a/tests/cpp/relay_transform_sequential.cc +++ b/tests/cpp/relay_transform_sequential.cc @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include @@ -100,7 +100,9 @@ TEST(Relay, Sequential) { relay::FunctionNode::make(relay::FreeVars(zz), zz, relay::Type(), {}); // Infer type for the expected function. - auto expected = relay::InferType(expected_func, relay::Module(nullptr)); + auto mod1 = relay::ModuleNode::FromExpr(expected_func); + mod1 = relay::transform::InferType()(mod1); + auto expected = mod1->Lookup(mod1->entry_func); CHECK(relay::AlphaEqual(f, expected)); } diff --git a/tests/python/frontend/caffe2/model_zoo/squeezenet.py b/tests/python/frontend/caffe2/model_zoo/squeezenet.py index 74ade8989d05..3c21138343c6 100644 --- a/tests/python/frontend/caffe2/model_zoo/squeezenet.py +++ b/tests/python/frontend/caffe2/model_zoo/squeezenet.py @@ -95,7 +95,7 @@ def get_net(batch_size, image_shape, num_classes, dtype): net = relay.nn.relu(net) net = relay.nn.global_avg_pool2d(net) net = relay.nn.softmax(net, axis=1) - args = relay.ir_pass.free_vars(net) + args = relay.analysis.free_vars(net) return relay.Function(args, net) diff --git a/tests/python/frontend/caffe2/test_graph.py b/tests/python/frontend/caffe2/test_graph.py index ea3a36e60663..98f872ce19b2 100644 --- a/tests/python/frontend/caffe2/test_graph.py +++ b/tests/python/frontend/caffe2/test_graph.py @@ -16,13 +16,15 @@ # under the License. 
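
For the Python frontend and test files touched below, the mechanical part of the rename is that read-only helpers move from relay.ir_pass to relay.analysis. A small hedged sketch (my own toy network, not taken from the patch) of the calls these hunks switch over:

from tvm import relay

x = relay.var("x", shape=(1, 3, 8, 8))
net = relay.nn.relu(x)

# before this patch: relay.ir_pass.free_vars / relay.ir_pass.well_formed / relay.ir_pass.alpha_equal
args = relay.analysis.free_vars(net)          # analyses now live in relay.analysis
func = relay.Function(args, net)
assert relay.analysis.well_formed(func)
assert relay.analysis.alpha_equal(func, func)
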
"""Test graph equality of caffe2 models.""" from tvm import relay +from tvm.relay import transform from model_zoo import c2_squeezenet, relay_squeezenet -def compare_graph(f1, f2): - f1 = relay.ir_pass.infer_type(f1) - f2 = relay.ir_pass.infer_type(f2) - assert relay.ir_pass.alpha_equal(f1, f2) +def compare_graph(lhs_mod, func): + rhs_mod = relay.Module.from_expr(func) + rhs_mod = transform.InferType()(rhs_mod) + assert relay.analysis.alpha_equal(lhs_mod[lhs_mod.entry_func], + rhs_mod[rhs_mod.entry_func]) def test_squeeze_net(): @@ -31,7 +33,7 @@ def test_squeeze_net(): mod, _, = relay.frontend.from_caffe2( c2_squeezenet.init_net, c2_squeezenet.predict_net, shape_dict, dtype_dict) relay_func, _ = relay_squeezenet() - compare_graph(mod[mod.entry_func], relay_func) + compare_graph(mod, relay_func) if __name__ == '__main__': diff --git a/tests/python/frontend/mxnet/test_graph.py b/tests/python/frontend/mxnet/test_graph.py index b7d3ba4a5b60..37a46f6ce3dc 100644 --- a/tests/python/frontend/mxnet/test_graph.py +++ b/tests/python/frontend/mxnet/test_graph.py @@ -16,12 +16,11 @@ # under the License. import mxnet as mx from tvm import relay +from tvm.relay import transform import model_zoo def compare_graph(f1, f2): - f1 = relay.ir_pass.infer_type(f1) - f2 = relay.ir_pass.infer_type(f2) - assert relay.ir_pass.alpha_equal(f1, f2) + assert relay.analysis.alpha_equal(f1, f2) def test_mlp(): shape = {"data": (1, 1, 28, 28)} @@ -97,7 +96,10 @@ def relay_compose(F, **kwargs): y = F.var("y", shape=yshape) z = F.split(x, **kwargs) z = F.subtract(F.add(z[0], z[2]), y) - return relay.Function(relay.ir_pass.free_vars(z), z) + func = relay.Function(relay.analysis.free_vars(z), z) + mod = relay.Module.from_expr(func) + mod = transform.InferType()(mod) + return mod[mod.entry_func] mx_sym = mx_compose(mx, num_outputs=3, axis=1) mod, _ = relay.frontend.from_mxnet( diff --git a/tests/python/frontend/nnvm_to_relay/test_alter_conv2d.py b/tests/python/frontend/nnvm_to_relay/test_alter_conv2d.py index d3538bb0085b..d59fe1830a18 100644 --- a/tests/python/frontend/nnvm_to_relay/test_alter_conv2d.py +++ b/tests/python/frontend/nnvm_to_relay/test_alter_conv2d.py @@ -20,7 +20,8 @@ from tvm import relay from tvm import autotvm -from tvm.relay.ir_pass import infer_type, alpha_equal +from tvm.relay import transform +from tvm.relay.analysis import alpha_equal def test_alter_layout_conv2d(): @@ -57,12 +58,11 @@ def convnet(): n15 = relay.reshape(n14, newshape=[1, 1, 3, 3, 224, 224]) n16 = relay.transpose(n15, axes=[0, 1, 4, 2, 5, 3]) net = relay.reshape(n16, newshape=[1, 1, 672, 672]) - args = relay.ir_pass.free_vars(net) + args = relay.analysis.free_vars(net) return relay.Function(args, net) # orig net N = convnet() - N = infer_type(N) # trigger a test # for each known alter_conv2d @@ -75,11 +75,12 @@ def convnet(): for tgt in targets: with tvm.target.create(tgt) as target: with autotvm.tophub.context(target): - O = relay.ir_pass.alter_op_layout(N) - O = relay.ir_pass.infer_type(O) + mod = relay.Module.from_expr(N) + mod = transform.AlterOpLayout()(mod) + O = mod[mod.entry_func] # graph should differ - assert not relay.ir_pass.alpha_equal(N, O) + assert not relay.analysis.alpha_equal(N, O) if __name__ == "__main__": np.random.seed(42) diff --git a/tests/python/relay/test_adt.py b/tests/python/relay/test_adt.py index f3a08a869841..390576f87c18 100644 --- a/tests/python/relay/test_adt.py +++ b/tests/python/relay/test_adt.py @@ -14,12 +14,10 @@ # KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations # under the License. -import numpy as np import tvm from tvm import relay -from tvm.relay.ir_pass import infer_type -from tvm.relay.backend.interpreter import Value, TupleValue, ConstructorValue -from tvm.relay import testing, create_executor +from tvm.relay.backend.interpreter import ConstructorValue +from tvm.relay import create_executor from tvm.relay.prelude import Prelude from tvm.relay.testing import add_nat_definitions, count as count_, make_nat_value, make_nat_expr @@ -125,8 +123,14 @@ def test_nat_value(): def test_nat_constructor(): - assert relay.ir_pass.infer_type(z(), mod).checked_type == nat() - assert relay.ir_pass.infer_type(s(z()), mod).checked_type == nat() + func = relay.Function([], z()) + test_z = relay.GlobalVar("test_z") + mod[test_z] = func + assert mod[test_z].body.checked_type == nat() + test_sz = relay.GlobalVar("test_sz") + func = relay.Function([], s(z())) + mod[test_sz] = func + assert mod[test_sz].body.checked_type == nat() def test_double(): @@ -142,8 +146,10 @@ def test_add(): def test_list_constructor(): - a = relay.TypeVar("a") - assert relay.ir_pass.infer_type(cons(z(), nil()), mod).checked_type == l(nat()) + test_consz = relay.GlobalVar("test_consz") + func = relay.Function([], cons(z(), nil())) + mod[test_consz] = func + assert mod[test_consz].body.checked_type == l(nat()) def test_hd_tl(): expected = list(range(10)) diff --git a/tests/python/relay/test_backend_compile_engine.py b/tests/python/relay/test_backend_compile_engine.py index f493a9b3f537..479c4169a959 100644 --- a/tests/python/relay/test_backend_compile_engine.py +++ b/tests/python/relay/test_backend_compile_engine.py @@ -26,8 +26,10 @@ def get_func(shape): x = relay.var("x", shape=shape) y = relay.add(x, x) z = relay.add(y, x) - f = relay.ir_pass.infer_type(relay.Function([x], z)) - return f + f = relay.Function([x], z) + mod = relay.Module.from_expr(f) + mod = relay.transform.InferType()(mod) + return mod[mod.entry_func] z1 = engine.lower(get_func((10,)), "llvm") z2 = engine.lower(get_func((10,)), "llvm") z3 = engine.lower(get_func(()), "llvm") @@ -55,7 +57,7 @@ def test_compile_placeholder_bypass(): y = relay.var("y", shape=(2, 3)) z = relay.var("z", shape=(2, 3)) result = relay.Tuple([x, relay.op.concatenate([y, z], axis=0)]) - func = relay.Function(relay.ir_pass.free_vars(result), result) + func = relay.Function(relay.analysis.free_vars(result), result) with relay.build_config(opt_level=0): graph, lib, params = relay.build(relay.Module.from_expr(func), 'llvm') diff --git a/tests/python/relay/test_backend_graph_runtime.py b/tests/python/relay/test_backend_graph_runtime.py index 18e01e39ea27..742e3b4daa9f 100644 --- a/tests/python/relay/test_backend_graph_runtime.py +++ b/tests/python/relay/test_backend_graph_runtime.py @@ -19,7 +19,6 @@ import tvm from tvm import relay from tvm.contrib import graph_runtime -from tvm.relay.ir_pass import infer_type from tvm.relay.scope_builder import ScopeBuilder from tvm.relay.op import add from tvm.relay.module import Module @@ -124,9 +123,9 @@ def test_plan_memory(): z = relay.exp(z) z = relay.exp(z) func = relay.Function([x, y], z) - func = relay.ir_pass.infer_type(func) - func = relay.ir_pass.fuse_ops(func, opt_level=0) - func = relay.ir_pass.infer_type(func) + mod = relay.Module.from_expr(func) + mod = relay.transform.FuseOps(0)(mod) + func = mod[mod.entry_func] smap = relay.backend._backend.GraphPlanMemory(func) storage_ids = set() device_types = set() diff --git 
a/tests/python/relay/test_backend_interpreter.py b/tests/python/relay/test_backend_interpreter.py index 11ce11e48322..3c79fb760521 100644 --- a/tests/python/relay/test_backend_interpreter.py +++ b/tests/python/relay/test_backend_interpreter.py @@ -227,7 +227,7 @@ def test_tuple_passing(): gv = relay.GlobalVar('fn') mod[gv] = fn mod.entry_func = gv - mod[gv] = relay.ir_pass.infer_type(mod[gv], mod=mod) + mod = relay.transform.InferType()(mod) ctx = tvm.cpu() target = tvm.target.create('llvm') diff --git a/tests/python/relay/test_error_reporting.py b/tests/python/relay/test_error_reporting.py index c608ebba9b6d..aad4856fa943 100644 --- a/tests/python/relay/test_error_reporting.py +++ b/tests/python/relay/test_error_reporting.py @@ -19,7 +19,10 @@ def check_type_err(expr, msg): try: - expr = relay.ir_pass.infer_type(expr) + mod = relay.Module.from_expr(expr) + mod = relay.transform.InferType()(mod) + entry = mod[mod.entry_func] + expr = entry if isinstance(expr, relay.Function) else entry.body assert False except tvm.TVMError as err: assert msg in str(err) diff --git a/tests/python/relay/test_feature.py b/tests/python/relay/test_feature.py index 637e184704f2..9b5010286d4f 100644 --- a/tests/python/relay/test_feature.py +++ b/tests/python/relay/test_feature.py @@ -17,7 +17,8 @@ import tvm from tvm import relay -from tvm.relay.ir_pass import detect_feature, gradient +from tvm.relay.analysis import detect_feature +from tvm.relay.transform import gradient from tvm.relay.feature import Feature from tvm.relay.prelude import Prelude @@ -46,7 +47,9 @@ def test_ad(): t = relay.TensorType(shape, dtype) x = relay.var("x", t) func = relay.Function([x], x + x) - back_func = relay.ir_pass.infer_type(gradient(func)) + mod = relay.Module.from_expr(gradient(func)) + mod = relay.transform.InferType()(mod) + back_func = mod[mod.entry_func] feats = detect_feature(back_func) assert feats == set([ Feature.fVar, diff --git a/tests/python/relay/test_ir_bind.py b/tests/python/relay/test_ir_bind.py index 754efa557db6..df280e2fa248 100644 --- a/tests/python/relay/test_ir_bind.py +++ b/tests/python/relay/test_ir_bind.py @@ -28,11 +28,11 @@ def test_bind_params(): fexpected =relay.Function( [y], relay.add(relay.const(1, "float32"), y)) - assert relay.ir_pass.alpha_equal(fbinded, fexpected) + assert relay.analysis.alpha_equal(fbinded, fexpected) zbinded = relay.bind(z, {y: x}) zexpected = relay.add(x, x) - assert relay.ir_pass.alpha_equal(zbinded, zexpected) + assert relay.analysis.alpha_equal(zbinded, zexpected) if __name__ == "__main__": diff --git a/tests/python/relay/test_ir_nodes.py b/tests/python/relay/test_ir_nodes.py index cec277371252..b42a1e6d52c6 100644 --- a/tests/python/relay/test_ir_nodes.py +++ b/tests/python/relay/test_ir_nodes.py @@ -19,7 +19,7 @@ from tvm import relay from tvm.expr import * from tvm.relay import op -from tvm.relay.ir_pass import graph_equal +from tvm.relay.analysis import graph_equal def check_json_roundtrip(node): diff --git a/tests/python/relay/test_ir_parser.py b/tests/python/relay/test_ir_parser.py index 79b010ba0cb0..5f1f65ffb47c 100644 --- a/tests/python/relay/test_ir_parser.py +++ b/tests/python/relay/test_ir_parser.py @@ -16,7 +16,7 @@ # under the License. 
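
The updated test_feature.py above also shows the split this patch draws between the two new namespaces: analyses such as detect_feature come from relay.analysis, while program rewrites such as gradient and InferType come from relay.transform. A hedged sketch mirroring that test:

from tvm import relay
from tvm.relay.analysis import detect_feature
from tvm.relay.transform import InferType, gradient
from tvm.relay.feature import Feature

x = relay.var("x", shape=(10, 10))
fwd = relay.Function([x], x + x)

mod = relay.Module.from_expr(gradient(fwd))   # transform: builds a rewritten program
mod = InferType()(mod)                        # module-to-module pass
back_func = mod[mod.entry_func]
feats = detect_feature(back_func)             # analysis: inspects, never rewrites
assert Feature.fVar in feats
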
import tvm from tvm import relay -from tvm.relay.ir_pass import alpha_equal +from tvm.relay.analysis import alpha_equal from nose.tools import nottest, raises from numpy import isclose from typing import Union diff --git a/tests/python/relay/test_ir_well_formed.py b/tests/python/relay/test_ir_well_formed.py index 3cf73ae2cc66..bee0a021ac5b 100644 --- a/tests/python/relay/test_ir_well_formed.py +++ b/tests/python/relay/test_ir_well_formed.py @@ -16,7 +16,7 @@ # under the License. import tvm from tvm import relay -from tvm.relay.ir_pass import well_formed +from tvm.relay.analysis import well_formed from tvm.relay.prelude import Prelude def test_let(): diff --git a/tests/python/relay/test_op_grad_level1.py b/tests/python/relay/test_op_grad_level1.py index 072271218bdf..7da623a45ce6 100644 --- a/tests/python/relay/test_op_grad_level1.py +++ b/tests/python/relay/test_op_grad_level1.py @@ -14,16 +14,24 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -import tvm import numpy as np +import tvm from tvm import relay -from tvm.relay.ir_pass import gradient, infer_type +from tvm.relay.transform import gradient from tvm.relay.testing import ctx_list + +def run_infer_type(expr): + mod = relay.Module.from_expr(expr) + mod = relay.transform.InferType()(mod) + return mod[mod.entry_func] + + def sigmoid(x): one = np.ones_like(x) return one / (one + np.exp(-x)) + def relu(x): x_copy = np.copy(x) np.maximum(x_copy, 0, x_copy) @@ -41,7 +49,7 @@ def check_single_op(opfunc, ref): data = np.random.rand(*shape).astype(dtype) ref_grad = ref(data) fwd_func = relay.Function([x], y) - bwd_func = infer_type(gradient(fwd_func)) + bwd_func = run_infer_type(gradient(fwd_func)) for target, ctx in ctx_list(): intrp = relay.create_executor(ctx=ctx, target=target) @@ -73,7 +81,7 @@ def check_binary_op(opfunc, ref): y_data = np.random.rand(*s).astype(t.dtype) ref_grad0, ref_grad1 = ref(x_data, y_data) fwd_func = relay.Function([x, y], z) - bwd_func = infer_type(gradient(fwd_func)) + bwd_func = run_infer_type(gradient(fwd_func)) for target, ctx in ctx_list(): intrp = relay.create_executor(ctx=ctx, target=target) diff --git a/tests/python/relay/test_op_level1.py b/tests/python/relay/test_op_level1.py index 202464493d4b..8baec8c79e9a 100644 --- a/tests/python/relay/test_op_level1.py +++ b/tests/python/relay/test_op_level1.py @@ -14,13 +14,19 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
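
The run_infer_type helper that these operator tests now define replaces every expression-level relay.ir_pass.infer_type call. A brief usage sketch (my own example, written against the helper shape introduced in the hunks below) showing why the entry_func/body distinction matters:

from tvm import relay
from tvm.relay import transform

def run_infer_type(expr):
    # Same shape as the helpers added in these tests: wrap, type-check, unwrap.
    mod = relay.Module.from_expr(expr)
    mod = transform.InferType()(mod)
    entry = mod[mod.entry_func]
    return entry if isinstance(expr, relay.Function) else entry.body

x = relay.var("x", shape=(3, 4))
# A bare expression comes back as the typed body of the synthesized entry function.
yy = run_infer_type(relay.exp(x))
assert yy.checked_type == relay.TensorType((3, 4), "float32")
# A Function comes back as the whole type-checked Function.
f = run_infer_type(relay.Function([x], relay.exp(x)))
assert f.body.checked_type == relay.TensorType((3, 4), "float32")
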
-import math -import tvm import numpy as np +import tvm from tvm import relay +from tvm.relay import transform from tvm.relay.testing import ctx_list import topi.testing +def run_infer_type(expr): + mod = relay.Module.from_expr(expr) + mod = transform.InferType()(mod) + entry = mod[mod.entry_func] + return entry if isinstance(expr, relay.Function) else entry.body + def sigmoid(x): one = np.ones_like(x) return one / (one + np.exp(-x)) @@ -44,7 +50,8 @@ def check_single_op(opfunc, ref): # test printer assert ("{}(%x)".format(y.op.name)) in y.astext() # test type inference - assert relay.ir_pass.infer_type(y).checked_type == tp + yy = run_infer_type(y) + assert yy.checked_type == tp if ref is not None: data = np.random.rand(*shape).astype(dtype) @@ -84,7 +91,8 @@ def check_binary_op(opfunc, ref): z = opfunc(x, y) # test printer assert ("{}(%x, %y)".format(z.op.name)) in z.astext() - assert relay.ir_pass.infer_type(z).checked_type == t1 + zz = run_infer_type(z) + assert zz.checked_type == t1 if ref is not None: t1 = relay.TensorType((5, 10, 5)) @@ -134,7 +142,7 @@ def test_bias_add(): x = relay.var("x", shape=xshape) bias = relay.var("bias") z = relay.nn.bias_add(x, bias) - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) assert "axis=" not in zz.astext() assert zz.args[1].checked_type == relay.TensorType(bshape) @@ -153,8 +161,8 @@ def test_expand_dims_infer_type(): x = relay.var("x", shape=(n, t, d)) y = relay.expand_dims(x, axis=2) assert "axis=2" in y.astext() - checked = relay.ir_pass.infer_type(y) - assert checked.checked_type == relay.TensorType((n, t, 1, 100)) + yy = run_infer_type(y) + assert yy.checked_type == relay.TensorType((n, t, 1, 100)) def test_softmax(): @@ -162,7 +170,7 @@ def test_softmax(): x = relay.var("x", shape=shape) y = relay.nn.softmax(x, axis=1) assert "nn.softmax" in y.astext() - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType(shape) func = relay.Function([x], y) x_data = np.random.uniform(size=shape).astype("float32") @@ -178,7 +186,7 @@ def test_log_softmax(): x = relay.var("x", shape=shape) y = relay.nn.log_softmax(x, axis=1) assert "nn.log_softmax" in y.astext() - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType(shape) func = relay.Function([x], y) x_data = np.random.uniform(size=shape).astype("float32") @@ -195,16 +203,16 @@ def test_concatenate(): y = relay.var("y", shape=(n, t, d)) z = relay.concatenate((x, y), axis=-1) assert "axis=" in z.astext() - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) assert zz.checked_type == relay.TensorType((n, t, 200)) x = relay.exp(x) z = relay.concatenate((x, y), axis=2) - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) assert zz.checked_type == relay.TensorType((n, t, 200)) z = relay.concatenate((x, y), axis=1) - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) assert zz.checked_type == relay.TensorType((n, t + t, 100)) x = relay.var("x", shape=(10, 5)) @@ -233,7 +241,7 @@ def test_dropout(): x = relay.var("x", input_ty) y = relay.nn.dropout(x, rate=0.75) assert "rate=" in y.astext() - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == input_ty @@ -246,7 +254,7 @@ def test_batch_norm(): moving_var = relay.var("moving_var", relay.TensorType((2,))) y = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var, center=False, scale=False) - yy = relay.ir_pass.infer_type(y.astuple()) + yy = run_infer_type(y.astuple()) assert "center=" in 
yy.astext() assert yy.checked_type == relay.ty.TupleType(tvm.convert([ relay.TensorType((3, 2, 1), "float32"), @@ -261,7 +269,7 @@ def test_batch_norm(): y = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var, axis=0, center=False, scale=False) - yy = relay.ir_pass.infer_type(y.astuple()) + yy = run_infer_type(y.astuple()) assert yy.checked_type == relay.ty.TupleType(tvm.convert([ relay.ty.TensorType((3, 2, 1), "float32"), relay.ty.TensorType((3,), "float32"), @@ -276,7 +284,7 @@ def test_batch_norm(): moving_var = relay.var("moving_var", relay.TensorType((3,))) y = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var, axis=-1, center=False, scale=False) - yy = relay.ir_pass.infer_type(y.astuple()) + yy = run_infer_type(y.astuple()) assert yy.checked_type == relay.ty.TupleType(tvm.convert([ relay.ty.TensorType((1, 2, 3), "float32"), relay.ty.TensorType((3,), "float32"), @@ -290,7 +298,7 @@ def test_dense(): w = relay.var("w", relay.TensorType((2, w), "float32")) y = relay.nn.dense(x, w, units=2) "units=2" in y.astext() - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, h, 2), "float32") n, c , h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), 2 @@ -298,14 +306,14 @@ def test_dense(): wh, ww = tvm.var("wh"), tvm.var("ww") w = relay.var("w", relay.TensorType((ww, wh), "float32")) y = relay.nn.dense(x, w) - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, h, ww), "float32") n, c , h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), 2 x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) w = relay.var("w", relay.IncompleteType()) y = relay.nn.dense(x, w, units=2) - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, h, 2), "float32") x = relay.var("x", shape=(10, 5)) diff --git a/tests/python/relay/test_op_level10.py b/tests/python/relay/test_op_level10.py index f904fb01fdb9..bcf6b7f80abd 100644 --- a/tests/python/relay/test_op_level10.py +++ b/tests/python/relay/test_op_level10.py @@ -20,10 +20,17 @@ import tvm import topi.testing from tvm import relay +from tvm.relay import transform from tvm.relay.testing import ctx_list import topi import topi.testing +def run_infer_type(expr): + mod = relay.Module.from_expr(expr) + mod = transform.InferType()(mod) + entry = mod[mod.entry_func] + return entry if isinstance(expr, relay.Function) else entry.body + def test_collapse_sum_like(): shape = (3, 4, 5, 6) shape_like = (4, 5, 6) @@ -31,7 +38,7 @@ def test_collapse_sum_like(): x = relay.Var("x", relay.ty.TensorType(shape , dtype)) y = relay.Var("y", relay.ty.TensorType(shape_like, dtype)) z = relay.collapse_sum_like(x, y) - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) assert zz.checked_type == relay.ty.TensorType(shape_like, dtype) func = relay.Function([x, y], z) @@ -50,7 +57,7 @@ def test_broadcast_to(): dtype = "float32" x = relay.Var("x", relay.ty.TensorType(shape , dtype)) z = relay.broadcast_to(x, shape=shape_like) - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) assert zz.checked_type == relay.ty.TensorType(shape_like, dtype) func = relay.Function([x], z) @@ -69,7 +76,7 @@ def test_broadcast_to_like(): x = relay.Var("x", relay.ty.TensorType(shape , dtype)) y = relay.Var("y", relay.ty.TensorType(shape_like, dtype)) z = relay.broadcast_to_like(x, y) - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) assert zz.checked_type == relay.ty.TensorType(shape_like, dtype) func 
= relay.Function([x, y], z) @@ -106,7 +113,7 @@ def verify_slice_like(data, slice_like, axes, output, dtype="float32"): x = relay.var("data", relay.TensorType(data, dtype)) y = relay.var("slice_like", relay.TensorType(slice_like, dtype)) z = relay.slice_like(x, y, axes) - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) if axes: assert "axes" in z.astext() assert zz.checked_type == relay.ty.TensorType(output, dtype) @@ -144,7 +151,7 @@ def test_reverse_reshape(): def verify_reverse_reshape(shape, newshape, oshape): x = relay.var("x", relay.TensorType(shape, "float32")) z = relay.reverse_reshape(x, newshape=newshape) - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) assert "newshape=" in z.astext() assert zz.checked_type == relay.ty.TensorType(oshape, "float32") @@ -166,7 +173,7 @@ def verify_batch_matmul(x_shape, y_shape, out_shape, dtype="float32"): x = relay.var("x", relay.TensorType(x_shape, dtype)) y = relay.var("y", relay.TensorType(y_shape, dtype)) z = relay.nn.batch_matmul(x, y) - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) assert zz.checked_type == relay.ty.TensorType(out_shape, dtype) func = relay.Function([x, y], z) @@ -185,7 +192,7 @@ def test_batch_matmul(): x = relay.var("x", relay.TensorType((b, m, k), "float32")) y = relay.var("y", relay.TensorType((b, n, k), "float32")) z = relay.nn.batch_matmul(x, y) - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) assert zz.checked_type == relay.TensorType((b, m, n), "float32") verify_batch_matmul((1, 16, 32), (1, 16, 32), (1, 16, 16)) @@ -197,7 +204,7 @@ def test_shape_of(): shape = (10, 5, 12) x = relay.var("x", shape=shape) func = relay.Function([x], relay.op.shape_of(x)) - func = relay.ir_pass.infer_type(func) + func = run_infer_type(func) x_data = np.random.rand(*shape).astype('float32') for target, ctx in ctx_list(): # Because using graph executor, this op will be optimized after @@ -256,7 +263,8 @@ def _verify(data_shape, mask_value, axis, dtype, itype): data = relay.var("data", relay.TensorType(data_shape, dtype)) valid_length = relay.var("valid_length", relay.TensorType((nbatch,), itype)) out = relay.sequence_mask(data, valid_length, mask_value, axis) - assert relay.ir_pass.infer_type(out).checked_type == relay.ty.TensorType(data_shape, dtype) + checked = run_infer_type(out) + assert checked.checked_type == relay.ty.TensorType(data_shape, dtype) func = relay.Function([data, valid_length], out) data_np = np.random.uniform(size=data_shape).astype(dtype) valid_length_np = np.random.randint(0, max_length, size=nbatch).astype(itype) diff --git a/tests/python/relay/test_op_level2.py b/tests/python/relay/test_op_level2.py index c8f5b1d27a2a..722e8d178fab 100644 --- a/tests/python/relay/test_op_level2.py +++ b/tests/python/relay/test_op_level2.py @@ -16,12 +16,19 @@ # under the License. """ Support level2 operator test cases. 
""" +import numpy as np import tvm from tvm import relay +from tvm.relay import transform from tvm.relay.testing import ctx_list -import numpy as np import topi.testing +def run_infer_type(expr): + mod = relay.Module.from_expr(expr) + mod = transform.InferType()(mod) + entry = mod[mod.entry_func] + return entry if isinstance(expr, relay.Function) else entry.body + def test_conv2d_infer_type(): # symbolic in batch dimension n, c, h, w = tvm.var("n"), 10, 224, 224 @@ -31,7 +38,7 @@ def test_conv2d_infer_type(): kernel_size=(3, 3), padding=(1, 1), channels=2) - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 224, 224), "float32") assert yy.args[1].checked_type == relay.TensorType( @@ -44,7 +51,7 @@ def test_conv2d_infer_type(): w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8")) y = relay.nn.conv2d(x, w, out_dtype="int32") assert "out_dtype=\"int32\"" in y.astext() - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 222, 222), "int32") @@ -59,7 +66,7 @@ def test_conv2d_infer_type(): data_layout="NCHW4n4c", kernel_layout="OIHW4o4i", out_dtype="int32") - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (1, 4, 224, 224, 4, 4), "int32") assert yy.args[1].checked_type == relay.TensorType( @@ -75,7 +82,7 @@ def test_conv2d_infer_type(): channels=16, data_layout="NHWC", out_dtype="int32") - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, h, w, 16), "int32") @@ -169,7 +176,7 @@ def test_conv2d_transpose_infer_type(): padding=(1, 1), channels=15) assert "channels=15" in y.astext() - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 15, 10, 12), "float32") assert yy.args[1].checked_type == relay.TensorType( @@ -183,7 +190,7 @@ def test_conv2d_transpose_infer_type(): output_padding=(1, 1), channels=11, data_layout="NHWC") - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 15, 15, 11), "float32") @@ -219,12 +226,12 @@ def test_upsampling_infer_type(): x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = relay.nn.upsampling(x, scale=2, layout="NCHW", method="BILINEAR") "method=\"BINLINEAR\"" in y.astext() - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, h*2, w*2), "float32") n, c = tvm.var("n"), tvm.var("c") x = relay.var("x", relay.TensorType((n, c, 100, 200), "float32")) y = relay.nn.upsampling(x, scale=2, layout="NCHW", method="BILINEAR") - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, 200, 400), "float32") @@ -233,7 +240,7 @@ def _test_pool2d(opfunc, reffunc): x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = opfunc(x, pool_size=(1, 1)) assert "pool_size=" in y.astext() - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, 10, 224, 224), "float32") # test execution dtype = "float32" @@ -253,13 +260,13 @@ def _test_global_pool2d(opfunc, reffunc): n, c, h, w = tvm.var("n"), tvm.var("c"), 224, 224 x = relay.var("x", relay.TensorType((n, h, w, c), "float32")) y = opfunc(x, layout="NHWC") - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, 1, 1, c), "float32") n, c, h, w = tvm.var("n"), tvm.var("c"), 
tvm.var("h"), tvm.var("w") x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = opfunc(x) - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, 1, 1), "float32") # test execution dtype = "float32" @@ -320,17 +327,17 @@ def test_flatten_infer_type(): d1, d2, d3, d4 = tvm.var("d1"), tvm.var("d2"), tvm.var("d3"), tvm.var("d4") x = relay.var("x", relay.TensorType((d1, d2, d3, d4), "float32")) y = relay.nn.batch_flatten(x) - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((d1, ((d2*d3)*d4)), "float32") x = relay.var("x", relay.TensorType((3, 2, 4, 3), "float32")) y = relay.nn.batch_flatten(x) - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((3, 24), "float32") x = relay.var("x", relay.TensorType((d1, 2, d3, 3), "float32")) y = relay.nn.batch_flatten(x) - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((d1, ((2*d3)*3)), "float32") shape = (1, 5, 10, 10) @@ -338,7 +345,7 @@ def test_flatten_infer_type(): dtype = "float32" x = relay.var("x", relay.TensorType(shape, dtype)) z = relay.nn.batch_flatten(x) - yy = relay.ir_pass.infer_type(z) + yy = run_infer_type(z) assert yy.checked_type == relay.TensorType(o_shape, dtype) func = relay.Function([x], z) x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype) @@ -358,14 +365,14 @@ def test_pad_infer_type(): t = relay.var("t", relay.TensorType((n, c, h, w), "float32")) y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4))) "pad_width=" in y.astext() - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((3, 6, 9, 12), "float32") # some symbolic values n, c, h, w = tvm.var("n"), 2, 3, tvm.var("w") t = relay.var("t", relay.TensorType((n, c, h, w), "float32")) y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4))) - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n + 2, 6, 9, w + 8), "float32") def test_pad_run(): @@ -389,7 +396,7 @@ def test_lrn(): x = relay.var("x", shape=(n, c , h, w)) y = relay.nn.lrn(x, size=10, axis=2, bias=0.5, alpha=.00001, beta=0.75) "alpha=" in y.astext() - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c , h, w)) shape = (1, 5, 10, 10) @@ -401,7 +408,7 @@ def test_lrn(): alpha=.00001 beta=0.75 z = relay.nn.lrn(x, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta) - yy = relay.ir_pass.infer_type(z) + yy = run_infer_type(z) assert yy.checked_type == relay.TensorType(shape, dtype) func = relay.Function([x], z) x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype) @@ -420,7 +427,7 @@ def test_l2_normalize(): x = relay.var("x", shape=(n, c , h, w)) y = relay.nn.l2_normalize(x, eps=0.001, axis=[1]) "axis=" in y.astext() - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c , h, w)) shape = (1, 5, 10, 10) @@ -429,7 +436,7 @@ def test_l2_normalize(): eps=0.001 axis=1 z = relay.nn.l2_normalize(x, eps=0.001, axis=[axis]) - yy = relay.ir_pass.infer_type(z) + yy = run_infer_type(z) assert yy.checked_type == relay.TensorType(shape, dtype) func = relay.Function([x], z) x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype) @@ -477,7 +484,7 @@ def get_shape(): ishape, oshape = get_shape() x = relay.var("x", relay.TensorType((n,) + ishape, dtype)) y = 
relay.nn.upsampling(x, scale=scale, layout=layout, method=method) - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n,) + oshape, dtype) dshape = (1,) + ishape x = relay.var("x", shape=dshape) diff --git a/tests/python/relay/test_op_level3.py b/tests/python/relay/test_op_level3.py index fcd4caff2695..575996fbe61e 100644 --- a/tests/python/relay/test_op_level3.py +++ b/tests/python/relay/test_op_level3.py @@ -16,17 +16,23 @@ # under the License. """ Support level3 operator test cases. """ -import tvm import numpy as np +from nose.tools import raises +import tvm from tvm import relay -from tvm.relay import create_executor +from tvm.relay import create_executor, transform from tvm.relay.testing import ctx_list -from nose.tools import raises + +def run_infer_type(expr): + mod = relay.Module.from_expr(expr) + mod = transform.InferType()(mod) + entry = mod[mod.entry_func] + return entry if isinstance(expr, relay.Function) else entry.body def test_zeros_ones(): for op, ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]: y = op(shape=(124, 50), dtype="float64") - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((124, 50), "float64") intrp = create_executor() intrp_res = intrp.evaluate(y).asnumpy() @@ -46,7 +52,7 @@ def test_unary_identity(): shape = (8, 9, 4) x = relay.var("x", relay.TensorType(shape, "float32")) y = op(x) - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType(shape, "float32") if ref is not None: @@ -59,20 +65,20 @@ def test_unary_identity(): def test_cast(): x = relay.var("x", relay.TensorType((8, 9, 4), "float32")) y = x.astype("int32") - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert "dtype=" in yy.astext() assert yy.checked_type == relay.TensorType((8, 9, 4), "int32") x = relay.var("x", relay.TensorType((8, 9, 4), "float32")) y = relay.cast(x, "int32") - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert "dtype=" in yy.astext() assert yy.checked_type == relay.TensorType((8, 9, 4), "int32") def test_clip(): a = relay.var("a", relay.TensorType((10, 4), "float32")) y = relay.clip(a, 1., 4.) 
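# Hedged aside (my own example, not part of this patch): the same module round
# trip applies to multi-output ops such as split and batch_norm, which return a
# TupleWrapper; these tests type-check the underlying tuple via .astuple():
import tvm
from tvm import relay

p = relay.var("p", shape=(4, 10))
parts = relay.split(p, indices_or_sections=2, axis=1)   # TupleWrapper, not an Expr
mod = relay.Module.from_expr(parts.astuple())
mod = relay.transform.InferType()(mod)
tup = mod[mod.entry_func].body                          # the typed tuple expression
assert tup.checked_type == relay.ty.TupleType(tvm.convert(
    [relay.ty.TensorType((4, 5), "float32"),
     relay.ty.TensorType((4, 5), "float32")]))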
- yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((10, 4), "float32") data = np.random.rand(10, 4).astype('float32') @@ -105,13 +111,13 @@ def test_transpose_infer_type(): x = relay.var("x", relay.TensorType((n, t, d), "float32")) y = relay.transpose(x, axes=(1, 0, 2)) assert "axes=" in y.astext() - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (t, n, 100), "float32") y = relay.transpose(x) assert "axes=" in y.astext() - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (100, t, n), "float32") @@ -138,7 +144,7 @@ def test_squeeze_infer_type(): x = relay.var("x", relay.TensorType((n, t, d), "float32")) y = relay.squeeze(x, axis=(2,)) assert "axis=" in y.astext() - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (1, 4), "float32") @@ -146,7 +152,7 @@ def test_squeeze_infer_type(): x = relay.var("x", relay.TensorType((n, t, d), "float32")) y = relay.squeeze(x) assert "axis=" not in y.astext() - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (4,), "float32") @@ -156,7 +162,7 @@ def test_squeeze_bad_axes_infer_type(): n, t, d = 1, 4, 1 x = relay.var("x", relay.TensorType((n, t, d), "float32")) y = relay.squeeze(x, axis=(1,)) - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) def test_reshape_infer_type(): @@ -164,7 +170,7 @@ def test_reshape_infer_type(): x = relay.var("x", relay.TensorType((n, t, d1, d2), "float32")) y = relay.reshape(x, newshape=(n, t, 2000)) assert "newshape=" in y.astext() - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, t, 2000), "float32") @@ -172,7 +178,7 @@ def test_reshape(): def verify_reshape(shape, newshape, oshape): x = relay.var("x", relay.TensorType(shape, "float32")) z = relay.reshape(x, newshape=newshape) - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) assert "newshape=" in z.astext() assert zz.checked_type == relay.ty.TensorType(oshape, "float32") @@ -205,7 +211,7 @@ def test_reshape_like_infer_type(): x = relay.var("x", relay.TensorType((1, 2, 3), "float32")) y = relay.var("y", relay.TensorType((1,6), "float32")) z = relay.reshape_like(x, y) - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) assert zz.checked_type == relay.TensorType((1, 6), "float32") # symbolic shape @@ -213,7 +219,7 @@ def test_reshape_like_infer_type(): x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = relay.var("y", relay.TensorType((1, 8, 8), "float32")) z = relay.reshape_like(x, y) - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) assert zz.checked_type == relay.TensorType((1, 8, 8), "float32") @@ -226,7 +232,7 @@ def verify_reshape_like(shape, oshape): x = relay.var("x", relay.TensorType(shape, "float32")) y = relay.var("x", relay.TensorType(oshape, "float32")) z = relay.reshape_like(x, y) - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) assert zz.checked_type == relay.ty.TensorType(ref_res.shape, "float32") func = relay.Function([x, y], z) @@ -245,8 +251,7 @@ def verify_take(dshape, indices_shape, oshape, axis=None): x = relay.var("x", relay.TensorType(dshape, "float32")) indices = relay.var("indices", relay.TensorType(indices_shape, "int32")) y = relay.take(x, indices, axis=axis) - y.astext() - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == 
relay.TensorType(oshape, "float32") d1, d2, d3 = tvm.var("d1"), tvm.var("d2"), tvm.var("d3") @@ -301,8 +306,7 @@ def test_split_infer_type(): def verify_split(dshape, indices_or_sections, ret_type, axis=None): x = relay.var("x", relay.ty.TensorType(dshape, "float32")) y = relay.split(x, indices_or_sections, axis=axis) - y.astext() - yy = relay.ir_pass.infer_type(y.astuple()) + yy = run_infer_type(y.astuple()) assert yy.checked_type == ret_type d1, d2, d3, d4 = tvm.var("d1"), tvm.var("d2"), tvm.var("d3"), tvm.var("d4") @@ -347,14 +351,14 @@ def test_full_infer_type(): # default settings: match input dtype x = relay.var("x", relay.TensorType((), "int8")) y = relay.full(x, ()) - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((), "int8") # change the shape and dtype x = relay.var("x", relay.TensorType((), "float32")) y = relay.full(x, (1, 2), "int8") "shape=" in y.astext() - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((1, 2), "int8") @@ -378,7 +382,7 @@ def test_full_like_infer_type(): base = relay.var("base", relay.TensorType((1, 2, 3), "float32")) fill = relay.var("fill", relay.TensorType((), "float32")) y = relay.full_like(base, fill) - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((1, 2, 3), "float32") # symbolic shape @@ -386,7 +390,7 @@ def test_full_like_infer_type(): base = relay.var("base", relay.TensorType((n, c, h, w), "float32")) fill = relay.var("fill", relay.TensorType((), "float32")) y = relay.full_like(base, fill) - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, h, w), "float32") @@ -414,7 +418,7 @@ def test_infer_type_leaky_relu(): x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = relay.nn.leaky_relu(x, alpha=0.1) "alpha=0.1" in y.astext() - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, h, w), "float32") shape = (1, 5, 10, 10) @@ -422,8 +426,8 @@ def test_infer_type_leaky_relu(): x = relay.var("x", relay.TensorType(shape, dtype)) z = relay.nn.leaky_relu(x, alpha=0.1) assert "alpha=0.1" in z.astext() - yy = relay.ir_pass.infer_type(z) - assert yy.checked_type == relay.TensorType(shape, dtype) + zz = run_infer_type(z) + assert zz.checked_type == relay.TensorType(shape, dtype) func = relay.Function([x], z) x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype) ref_res = np.where(x_data > 0, x_data, x_data * 0.1) @@ -443,7 +447,7 @@ def verify_infer_type_prelu(data, alpha, axis, output, dtype="float32"): else: y = relay.var("alpha", relay.IncompleteType()) z = relay.nn.prelu(x, y, axis=axis) - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) if axis != 1: assert "axis" in z.astext() assert zz.checked_type == relay.ty.TensorType(output, dtype) @@ -577,7 +581,7 @@ def test_reverse(): def verify_reverse(dshape, axis): x = relay.var("x", relay.TensorType(dshape, "float32")) z = relay.reverse(x, axis=axis) - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) func = relay.Function([x], z) x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32") diff --git a/tests/python/relay/test_op_level4.py b/tests/python/relay/test_op_level4.py index da0fe01063f4..9bab5d87389a 100644 --- a/tests/python/relay/test_op_level4.py +++ b/tests/python/relay/test_op_level4.py @@ -17,9 +17,16 @@ import tvm import numpy as np from tvm import relay +from tvm.relay 
import transform from tvm.relay.testing import ctx_list import topi.testing +def run_infer_type(expr): + mod = relay.Module.from_expr(expr) + mod = transform.InferType()(mod) + entry = mod[mod.entry_func] + return entry if isinstance(expr, relay.Function) else entry.body + def test_binary_op(): def check_binary_op(opfunc, ref): n = tvm.var("n") @@ -30,7 +37,8 @@ def check_binary_op(opfunc, ref): z = opfunc(x, y) # test printer assert ("{}(%x, %y)".format(z.op.name)) in z.astext() - assert relay.ir_pass.infer_type(z).checked_type == t1 + zz = run_infer_type(z) + assert zz.checked_type == t1 if ref is not None: t1 = relay.TensorType((5, 10, 5)) @@ -62,8 +70,7 @@ def test_cmp_type(): x = relay.var("x", relay.TensorType((10, 4), "float32")) y = relay.var("y", relay.TensorType((5, 10, 1), "float32")) z = op(x, y) - z.astext() - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) assert zz.checked_type == relay.TensorType((5, 10, 4), "bool") if ref is not None: @@ -94,7 +101,7 @@ def test_binary_int_broadcast(): x = relay.var("x", relay.TensorType((10, 4), "int32")) y = relay.var("y", relay.TensorType((5, 10, 1), "int32")) z = op(x, y) - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) assert zz.checked_type == relay.TensorType((5, 10, 4), "int32") if ref is not None: @@ -120,7 +127,7 @@ def test_where(): x = relay.var("x", relay.TensorType(shape, dtype)) y = relay.var("y", relay.TensorType(shape, dtype)) z = relay.where(cond, x, y) - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) assert zz.checked_type == relay.TensorType(shape, dtype) func = relay.Function([cond, x, y], z) @@ -142,7 +149,7 @@ def verify_reduce(funcs, data, axis, keepdims, exclude, output, dtype="float32") x = relay.var("x", relay.TensorType(data, dtype)) z = test_func(x, axis, keepdims, exclude) - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) if axis: assert "axis=" in z.astext() if keepdims: @@ -224,7 +231,7 @@ def verify(dshape, begin, end, strides, output, test_ref=True): x = relay.var("x", relay.TensorType(dshape, "float32")) z = relay.strided_slice(x, begin=begin, end=end, strides=strides) func = relay.Function([x], z) - func = relay.ir_pass.infer_type(func) + func = run_infer_type(func) text = func.astext() assert "begin=" in text assert "end=" in text diff --git a/tests/python/relay/test_op_level5.py b/tests/python/relay/test_op_level5.py index 3d9ec6dde4ad..cd008e3d19a3 100644 --- a/tests/python/relay/test_op_level5.py +++ b/tests/python/relay/test_op_level5.py @@ -20,21 +20,28 @@ import numpy as np import tvm from tvm import relay +from tvm.relay import transform from tvm.relay.testing import ctx_list import topi.testing +def run_infer_type(expr): + mod = relay.Module.from_expr(expr) + mod = transform.InferType()(mod) + entry = mod[mod.entry_func] + return entry if isinstance(expr, relay.Function) else entry.body + def test_resize_infer_type(): n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w") x = relay.var("x", relay.TensorType((n, c, h, w), "int8")) th, tw = tvm.var("th"), tvm.var("tw") z = relay.image.resize(x, (th, tw)) - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) assert zz.checked_type == relay.TensorType((n, c, th, tw), "int8") x = relay.var("x", relay.TensorType((n, c, h, w), "int8")) z= relay.image.resize(x, (100, 200), "NCHW", "BILINEAR", False) assert "size=" in z.astext() - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) assert zz.checked_type == relay.TensorType((n, c, 100, 200), "int8") def test_resize(): @@ -52,7 +59,7 @@ 
def verify_resize(dshape, scale, method, layout): x = relay.var("x", relay.TensorType(dshape, "float32")) z = relay.image.resize(x, size, layout, method, False) assert "size=" in z.astext() - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) assert zz.checked_type == relay.TensorType(ref_res.shape, "float32") func = relay.Function([x], z) @@ -109,7 +116,7 @@ def verify_multibox_prior(x, dshape, ref_res, sizes=(1.0,), check_type_only=False): z = relay.vision.multibox_prior(x, sizes, ratios, steps, offsets, clip) - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) if check_size: assert "sizes=" in z.astext() assert zz.checked_type == relay.TensorType( @@ -121,7 +128,7 @@ def verify_multibox_prior(x, dshape, ref_res, sizes=(1.0,), data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32") func = relay.Function([x], z) - func = relay.ir_pass.infer_type(func) + func = run_infer_type(func) for target, ctx in ctx_list(): intrp1 = relay.create_executor("graph", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data) @@ -176,7 +183,7 @@ def verify_get_valid_counts(dshape, score_threshold, id_index, score_index): z = relay.vision.get_valid_counts(x, score_threshold, id_index, score_index) assert "score_threshold" in z.astext() func = relay.Function([x], z.astuple()) - func = relay.ir_pass.infer_type(func) + func = run_infer_type(func) for target, ctx in ctx_list(): if target == 'cuda': return @@ -205,8 +212,8 @@ def verify_nms(x0_data, x1_data, dshape, ref_res, ref_indices_res, top_k = top_k) assert "iou_threshold" in z.astext() assert "iou_threshold" in z_indices.astext() - zz = relay.ir_pass.infer_type(z) - zz_indices = relay.ir_pass.infer_type(z_indices) + zz = run_infer_type(z) + zz_indices = run_infer_type(z_indices) assert zz.checked_type == relay.ty.TensorType(dshape, "float32") assert zz_indices.checked_type == relay.ty.TensorType((dshape[0], dshape[1]), "int32") @@ -214,9 +221,9 @@ def verify_nms(x0_data, x1_data, dshape, ref_res, ref_indices_res, return func = relay.Function([x0, x1], z) - func = relay.ir_pass.infer_type(func) + func = run_infer_type(func) func_indices = relay.Function([x0, x1], z_indices) - func_indices = relay.ir_pass.infer_type(func_indices) + func_indices = run_infer_type(func_indices) for target, ctx in ctx_list(): intrp1 = relay.create_executor("graph", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(x0_data, x1_data) @@ -288,7 +295,7 @@ def test_default_value(): mtl = relay.vision.multibox_transform_loc( cls_prob=cls_prob, loc_pred=loc_pred, anchor=anchors) - ret = relay.ir_pass.infer_type(mtl.astuple()) + ret = run_infer_type(mtl.astuple()) ref_type = relay.ty.TupleType( tvm.convert([ relay.ty.TensorType((1, num_anchors, 6), "float32"), @@ -299,7 +306,7 @@ def test_default_value(): nms = relay.vision.non_max_suppression(mtl[0], mtl[1], return_indices=False) func = relay.Function([cls_prob, loc_pred, anchors], nms) - func = relay.ir_pass.infer_type(func) + func = run_infer_type(func) for target, ctx in ctx_list(): intrp1 = relay.create_executor("graph", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(np_cls_prob, np_loc_preds, @@ -330,7 +337,7 @@ def test_threshold(): anchor=anchors, threshold=threshold, variances=variances) - ret = relay.ir_pass.infer_type(ret.astuple()) + ret = run_infer_type(ret.astuple()) ref_type = relay.ty.TupleType( tvm.convert([ relay.ty.TensorType((n, num_anchors, 6), "float32"), @@ -349,15 +356,14 @@ def verify_roi_align(data_shape, rois_shape, pooled_size, spatial_scale, sample_ z = 
relay.vision.roi_align(data, rois, pooled_size=(pooled_size, pooled_size), spatial_scale=spatial_scale, sample_ratio=sample_ratio, layout="NCHW") - zz = relay.ir_pass.infer_type(z) - + zz = run_infer_type(z) batch, channel, in_size, _ = data_shape num_roi = rois_shape[0] assert zz.checked_type == relay.ty.TensorType( (num_roi, channel, pooled_size, pooled_size), "float32") func = relay.Function([data, rois], z) - func = relay.ir_pass.infer_type(func) + func = run_infer_type(func) np_data = np.random.uniform(size=data_shape).astype("float32") np_rois = np.random.uniform(size=rois_shape).astype('float32') * in_size np_rois[:, 0] = np.random.randint(low = 0, high = batch, size = num_roi) @@ -382,15 +388,14 @@ def verify_roi_pool(data_shape, rois_shape, pooled_size, spatial_scale): rois = relay.var("rois", relay.ty.TensorType(rois_shape, "float32")) z = relay.vision.roi_pool(data, rois, pooled_size=(pooled_size, pooled_size), spatial_scale=spatial_scale, layout="NCHW") - zz = relay.ir_pass.infer_type(z) - + zz = run_infer_type(z) batch, channel, in_size, _ = data_shape num_roi = rois_shape[0] assert zz.checked_type == relay.ty.TensorType( (num_roi, channel, pooled_size, pooled_size), "float32") func = relay.Function([data, rois], z) - func = relay.ir_pass.infer_type(func) + func = run_infer_type(func) np_data = np.random.uniform(size=data_shape).astype("float32") np_rois = np.random.uniform(size=rois_shape).astype('float32') * in_size np_rois[:, 0] = np.random.randint(low = 0, high = batch, size = num_roi).astype('float32') @@ -414,12 +419,11 @@ def verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs): bbox_pred = relay.var("bbox_pred", relay.ty.TensorType(np_bbox_pred.shape, "float32")) im_info = relay.var("im_info", relay.ty.TensorType(np_im_info.shape, "float32")) z = relay.vision.proposal(cls_prob, bbox_pred, im_info, **attrs) - zz = relay.ir_pass.infer_type(z) - + zz = run_infer_type(z) assert zz.checked_type == relay.ty.TensorType(np_out.shape, "float32") func = relay.Function([cls_prob, bbox_pred, im_info], z) - func = relay.ir_pass.infer_type(func) + func = run_infer_type(func) for target in ['cuda']: if not tvm.module.enabled(target): print("Skip test because %s is not enabled." 
% target) @@ -478,7 +482,7 @@ def test_yolo_reorg_infer_shape(): def verify_yolo_reorg(shape, stride, out_shape): x = relay.var("x", relay.TensorType(shape, "float32")) z = relay.vision.yolo_reorg(x, stride=stride) - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) assert "stride=" in z.astext() assert zz.checked_type == relay.ty.TensorType(out_shape, "float32") @@ -493,7 +497,7 @@ def verify_yolo_reorg(shape, stride): x = relay.var("x", relay.TensorType(shape, "float32")) z = relay.vision.yolo_reorg(x, stride=stride) - zz = relay.ir_pass.infer_type(z) + zz = run_infer_type(z) assert "stride=" in z.astext() assert zz.checked_type == relay.ty.TensorType(ref_res.shape, "float32") @@ -527,7 +531,7 @@ def test_infer_type(batch, in_channel, size, out_channel, deformable_groups, gro weight_shape = (out_channel, in_channel // groups, kernel_size[0], kernel_size[1]) out_shape = (batch, out_channel, size, size) offset_shape = (batch, 2 * kernel_size[0] * kernel_size[1] * deformable_groups, out_shape[2], out_shape[3]) - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.TensorType(out_shape) assert yy.args[1].checked_type == relay.TensorType(offset_shape), yy.args[1].checked_type assert yy.args[2].checked_type == relay.TensorType(weight_shape) diff --git a/tests/python/relay/test_pass_alpha_equal.py b/tests/python/relay/test_pass_alpha_equal.py index 0e0036565363..de764f849c1c 100644 --- a/tests/python/relay/test_pass_alpha_equal.py +++ b/tests/python/relay/test_pass_alpha_equal.py @@ -14,17 +14,17 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -import tvm import numpy as np +import tvm from tvm import relay -from tvm.relay import ir_pass +from tvm.relay import analysis def alpha_equal(x, y): """ Wrapper around alpha equality which ensures that the hash function respects equality. 
""" - return ir_pass.alpha_equal(x, y) and ir_pass.structural_hash(x) == ir_pass.structural_hash(y) + return analysis.alpha_equal(x, y) and analysis.structural_hash(x) == analysis.structural_hash(y) def test_tensor_type_alpha_equal(): t1 = relay.TensorType((3, 4), "float32") @@ -604,14 +604,14 @@ def test_hash_unequal(): y2 = relay.var("y2", shape=(10, 10), dtype="float32") func2 = relay.Function([x2, y2], relay.add(x2, y2)) - assert ir_pass.structural_hash(func1) == ir_pass.structural_hash(func2) + assert analysis.structural_hash(func1) == analysis.structural_hash(func2) # func3 is same as func1 but with different var shapes x3 = relay.var("x3", shape=(20, 10), dtype="float32") y3 = relay.var("y3", shape=(20, 10), dtype="float32") func3 = relay.Function([x3, y3], relay.add(x3, y3)) - assert not ir_pass.structural_hash(func1) == ir_pass.structural_hash(func3) + assert not analysis.structural_hash(func1) == analysis.structural_hash(func3) if __name__ == "__main__": test_tensor_type_alpha_equal() diff --git a/tests/python/relay/test_pass_alter_op_layout.py b/tests/python/relay/test_pass_alter_op_layout.py index 7d022ba25570..65fd0b0819cc 100644 --- a/tests/python/relay/test_pass_alter_op_layout.py +++ b/tests/python/relay/test_pass_alter_op_layout.py @@ -19,7 +19,18 @@ from tvm import relay from tvm.relay.op import register_alter_op_layout -from tvm.relay.ir_pass import * +from tvm.relay import transform, analysis + + +def run_opt_pass(expr, passes): + passes = passes if isinstance(passes, list) else [passes] + mod = relay.Module.from_expr(expr) + seq = transform.Sequential(passes) + with transform.PassContext(opt_level=3): + mod = seq(mod) + entry = mod[mod.entry_func] + return entry if isinstance(expr, relay.Function) else entry.body + def test_alter_op(): """Test directly replacing an operator with a new one""" @@ -52,13 +63,10 @@ def expected(): return y a = before() - a = infer_type(a) - a = alter_op_layout(a) + a = run_opt_pass(a, transform.AlterOpLayout()) + b = run_opt_pass(expected(), transform.InferType()) - b = expected() - b = infer_type(b) - - assert alpha_equal(a, b), "Actual = \n" + str(a) + assert analysis.alpha_equal(a, b), "Actual = \n" + str(a) def test_alter_return_none(): @@ -77,12 +85,11 @@ def alter_conv2d(attrs, inputs, tinfos): return None a = before() - a = infer_type(a) - a = alter_op_layout(a) + a = run_opt_pass(a, transform.AlterOpLayout()) b = before() - b = infer_type(b) - assert alpha_equal(a, b), "Actual = \n" + str(a) + b = run_opt_pass(b, transform.InferType()) + assert analysis.alpha_equal(a, b), "Actual = \n" + str(a) assert(called[0]) @@ -102,7 +109,7 @@ def before(): y = relay.nn.max_pool2d(y, pool_size=(2, 2)) y = relay.cast(y, 'int32') y = relay.nn.batch_flatten(y) - y = relay.Function(free_vars(y), y) + y = relay.Function(analysis.free_vars(y), y) return y @register_alter_op_layout("nn.conv2d", level=102) @@ -135,20 +142,17 @@ def expected(): y = relay.cast(y, 'int32') y = relay.layout_transform(y, "NCHW16c", "NCHW") y = relay.nn.batch_flatten(y) - y = relay.Function(free_vars(y), y) + y = relay.Function(analysis.free_vars(y), y) return y a = before() - a = infer_type(a) - a = canonicalize_ops(a) - a = infer_type(a) - a = alter_op_layout(a) - a = infer_type(a) + a = run_opt_pass(a, [transform.CanonicalizeOps(), + transform.AlterOpLayout()]) b = expected() - b = infer_type(b) + b = run_opt_pass(b, transform.InferType()) - assert alpha_equal(a, b), "Actual = \n" + str(a) + assert analysis.alpha_equal(a, b), "Actual = \n" + str(a) def 
test_alter_layout_dual_path(): @@ -172,7 +176,7 @@ def before(): y1 = relay.nn.relu(y1) y2 = relay.nn.batch_flatten(y) ret = relay.Tuple([y1, y2]) - y = relay.Function(free_vars(ret), ret) + y = relay.Function(analysis.free_vars(ret), ret) return y @register_alter_op_layout("nn.conv2d", level=103) @@ -203,18 +207,16 @@ def expected(): y2 = relay.layout_transform(y, "NCHW16c", "NCHW") y2 = relay.nn.batch_flatten(y2) ret = relay.Tuple([y1, y2]) - y = relay.Function(free_vars(ret), ret) + y = relay.Function(analysis.free_vars(ret), ret) return y a = before() - a = infer_type(a) - a = alter_op_layout(a) - a = infer_type(a) + a = run_opt_pass(a, transform.AlterOpLayout()) b = expected() - b = infer_type(b) + b = run_opt_pass(b, transform.InferType()) - assert alpha_equal(a, b), "Actual = \n" + str(a) + assert analysis.alpha_equal(a, b), "Actual = \n" + str(a) def test_alter_layout_resnet(): """Test alternating the layout of a residual block @@ -236,7 +238,7 @@ def before(): y2 = relay.nn.relu(y2) y = y + y2 y = relay.nn.global_max_pool2d(y) - return relay.Function(free_vars(y), y) + return relay.Function(analysis.free_vars(y), y) @register_alter_op_layout("nn.conv2d", level=104) def alter_conv2d(attrs, inputs, tinfos): @@ -264,17 +266,15 @@ def expected(): y = y + y2 y = relay.nn.global_max_pool2d(y, layout="NCHW16c") y = relay.layout_transform(y, "NCHW16c", "NCHW") - return relay.Function(free_vars(y), y) + return relay.Function(analysis.free_vars(y), y) a = before() - a = infer_type(a) - a = alter_op_layout(a) - a = infer_type(a) + a = run_opt_pass(a, transform.AlterOpLayout()) b = expected() - b = infer_type(b) + b = run_opt_pass(b, transform.InferType()) - assert alpha_equal(a, b), "Actual = \n" + str(a) + assert analysis.alpha_equal(a, b), "Actual = \n" + str(a) def test_alter_layout_broadcast_op(): @@ -287,7 +287,7 @@ def before(): y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1)) y = relay.nn.bias_add(y, bias) # test broadcasting to lhs y = relay.multiply(scale, y) # test broadcasting to rhs - y = relay.Function(free_vars(y), y) + y = relay.Function(analysis.free_vars(y), y) return y @register_alter_op_layout("nn.conv2d", level=105) @@ -311,20 +311,17 @@ def expected(): y = relay.add(y, bias) # test broadcasting to lhs y = relay.multiply(scale, y) # test broadcasting to rhs y = relay.layout_transform(y, "NCHW16c", "NCHW") - y = relay.Function(free_vars(y), y) + y = relay.Function(analysis.free_vars(y), y) return y a = before() - a = infer_type(a) - a = canonicalize_ops(a) - a = infer_type(a) - a = alter_op_layout(a) - a = infer_type(a) + a = run_opt_pass(a, [transform.CanonicalizeOps(), + transform.AlterOpLayout()]) b = expected() - b = infer_type(b) + b = run_opt_pass(b, transform.InferType()) - assert alpha_equal(a, b), "Actual = \n" + str(a) + assert analysis.alpha_equal(a, b), "Actual = \n" + str(a) def test_alter_layout_scalar(): """Test alternating the layout of a conv2d. 
@@ -335,7 +332,7 @@ def before(): weight = relay.var("weight") y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1)) y = relay.add(y, relay.const(1, "float32")) - y = relay.Function(free_vars(y), y) + y = relay.Function(analysis.free_vars(y), y) return y @register_alter_op_layout("nn.conv2d", level=106) @@ -358,20 +355,17 @@ def expected(): y = relay.add(y, relay.const(1.0, "float32")) y = relay.layout_transform(y, "NCHW16c", "NCHW") - y = relay.Function(free_vars(y), y) + y = relay.Function(analysis.free_vars(y), y) return y a = before() - a = infer_type(a) - a = canonicalize_ops(a) - a = infer_type(a) - a = alter_op_layout(a) - a = infer_type(a) + a = run_opt_pass(a, [transform.CanonicalizeOps(), + transform.AlterOpLayout()]) b = expected() - b = infer_type(b) + b = run_opt_pass(b, transform.InferType()) - assert alpha_equal(a, b), "Actual = \n" + str(a) + assert analysis.alpha_equal(a, b), "Actual = \n" + str(a) def test_alter_layout_concatenate(): """ """ @@ -388,7 +382,7 @@ def before(): kernel_size=(3, 3), padding=(1, 1)) ret = relay.concatenate([y, y1], axis=1) - y = relay.Function(free_vars(ret), ret) + y = relay.Function(analysis.free_vars(ret), ret) return y @register_alter_op_layout("nn.conv2d", level=107) @@ -415,18 +409,16 @@ def expected(): data_layout='NCHW16c') ret = relay.concatenate([y, y1], axis=1) ret = relay.layout_transform(ret, "NCHW16c", "NCHW") - y = relay.Function(free_vars(ret), ret) + y = relay.Function(analysis.free_vars(ret), ret) return y a = before() - a = infer_type(a) - a = alter_op_layout(a) - a = infer_type(a) + a = run_opt_pass(a, transform.AlterOpLayout()) b = expected() - b = infer_type(b) + b = run_opt_pass(b, transform.InferType()) - assert alpha_equal(a, b), "Actual = \n" + str(a) + assert analysis.alpha_equal(a, b), "Actual = \n" + str(a) def test_alter_layout_nchw_upsamping_op(): @@ -437,7 +429,7 @@ def before(): y = relay.nn.conv2d(x, weight, channels=32, kernel_size=(3, 3), padding=(1, 1)) y = relay.nn.upsampling(y, scale=2) y = relay.nn.avg_pool2d(y, pool_size=(2, 2), strides=(2, 2)) - y = relay.Function(free_vars(y), y) + y = relay.Function(analysis.free_vars(y), y) return y @register_alter_op_layout("nn.conv2d", level=108) @@ -456,21 +448,17 @@ def expected(): y = relay.nn.upsampling(y, scale=2, layout="NCHW16c") y = relay.nn.avg_pool2d(y, pool_size=(2, 2), strides=(2, 2), layout='NCHW16c') y = relay.layout_transform(y, "NCHW16c", "NCHW") - y = relay.Function(free_vars(y), y) + y = relay.Function(analysis.free_vars(y), y) return y a = before() - a = infer_type(a) - a = canonicalize_ops(a) - a = infer_type(a) - - a = alter_op_layout(a) - a = infer_type(a) + a = run_opt_pass(a, [transform.CanonicalizeOps(), + transform.AlterOpLayout()]) b = expected() - b = infer_type(b) + b = run_opt_pass(b, transform.InferType()) - assert alpha_equal(a, b), "Actual = \n" + str(a) + assert analysis.alpha_equal(a, b), "Actual = \n" + str(a) def test_alter_layout_strided_slice(): @@ -480,7 +468,7 @@ def before(): weight = relay.var('weight', shape=(32, 32, 3, 3)) y = relay.nn.conv2d(x, weight, channels=32, kernel_size=(3, 3), padding=(1, 1)) y = relay.strided_slice(y, begin=[0, 16], end=[None, None]) - y = relay.Function(free_vars(y), y) + y = relay.Function(analysis.free_vars(y), y) return y @register_alter_op_layout("nn.conv2d", level=109) @@ -498,21 +486,17 @@ def expected(): data_layout="NCHW4c") y = relay.strided_slice(y, begin=[0, 4], end=[None, 8]) y = relay.layout_transform(y, "NCHW4c", "NCHW") - y = 
relay.Function(free_vars(y), y) + y = relay.Function(analysis.free_vars(y), y) return y a = before() - a = infer_type(a) - a = canonicalize_ops(a) - a = infer_type(a) - - a = alter_op_layout(a) - a = infer_type(a) + a = run_opt_pass(a, [transform.CanonicalizeOps(), + transform.AlterOpLayout()]) b = expected() - b = infer_type(b) + b = run_opt_pass(b, transform.InferType()) - assert alpha_equal(a, b), "Actual = \n" + str(a) + assert analysis.alpha_equal(a, b), "Actual = \n" + str(a) def test_alter_layout_depthwise_conv2d(): """Test depthwise_conv2d operator""" @@ -520,7 +504,7 @@ def before(): x = relay.var("x", shape=(1, 32, 56, 56)) w = relay.var("w", shape=(32, 1, 3, 3)) y = relay.nn.conv2d(x, w, padding=(1, 1), channels=32, kernel_size=(3, 3), groups=32) - y = relay.Function(free_vars(y), y) + y = relay.Function(analysis.free_vars(y), y) return y import topi @@ -538,20 +522,17 @@ def expected(): groups=32, data_layout="NCHW8c", kernel_layout="OIHW1i8o", out_layout="NCHW8c") y = relay.layout_transform(y, "NCHW8c", "NCHW") - y = relay.Function(free_vars(y), y) + y = relay.Function(analysis.free_vars(y), y) return y a = before() - a = infer_type(a) - a = canonicalize_ops(a) - a = infer_type(a) - a = alter_op_layout(a) - a = infer_type(a) + a = run_opt_pass(a, [transform.CanonicalizeOps(), + transform.AlterOpLayout()]) b = expected() - b = infer_type(b) + b = run_opt_pass(b, transform.InferType()) - assert(alpha_equal(a, b)) + assert(analysis.alpha_equal(a, b)) def test_alter_layout_prelu(): """Test PRelu operator""" @@ -561,7 +542,7 @@ def before(): alpha = relay.var("alpha", relay.IncompleteType()) y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1)) y = relay.nn.prelu(y, alpha) - y = relay.Function(free_vars(y), y) + y = relay.Function(analysis.free_vars(y), y) return y @register_alter_op_layout("nn.conv2d", level=111) @@ -584,20 +565,16 @@ def expected(): data_layout="NCHW16c") y = relay.layout_transform(y, "NCHW16c", "NCHW") y = relay.nn.prelu(y, alpha) - y = relay.Function(free_vars(y), y) + y = relay.Function(analysis.free_vars(y), y) return y a = before() - a = infer_type(a) - a = canonicalize_ops(a) - a = infer_type(a) - a = alter_op_layout(a) - a = infer_type(a) + a = run_opt_pass(a, [transform.CanonicalizeOps(), transform.AlterOpLayout()]) b = expected() - b = infer_type(b) + b = run_opt_pass(b, transform.InferType()) - assert(alpha_equal(a, b)) + assert(analysis.alpha_equal(a, b)) if __name__ == "__main__": diff --git a/tests/python/relay/test_pass_annotation.py b/tests/python/relay/test_pass_annotation.py index 84a5c8749079..86ebf73d3dd6 100644 --- a/tests/python/relay/test_pass_annotation.py +++ b/tests/python/relay/test_pass_annotation.py @@ -22,6 +22,16 @@ from tvm import relay from tvm.contrib import graph_runtime from tvm.relay.expr_functor import ExprMutator +from tvm.relay import transform + + +def run_opt_pass(expr, passes): + passes = passes if isinstance(passes, list) else [passes] + mod = relay.Module.from_expr(expr) + seq = transform.Sequential(passes) + with transform.PassContext(opt_level=3): + mod = seq(mod) + return mod[mod.entry_func] def test_redundant_annotation(): @@ -39,9 +49,8 @@ def annotated(): sub2 = relay.subtract(_add2, z) func = relay.Function([x, y, z], relay.Tuple([sub1, sub2])) - func = relay.ir_pass.infer_type(func) - func = relay.ir_pass.rewrite_annotated_ops(func, - ctx1.device_type) + func = run_opt_pass(func, + transform.RewriteAnnotatedOps(ctx1.device_type)) return func def expected(): @@ -53,9 +62,9 @@ def 
expected(): func = relay.Function([x, y, z], relay.Tuple([sub1, sub2])) return func - annotated_func = relay.ir_pass.infer_type(annotated()) - expected_func = relay.ir_pass.infer_type(expected()) - assert relay.ir_pass.alpha_equal(annotated_func, expected_func) + annotated_func = annotated() + expected_func = run_opt_pass(expected(), transform.InferType()) + assert relay.analysis.alpha_equal(annotated_func, expected_func) def test_annotate_expr(): @@ -70,9 +79,8 @@ def annotated(): _add = relay.annotation.on_device(add, ctx1) sub = relay.subtract(_add, z) _sub = relay.annotation.on_device(sub, ctx2) - expr = relay.ir_pass.infer_type(_sub) - expr = relay.ir_pass.rewrite_annotated_ops(expr, - ctx1.device_type) + expr = run_opt_pass(_sub, + transform.RewriteAnnotatedOps(ctx1.device_type)) return expr def expected(): @@ -81,9 +89,9 @@ def expected(): sub = relay.subtract(copy_add_sub, z) return sub - annotated_expr = relay.ir_pass.infer_type(annotated()) - expected_expr = relay.ir_pass.infer_type(expected()) - assert relay.ir_pass.graph_equal(annotated_expr, expected_expr) + annotated_expr = annotated() + expected_expr = run_opt_pass(expected(), transform.InferType()) + assert relay.analysis.graph_equal(annotated_expr, expected_expr) def test_annotate_all(): @@ -100,9 +108,8 @@ def annotated(): _sub = relay.annotation.on_device(sub, ctx2) func = relay.Function([x, y, z], _sub) - func = relay.ir_pass.infer_type(func) - func = relay.ir_pass.rewrite_annotated_ops(func, - ctx1.device_type) + func = run_opt_pass(func, + transform.RewriteAnnotatedOps(ctx1.device_type)) return func def expected(): @@ -111,9 +118,9 @@ def expected(): func = relay.Function([x, y, z], sub) return func - annotated_func = relay.ir_pass.infer_type(annotated()) - expected_func = relay.ir_pass.infer_type(expected()) - assert relay.ir_pass.alpha_equal(annotated_func, expected_func) + annotated_func = annotated() + expected_func = run_opt_pass(expected(), transform.InferType()) + assert relay.analysis.graph_equal(annotated_func, expected_func) def test_annotate_none(): @@ -127,9 +134,8 @@ def annotated(): add = relay.add(x, y) sub = relay.subtract(add, z) func = relay.Function([x, y, z], sub) - func = relay.ir_pass.infer_type(func) - func = relay.ir_pass.rewrite_annotated_ops(func, - ctx1.device_type) + func = run_opt_pass(func, + transform.RewriteAnnotatedOps(ctx1.device_type)) return func def expected(): @@ -138,15 +144,15 @@ def expected(): func = relay.Function([x, y, z], sub) return func - annotated_func = relay.ir_pass.infer_type(annotated()) - expected_func = relay.ir_pass.infer_type(expected()) - assert relay.ir_pass.alpha_equal(annotated_func, expected_func) + annotated_func = annotated() + expected_func = run_opt_pass(expected(), transform.InferType()) + assert relay.analysis.graph_equal(annotated_func, expected_func) def check_annotated_graph(annotated_func, expected_func): - annotated_func = relay.ir_pass.infer_type(annotated_func) - expected_func = relay.ir_pass.infer_type(expected_func) - assert relay.ir_pass.alpha_equal(annotated_func, expected_func) + annotated_func = run_opt_pass(annotated_func, transform.InferType()) + expected_func = run_opt_pass(expected_func, transform.InferType()) + assert relay.analysis.alpha_equal(annotated_func, expected_func) def test_conv_network(): @@ -189,9 +195,8 @@ def original(): padding=(1, 1)) func = relay.Function([data1, data2, weight], conv2d_3) - func = relay.ir_pass.infer_type(func) - func = relay.ir_pass.rewrite_annotated_ops(func, - tvm.context(3).device_type) + func = 
run_opt_pass( + func, transform.RewriteAnnotatedOps(tvm.context(3).device_type)) return func @@ -221,9 +226,8 @@ def annotated(): _conv2d_3 = relay.annotation.on_device(conv2d_3, dev2) func = relay.Function([data1, data2, weight], _conv2d_3) - func = relay.ir_pass.infer_type(func) - func = relay.ir_pass.rewrite_annotated_ops(func, - tvm.context(3).device_type) + func = run_opt_pass( + func, transform.RewriteAnnotatedOps(tvm.context(3).device_type)) return func class ScheduleConv2d(ExprMutator): @@ -241,7 +245,8 @@ def visit_call(self, expr): def annotate_with_visitor(func): sched = ScheduleConv2d(dev2) func = sched.visit(func) - func = relay.ir_pass.rewrite_annotated_ops(func, dev1.device_type) + func = run_opt_pass( + func, transform.RewriteAnnotatedOps(dev1.device_type)) return func def expected(): @@ -273,10 +278,8 @@ def expected(): def check_storage_and_device_types(): func = annotated() - func = relay.ir_pass.rewrite_annotated_ops(func, 3) - func = relay.ir_pass.infer_type(func) - func = relay.ir_pass.fuse_ops(func, opt_level=2) - func = relay.ir_pass.infer_type(func) + func = run_opt_pass(func, [transform.RewriteAnnotatedOps(3), + transform.FuseOps(2)]) smap = relay.backend._backend.GraphPlanMemory(func) storage_ids = [] device_types = [] @@ -377,9 +380,8 @@ def annotated(): _exp = relay.annotation.on_device(exp, dev_ctx) func = relay.Function([x, y], _exp) - func = relay.ir_pass.infer_type(func) - func = relay.ir_pass.rewrite_annotated_ops(func, - cpu_ctx.device_type) + func = run_opt_pass( + func, transform.RewriteAnnotatedOps(cpu_ctx.device_type)) return func def expected(): @@ -424,9 +426,8 @@ def annotated(): _exp = relay.annotation.on_device(exp, dev_ctx) func = relay.Function([x, y], _exp) - func = relay.ir_pass.infer_type(func) - func = relay.ir_pass.rewrite_annotated_ops(func, - cpu_ctx.device_type) + func = run_opt_pass( + func, transform.RewriteAnnotatedOps(cpu_ctx.device_type)) return func annotated_func = annotated() @@ -449,9 +450,8 @@ def annotated(): _exp = relay.annotation.on_device(exp, cpu_ctx) func = relay.Function([x, y], _exp) - func = relay.ir_pass.infer_type(func) - func = relay.ir_pass.rewrite_annotated_ops(func, - dev_ctx.device_type) + func = run_opt_pass( + func, transform.RewriteAnnotatedOps(dev_ctx.device_type)) return func def expected(): @@ -495,7 +495,7 @@ def run_unpropagatable_graph(dev, tgt): \ / subtract """ - + a = relay.var("a", shape=(10, 10)) b = relay.var("b", shape=(10, 10)) c = relay.var("c", shape=(10, 10)) @@ -507,13 +507,13 @@ def run_unpropagatable_graph(dev, tgt): tmp_add = a_data + b_data tmp_mul = np.multiply(c_data, d_data) ref_res = np.subtract(tmp_add, tmp_mul) - + fallback_device = tvm.context("cpu") target = {"cpu": "llvm", dev: tgt} cpu_ctx = fallback_device dev_ctx = tvm.context(dev) - - def annotated(): + + def annotated(): add = relay.add(a, b) _add = relay.annotation.on_device(add, dev_ctx) mul = relay.multiply(c, d) @@ -521,19 +521,18 @@ def annotated(): sub = relay.subtract(_add, _mul) _sub = relay.annotation.on_device(sub, dev_ctx) func = relay.Function([a, b, c, d], _sub) - func = relay.ir_pass.infer_type(func) - func = relay.ir_pass.rewrite_annotated_ops(func, - dev_ctx.device_type) + func = run_opt_pass( + func, transform.RewriteAnnotatedOps(dev_ctx.device_type)) return func - - def expected(): + + def expected(): add = relay.add(a, b) mul = relay.multiply(c, d) copy_mul_sub = relay.device_copy(mul, cpu_ctx, dev_ctx) sub = relay.subtract(add, copy_mul_sub) func = relay.Function([a, b, c, d], sub) return func - + 
annotated_func = annotated() expected_func = expected() expected_index = [2, 2, 2, 1, 1, 1, 2, 2] @@ -553,7 +552,7 @@ def expected(): mod.run() res = mod.get_output(0).asnumpy() tvm.testing.assert_allclose(res, ref_res, rtol=1e-5, atol=1e-5) - + def test_check_run(): for dev, tgt in [("opencl", "opencl"), ("cuda", "cuda"), @@ -580,7 +579,7 @@ def expected(): elem0 = relay.device_copy(split[0], gpu_ctx, cpu_ctx) elem1 = relay.device_copy(split[1], gpu_ctx, cpu_ctx) sub = elem0 - elem1 - func = relay.Function(relay.ir_pass.free_vars(sub), sub) + func = relay.Function(relay.analysis.free_vars(sub), sub) return func def annotated(): @@ -590,13 +589,14 @@ def annotated(): split = relay.annotation.on_device(split, gpu_ctx) split = relay.TupleWrapper(split, 3) sub = split[0] - split[1] - func = relay.Function(relay.ir_pass.free_vars(sub), sub) - func = relay.ir_pass.rewrite_annotated_ops(func, cpu_ctx.device_type) + func = relay.Function(relay.analysis.free_vars(sub), sub) + func = run_opt_pass( + func, transform.RewriteAnnotatedOps(cpu_ctx.device_type)) return func - annotated_func = relay.ir_pass.infer_type(annotated()) - expected_func = relay.ir_pass.infer_type(expected()) - assert relay.ir_pass.graph_equal(annotated_func, expected_func) + annotated_func = annotated() + expected_func = run_opt_pass(expected(), transform.InferType()) + assert relay.analysis.graph_equal(annotated_func, expected_func) if __name__ == "__main__": diff --git a/tests/python/relay/test_pass_canonicalize_cast.py b/tests/python/relay/test_pass_canonicalize_cast.py index 04478e94039a..c7b88a8dc9e3 100644 --- a/tests/python/relay/test_pass_canonicalize_cast.py +++ b/tests/python/relay/test_pass_canonicalize_cast.py @@ -60,8 +60,11 @@ def check(shape): mod = seq(mod) y = mod[mod.entry_func.name_hint] y_expected = expected(data, conv_weight, bias1, bias2) - y_expected = relay.ir_pass.infer_type(y_expected) - assert relay.ir_pass.alpha_equal(y, y_expected) + gv = relay.GlobalVar("expected") + mod[gv] = y_expected + mod = _transform.InferType()(mod) + y_expected = mod["expected"] + assert relay.analysis.alpha_equal(y, y_expected) check((1, 16, 7, 7)) diff --git a/tests/python/relay/test_pass_check_kind.py b/tests/python/relay/test_pass_check_kind.py index 4d9a2e77eae2..7049ba6f11ed 100644 --- a/tests/python/relay/test_pass_check_kind.py +++ b/tests/python/relay/test_pass_check_kind.py @@ -16,7 +16,7 @@ # under the License. import tvm from tvm import relay -from tvm.relay.ir_pass import check_kind +from tvm.relay.analysis import check_kind from nose.tools import raises diff --git a/tests/python/relay/test_pass_combine_parallel_conv2d.py b/tests/python/relay/test_pass_combine_parallel_conv2d.py index 3bb656b2bda5..4ea11f42f40d 100644 --- a/tests/python/relay/test_pass_combine_parallel_conv2d.py +++ b/tests/python/relay/test_pass_combine_parallel_conv2d.py @@ -15,7 +15,19 @@ # specific language governing permissions and limitations # under the License. 
from tvm import relay -import numpy as np +from tvm.relay import transform + + +def run_combine_parallel(expr, min_num_branches=3): + mod = relay.Module.from_expr(expr) + mod = transform.CombineParallelConv2D(min_num_branches)(mod) + return mod[mod.entry_func] + +def run_opt_pass(expr, opt_pass): + assert isinstance(opt_pass, transform.Pass) + mod = relay.Module.from_expr(expr) + mod = opt_pass(mod) + return mod[mod.entry_func] def test_combine_parallel_conv2d(): @@ -54,12 +66,11 @@ def check(x_shape, channels1, channels2, channels3, channels4): w4 = relay.var("w4", shape=(channels4, in_c, 1, 1)) y_before = before(x, w1, w2, w3, w4) - y = relay.ir_pass.infer_type(y_before) - y = relay.ir_pass.combine_parallel_conv2d(y, min_num_branches=2) - y = relay.ir_pass.infer_type(y) + y = run_opt_pass(y_before, + transform.CombineParallelConv2D(min_num_branches=2)) y_expected = expected(x, w1, w2, w3, w4, channels1, channels2, channels3, channels4) - y_expected = relay.ir_pass.infer_type(y_expected) - assert relay.ir_pass.alpha_equal(y, y_expected) + y_expected = run_opt_pass(y_expected, transform.InferType()) + assert relay.analysis.alpha_equal(y, y_expected) check((1, 4, 16, 16), 4, 4, 4, 4) check((1, 4, 16, 16), 4, 8, 4, 7) @@ -101,12 +112,11 @@ def check(x_shape, channels1, channels2): scale2 = relay.var("scale2", shape=(channels2, 1, 1)) bias = relay.var("bias", shape=(channels2, 1, 1)) y_before = before(x, w1, w2, scale1, scale2, bias) - y = relay.ir_pass.infer_type(y_before) - y = relay.ir_pass.combine_parallel_conv2d(y, min_num_branches=2) - y = relay.ir_pass.infer_type(y) + y = run_opt_pass(y_before, + transform.CombineParallelConv2D(min_num_branches=2)) y_expected = expected(x, w1, w2, scale1, scale2, bias, channels1, channels2) - y_expected = relay.ir_pass.infer_type(y_expected) - assert relay.ir_pass.alpha_equal(y, y_expected) + y_expected = run_opt_pass(y_expected, transform.InferType()) + assert relay.analysis.alpha_equal(y, y_expected) check((1, 4, 16, 16), 4, 8) @@ -141,12 +151,11 @@ def check(x_shape, channels1, channels2): scale1 = relay.var("scale1", shape=(1,)) scale2 = relay.var("scale2", shape=(1,)) y_before = before(x, w1, w2, scale1, scale2) - y = relay.ir_pass.infer_type(y_before) - y = relay.ir_pass.combine_parallel_conv2d(y, min_num_branches=2) - y = relay.ir_pass.infer_type(y) + y = run_opt_pass(y_before, + transform.CombineParallelConv2D(min_num_branches=2)) y_expected = expected(x, w1, w2, scale1, scale2, channels1, channels2) - y_expected = relay.ir_pass.infer_type(y_expected) - assert relay.ir_pass.alpha_equal(y, y_expected) + y_expected = run_opt_pass(y_expected, transform.InferType()) + assert relay.analysis.alpha_equal(y, y_expected) check((1, 4, 16, 16), 4, 8) @@ -178,12 +187,11 @@ def check(x_shape, repeat): out_c = in_c // 2 w = relay.var("w", shape=(out_c, in_c, 1, 1)) y_before = before(x, w, repeat) - y = relay.ir_pass.infer_type(y_before) - y = relay.ir_pass.combine_parallel_conv2d(y, min_num_branches=2) - y = relay.ir_pass.infer_type(y) + y = run_opt_pass(y_before, + transform.CombineParallelConv2D(min_num_branches=2)) y_expected = expected(x, w, out_c, repeat) - y_expected = relay.ir_pass.infer_type(y_expected) - assert relay.ir_pass.alpha_equal(y, y_expected) + y_expected = run_opt_pass(y_expected, transform.InferType()) + assert relay.analysis.alpha_equal(y, y_expected) check((1, 4, 16, 16), 4) diff --git a/tests/python/relay/test_pass_dead_code_elimination.py b/tests/python/relay/test_pass_dead_code_elimination.py index c3b12fea4486..17a836beecd5 100644 
--- a/tests/python/relay/test_pass_dead_code_elimination.py +++ b/tests/python/relay/test_pass_dead_code_elimination.py @@ -19,7 +19,7 @@ import tvm from tvm import relay from tvm.relay import Function, transform -from tvm.relay.ir_pass import alpha_equal, graph_equal, free_vars +from tvm.relay.analysis import alpha_equal, graph_equal, free_vars from tvm.relay.op import log, add, equal, subtract @@ -45,28 +45,36 @@ def __init__(self): e = env() +def run_opt_pass(expr, opt_pass): + assert isinstance(opt_pass, transform.Pass) + mod = relay.Module.from_expr(expr) + mod = opt_pass(mod) + entry = mod[mod.entry_func] + return entry if isinstance(expr, relay.Function) else entry.body + + def test_let(): orig = relay.Let(e.x, e.y, e.z) - orig = transform.OptimizeOnExpr(orig, transform.DeadCodeElimination()) + orig = run_opt_pass(orig, transform.DeadCodeElimination()) assert alpha_equal(Function(free_vars(orig), orig), Function([e.z], e.z)) def test_used_let(): orig = relay.Let(e.c, e.one, e.c + e.c) - orig = transform.OptimizeOnExpr(orig, transform.DeadCodeElimination()) + orig = run_opt_pass(orig, transform.DeadCodeElimination()) expected = relay.Let(e.c, e.one, e.c + e.c) assert alpha_equal(Function([e.c], orig), Function([e.c], expected)) @nottest def test_inline(): orig = relay.Let(e.a, e.b, relay.Let(e.c, e.d, e.c)) - orig = transform.OptimizeOnExpr(orig, transform.DeadCodeElimination()) + orig = run_opt_pass(orig, transform.DeadCodeElimination()) assert alpha_equal(Function(free_vars(orig), orig), Function([e.d], e.d)) def test_chain_unused_let(): orig = relay.Let(e.a, e.b, relay.Let(e.c, e.d, e.e)) - orig = transform.OptimizeOnExpr(orig, transform.DeadCodeElimination()) + orig = run_opt_pass(orig, transform.DeadCodeElimination()) assert alpha_equal(Function(free_vars(orig), orig), Function([e.e], e.e)) @@ -93,17 +101,17 @@ def test_recursion(): log(data)])) value = relay.Function([n, data], funcbody, e.float32, []) orig = relay.Let(f, value, relay.Call(f, [relay.const(2), relay.const(10000.0)])) - dced = transform.OptimizeOnExpr(orig, transform.DeadCodeElimination()) - orig = transform.OptimizeOnExpr(orig, transform.InferType()) + dced = run_opt_pass(orig, transform.DeadCodeElimination()) + orig = run_opt_pass(orig, transform.InferType()) assert graph_equal(dced, orig) - dced = transform.OptimizeOnExpr(relay.Let(f, value, e.three), - transform.DeadCodeElimination()) + dced = run_opt_pass(relay.Let(f, value, e.three), + transform.DeadCodeElimination()) assert alpha_equal(dced, e.three) def test_op_let(): - dced = transform.OptimizeOnExpr(add(relay.Let(e.a, e.one, e.three), e.two), - transform.DeadCodeElimination()) + dced = run_opt_pass(add(relay.Let(e.a, e.one, e.three), e.two), + transform.DeadCodeElimination()) assert alpha_equal(dced, add(e.three, e.two)) @@ -112,10 +120,10 @@ def test_tuple_get_item(): t = relay.Var('t', tt) a = relay.Var('a') g = relay.TupleGetItem(t, 0) - dced = transform.OptimizeOnExpr(g, transform.DeadCodeElimination()) + dced = run_opt_pass(g, transform.DeadCodeElimination()) assert alpha_equal(Function(free_vars(dced), dced), Function(free_vars(g), g)) orig = relay.TupleGetItem(relay.Let(a, e.one, t), 0) - dced = transform.OptimizeOnExpr(orig, transform.DeadCodeElimination()) + dced = run_opt_pass(orig, transform.DeadCodeElimination()) assert alpha_equal(Function(free_vars(dced), dced), Function(free_vars(g), g)) diff --git a/tests/python/relay/test_pass_eliminate_common_subexpr.py b/tests/python/relay/test_pass_eliminate_common_subexpr.py index 
1ebd834d4da7..f08d0dfd1f26 100644 --- a/tests/python/relay/test_pass_eliminate_common_subexpr.py +++ b/tests/python/relay/test_pass_eliminate_common_subexpr.py @@ -17,7 +17,15 @@ """Test eliminate common subexpr pass""" from tvm import relay from tvm.relay.op import register_alter_op_layout -from tvm.relay import ir_pass +from tvm.relay import transform, analysis + + +def run_opt_pass(expr, opt_pass): + assert isinstance(opt_pass, transform.Pass) + mod = relay.Module.from_expr(expr) + mod = opt_pass(mod) + entry = mod[mod.entry_func] + return entry if isinstance(expr, relay.Function) else entry.body def test_simple(): @@ -37,11 +45,11 @@ def expected(): y = relay.add(y, relay.const(1.0, "float32")) y = relay.add(y, y) f = relay.Function([x], y) - return f + return run_opt_pass(f, transform.InferType()) z = before() - z = ir_pass.eliminate_common_subexpr(z) - assert ir_pass.alpha_equal(z, expected()) + z = run_opt_pass(z, transform.EliminateCommonSubexpr()) + assert analysis.alpha_equal(z, expected()) def test_callback(): @@ -62,7 +70,7 @@ def expected(): y2 = relay.add(y, relay.const(1.0, "float32")) y = relay.add(y1, y2) f = relay.Function([x], y) - return f + return run_opt_pass(f, transform.InferType()) def fskip(expr): if isinstance(expr, relay.expr.Call) and expr.op.name == 'add': @@ -70,8 +78,8 @@ def fskip(expr): return False z = before() - z = ir_pass.eliminate_common_subexpr(z, fskip) - assert ir_pass.alpha_equal(z, expected()) + z = run_opt_pass(z, transform.EliminateCommonSubexpr(fskip)) + assert analysis.alpha_equal(z, expected()) if __name__ == "__main__": diff --git a/tests/python/relay/test_pass_eta_expand.py b/tests/python/relay/test_pass_eta_expand.py index 4e20b02357d3..5308e472129a 100644 --- a/tests/python/relay/test_pass_eta_expand.py +++ b/tests/python/relay/test_pass_eta_expand.py @@ -30,10 +30,11 @@ def test_eta_expand_basic(): y = relay.var('y', 'int32') expected = relay.Function([y], orig(y)) - - got = relay.ir_pass.infer_type(got, mod) - expected = relay.ir_pass.infer_type(expected, mod) - assert(relay.ir_pass.alpha_equal(got, expected)) + gv = relay.GlobalVar("gv") + mod[gv] = expected + mod = _transform.InferType()(mod) + expected = mod["gv"] + assert(relay.analysis.alpha_equal(got, expected)) if __name__ == "__main__": test_eta_expand_basic() diff --git a/tests/python/relay/test_pass_fold_constant.py b/tests/python/relay/test_pass_fold_constant.py index 2abeaa8f8db8..881ec8f912c9 100644 --- a/tests/python/relay/test_pass_fold_constant.py +++ b/tests/python/relay/test_pass_fold_constant.py @@ -17,13 +17,24 @@ import numpy as np import tvm from tvm import relay +from tvm.relay import transform + + +def run_opt_pass(expr, opt_pass): + assert isinstance(opt_pass, transform.Pass) + + mod = relay.Module.from_expr(expr) + mod = opt_pass(mod) + entry = mod[mod.entry_func] + return entry if isinstance(expr, relay.Function) else entry.body def test_fold_const(): c_data = np.array([1, 2, 3]).astype("float32") + t = relay.TensorType([1, 2, 3], "float32") def before(): c = relay.const(c_data) - x = relay.var("x") + x = relay.var("x", t) y = relay.add(c, c) y = relay.multiply(y, relay.const(2, "float32")) y = relay.add(x, y) @@ -31,7 +42,7 @@ def before(): return relay.Function([x], z) def expected(): - x = relay.var("x") + x = relay.var("x", t) c_folded = (c_data + c_data) * 2 y = relay.add(x, relay.const(c_folded)) z = relay.add(y, relay.const(c_data)) @@ -39,19 +50,21 @@ def expected(): def fail(x): raise RuntimeError() + # the fold constant should work on any context. 
with tvm.build_config(add_lower_pass=[(0, fail)]): with tvm.target.create("cuda"): - zz = relay.ir_pass.fold_constant(before()) - zexpected = expected() - assert relay.ir_pass.alpha_equal(zz, zexpected) + zz = run_opt_pass(before(), transform.FoldConstant()) + zexpected = run_opt_pass(expected(), transform.InferType()) + assert relay.analysis.alpha_equal(zz, zexpected) def test_fold_let(): c_data = np.array(1).astype("float32") + t = relay.TensorType([1], "float32") def before(): sb = relay.ScopeBuilder() - x = relay.var("x") + x = relay.var("x", t) t1 = sb.let("t1", relay.const(c_data)) t2 = sb.let("t2", relay.add(t1, t1)) t3 = sb.let("t3", relay.add(t2, x)) @@ -60,22 +73,23 @@ def before(): def expected(): sb = relay.ScopeBuilder() - x = relay.var("x") + x = relay.var("x", t) c_folded = (c_data + c_data) t3 = sb.let("t3", relay.add(relay.const(c_folded), x)) sb.ret(t3) return relay.Function([x], sb.get()) - zz = relay.ir_pass.fold_constant(before()) - zexpected = expected() - assert relay.ir_pass.graph_equal(zz, zexpected) + zz = run_opt_pass(before(), transform.FoldConstant()) + zexpected = run_opt_pass(expected(), transform.InferType()) + assert relay.analysis.graph_equal(zz, zexpected) def test_fold_tuple(): c_data = np.array(1).astype("float32") + t = relay.TensorType([1], "float32") def before(): c = relay.const(c_data) - x = relay.var("x") + x = relay.var("x", t) y = relay.Tuple([x, c]) z = relay.add(y[1], c) z = relay.add(z, y[0]) @@ -83,13 +97,13 @@ def before(): def expected(): c = relay.const(c_data + c_data) - x = relay.var("x") + x = relay.var("x", t) z = relay.add(c, x) return relay.Function([x], z) - zz = relay.ir_pass.fold_constant(before()) - zexpected = expected() - assert relay.ir_pass.graph_equal(zz, zexpected) + zz = run_opt_pass(before(), transform.FoldConstant()) + zexpected = run_opt_pass(expected(), transform.InferType()) + assert relay.analysis.graph_equal(zz, zexpected) def test_fold_concat(): @@ -106,9 +120,9 @@ def expected(): y = relay.const(y_data) return relay.Function([], y) - zz = relay.ir_pass.fold_constant(before()) - zexpected = expected() - assert relay.ir_pass.graph_equal(zz, zexpected) + zz = run_opt_pass(before(), transform.FoldConstant()) + zexpected = run_opt_pass(expected(), transform.InferType()) + assert relay.analysis.graph_equal(zz, zexpected) def test_fold_shape_of(): @@ -123,17 +137,13 @@ def expected(dtype): x = relay.var("x", shape=c_shape, dtype="float32") y = relay.var("y", shape=c_shape, dtype="float32") z = relay.const(np.array(c_shape).astype(dtype), dtype=dtype) - return relay.ir_pass.infer_type(relay.Function([x, y], z)) + func = relay.Function([x, y], z) + return func for dtype in ["int32", "float32"]: - zbefore = before(dtype) - zz = relay.ir_pass.fold_constant(zbefore) - assert relay.ir_pass.graph_equal(zz, zbefore) - - zz = relay.ir_pass.infer_type(zbefore) - zz = relay.ir_pass.fold_constant(zz) - zexpected = expected(dtype) - assert relay.ir_pass.graph_equal(zz, zexpected) + zz = run_opt_pass(before(dtype), transform.FoldConstant()) + zexpected = run_opt_pass(expected(dtype), transform.InferType()) + assert relay.analysis.graph_equal(zz, zexpected) if __name__ == "__main__": diff --git a/tests/python/relay/test_pass_fold_scale_axis.py b/tests/python/relay/test_pass_fold_scale_axis.py index 383f0072059f..70354fbdaa3b 100644 --- a/tests/python/relay/test_pass_fold_scale_axis.py +++ b/tests/python/relay/test_pass_fold_scale_axis.py @@ -14,13 +14,23 @@ # KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations # under the License. -from tvm import relay import numpy as np +from tvm import relay +from tvm.relay import transform + def _get_positive_scale(size): return np.random.uniform(0.5, 1, size=size).astype('float32') +def run_opt_pass(expr, opt_pass): + assert isinstance(opt_pass, transform.Pass) + mod = relay.Module.from_expr(expr) + mod = opt_pass(mod) + entry = mod[mod.entry_func] + return entry if isinstance(expr, relay.Function) else entry.body + + def test_fold_fwd_simple(): """Simple testcase.""" def before(x, conv_weight, in_bias, in_scale, channels): @@ -59,15 +69,15 @@ def check(shape, channels): in_bias = relay.var("in_bias", shape=(in_channels,)) in_scale = relay.const(_get_positive_scale((in_channels, 1, 1))) y1 = before(x, weight, in_bias, in_scale, channels) - y1 = relay.ir_pass.infer_type(y1) + y1 = run_opt_pass(y1, transform.InferType()) type_dict = {x.name_hint:x.checked_type for x in y1.params} weight = relay.var("weight", type_dict["weight"]) - y1_folded = relay.ir_pass.forward_fold_scale_axis(y1) + y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis()) y1_expected = expected(x, weight, in_bias, in_scale, channels) - y1_folded = relay.ir_pass.infer_type(y1_folded) - y1_expected = relay.ir_pass.infer_type(y1_expected) - assert relay.ir_pass.alpha_equal(y1_folded, y1_expected) + y1_folded = run_opt_pass(y1_folded, transform.InferType()) + y1_expected = run_opt_pass(y1_expected, transform.InferType()) + assert relay.analysis.alpha_equal(y1_folded, y1_expected) check((2, 4, 10, 10), 2) @@ -129,14 +139,13 @@ def check(shape, channels): in_bias = relay.var("in_bias", shape=(in_channels,)) in_scale = relay.const(_get_positive_scale(in_channels,)) y1 = before(x, weight, in_bias, in_scale, channels) - y1 = relay.ir_pass.infer_type(y1) - y1_folded = relay.ir_pass.forward_fold_scale_axis(y1) + y1 = run_opt_pass(y1, transform.InferType()) + y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis()) type_dict = {x.name_hint:x.checked_type for x in y1.params} weight = relay.var("weight", type_dict["weight"]) y1_expected = expected(x, weight, in_bias, in_scale, channels) - y1_folded = relay.ir_pass.infer_type(y1_folded) - y1_expected = relay.ir_pass.infer_type(y1_expected) - assert relay.ir_pass.alpha_equal(y1_folded, y1_expected) + y1_expected = run_opt_pass(y1_expected, transform.InferType()) + assert relay.analysis.alpha_equal(y1_folded, y1_expected) check((2, 4, 10, 3), 3) @@ -152,7 +161,7 @@ def before(x, conv_weight, in_bias, in_scale, channels): data_layout="NHWC", padding=(1, 1)) z = relay.add(y1, x) - return relay.Function(relay.ir_pass.free_vars(z), z) + return relay.Function(relay.analysis.free_vars(z), z) def check(shape, channels): x = relay.var("x", shape=shape) @@ -163,9 +172,9 @@ def check(shape, channels): in_bias = relay.var("in_bias", shape=(in_channels,)) in_scale = relay.const(_get_positive_scale(size=(in_channels,))) y1 = before(x, weight, in_bias, in_scale, channels) - y1 = relay.ir_pass.infer_type(y1) - y1_folded = relay.ir_pass.forward_fold_scale_axis(y1) - assert relay.ir_pass.alpha_equal(y1, y1_folded) + y1 = run_opt_pass(y1, transform.InferType()) + y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis()) + assert relay.analysis.alpha_equal(y1, y1_folded) check((2, 11, 10, 4), 4) @@ -181,7 +190,7 @@ def before(x, conv_weight, in_bias, in_scale, channels): data_layout="NHWC", padding=(1, 1)) z = relay.add(y1, x) - return relay.Function(relay.ir_pass.free_vars(z), 
z) + return relay.Function(relay.analysis.free_vars(z), z) def check(shape, channels, in_scale): x = relay.var("x", shape=shape) @@ -191,9 +200,9 @@ def check(shape, channels, in_scale): weight = relay.var("weight") in_bias = relay.var("in_bias", shape=(in_channels,)) y1 = before(x, weight, in_bias, in_scale, channels) - y1 = relay.ir_pass.infer_type(y1) - y1_folded = relay.ir_pass.forward_fold_scale_axis(y1) - assert relay.ir_pass.alpha_equal(y1, y1_folded) + y1 = run_opt_pass(y1, transform.InferType()) + y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis()) + assert relay.analysis.alpha_equal(y1, y1_folded) in_scale = relay.var("in_scale", shape=(4,)) check((2, 11, 10, 4), 4, in_scale) @@ -231,14 +240,13 @@ def check(shape, channels): in_scale = relay.const(-_get_positive_scale((in_channels, 1, 1))) weight = relay.var("weight") y1 = before(x, weight, in_scale, channels) - y1 = relay.ir_pass.infer_type(y1) + y1 = run_opt_pass(y1, transform.InferType()) type_dict = {x.name_hint:x.checked_type for x in y1.params} weight = relay.var("weight", type_dict["weight"]) - y1_folded = relay.ir_pass.forward_fold_scale_axis(y1) + y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis()) y1_expected = expected(x, weight, in_scale, channels) - y1_folded = relay.ir_pass.infer_type(y1_folded) - y1_expected = relay.ir_pass.infer_type(y1_expected) - assert relay.ir_pass.alpha_equal(y1_folded, y1_expected) + y1_expected = run_opt_pass(y1_expected, transform.InferType()) + assert relay.analysis.alpha_equal(y1_folded, y1_expected) check((2, 4, 10, 10), 4) @@ -283,14 +291,13 @@ def check(shape, channels): out_scale = relay.const(_get_positive_scale((channels, 1, 1))) y1 = before(x, weight, out_bias, out_scale, channels) - y1 = relay.ir_pass.infer_type(y1) + y1 = run_opt_pass(y1, transform.InferType()) type_dict = {x.name_hint:x.checked_type for x in y1.params} weight = relay.var("weight", type_dict["weight"]) - y1_folded = relay.ir_pass.backward_fold_scale_axis(y1) + y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis()) y1_expected = expected(x, weight, out_bias, out_scale, channels) - y1_folded = relay.ir_pass.infer_type(y1_folded) - y1_expected = relay.ir_pass.infer_type(y1_expected) - assert relay.ir_pass.alpha_equal(y1_folded, y1_expected) + y1_expected = run_opt_pass(y1_expected, transform.InferType()) + assert relay.analysis.alpha_equal(y1_folded, y1_expected) check((2, 4, 10, 10), 8) @@ -343,14 +350,13 @@ def check(shape, channels): out_scale = relay.const(_get_positive_scale((channels, 1, 1))) y1 = before(x, weight, out_bias, out_scale, channels) - y1 = relay.ir_pass.infer_type(y1) + y1 = run_opt_pass(y1, transform.InferType()) type_dict = {x.name_hint:x.checked_type for x in y1.params} weight = relay.var("weight", type_dict["weight"]) - y1_folded = relay.ir_pass.backward_fold_scale_axis(y1) + y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis()) y1_expected = expected(x, weight, out_bias, out_scale, channels) - y1_folded = relay.ir_pass.infer_type(y1_folded) - y1_expected = relay.ir_pass.infer_type(y1_expected) - assert relay.ir_pass.alpha_equal(y1_folded, y1_expected) + y1_expected = run_opt_pass(y1_expected, transform.InferType()) + assert relay.analysis.alpha_equal(y1_folded, y1_expected) check((2, 4, 10, 10), 8) @@ -416,14 +422,13 @@ def check(shape, channels): out_scale = relay.const(_get_positive_scale((channels,1, 1))) y1 = before(x, weight, out_bias, out_scale, channels) - y1 = relay.ir_pass.infer_type(y1) + y1 = run_opt_pass(y1, transform.InferType()) 
type_dict = {x.name_hint:x.checked_type for x in y1.params} weight = relay.var("weight", type_dict["weight"]) - y1_folded = relay.ir_pass.backward_fold_scale_axis(y1) + y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis()) y1_expected = expected(x, weight, out_bias, out_scale, channels) - y1_folded = relay.ir_pass.infer_type(y1_folded) - y1_expected = relay.ir_pass.infer_type(y1_expected) - assert relay.ir_pass.alpha_equal(y1_folded, y1_expected) + y1_expected = run_opt_pass(y1_expected, transform.InferType()) + assert relay.analysis.alpha_equal(y1_folded, y1_expected) check((2, 4, 10, 10), 4) @@ -470,9 +475,9 @@ def check(shape, channels, fbefore): out_bias = relay.var("out_bias", shape=(channels,)) out_scale = relay.const(_get_positive_scale((channels, 1, 1))) y1 = fbefore(x, weight, out_bias, out_scale, channels) - y1 = relay.ir_pass.infer_type(y1) - y1_folded = relay.ir_pass.backward_fold_scale_axis(y1) - assert relay.ir_pass.alpha_equal(y1_folded, y1) + y1 = run_opt_pass(y1, transform.InferType()) + y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis()) + assert relay.analysis.alpha_equal(y1_folded, y1) check((4, 4, 10, 10), 4, fail1) check((4, 4, 10, 10), 4, fail2) @@ -488,16 +493,16 @@ def before(x, conv_weight, out_scale, channels): padding=(1, 1)) y = relay.nn.relu(y) y = relay.multiply(x, out_scale) - return relay.Function(relay.ir_pass.free_vars(y), y) + return relay.Function(relay.analysis.free_vars(y), y) def check(shape, channels, out_scale): x = relay.var("x", shape=shape) in_channels = shape[1] weight = relay.var("weight") y1 = before(x, weight, out_scale, channels) - y1 = relay.ir_pass.infer_type(y1) - y1_folded = relay.ir_pass.forward_fold_scale_axis(y1) - assert relay.ir_pass.alpha_equal(y1, y1_folded) + y1 = run_opt_pass(y1, transform.InferType()) + y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis()) + assert relay.analysis.alpha_equal(y1, y1_folded) out_scale = relay.var("in_scale", shape=(4, 1, 1)) check((4, 4, 10, 10), 4, out_scale) @@ -533,14 +538,13 @@ def check(shape, channels): weight = relay.var("weight") out_scale = relay.const(-_get_positive_scale((channels, 1, 1))) y1 = before(x, weight, out_scale, channels) - y1 = relay.ir_pass.infer_type(y1) + y1 = run_opt_pass(y1, transform.InferType()) type_dict = {x.name_hint:x.checked_type for x in y1.params} weight = relay.var("weight", type_dict["weight"]) - y1_folded = relay.ir_pass.backward_fold_scale_axis(y1) + y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis()) y1_expected = expected(x, weight, out_scale, channels) - y1_folded = relay.ir_pass.infer_type(y1_folded) - y1_expected = relay.ir_pass.infer_type(y1_expected) - assert relay.ir_pass.alpha_equal(y1_folded, y1_expected) + y1_expected = run_opt_pass(y1_expected, transform.InferType()) + assert relay.analysis.alpha_equal(y1_folded, y1_expected) check((2, 4, 10, 10), 8) diff --git a/tests/python/relay/test_pass_fuse_ops.py b/tests/python/relay/test_pass_fuse_ops.py index 0ecbfe6b4d4a..8d358e3f805f 100644 --- a/tests/python/relay/test_pass_fuse_ops.py +++ b/tests/python/relay/test_pass_fuse_ops.py @@ -16,6 +16,16 @@ # under the License. 
import tvm from tvm import relay +from tvm.relay import transform + + +def run_opt_pass(expr, opt_pass): + assert isinstance(opt_pass, transform.Pass) + mod = relay.Module.from_expr(expr) + mod = opt_pass(mod) + entry = mod[mod.entry_func] + return entry if isinstance(expr, relay.Function) else entry.body + def test_fuse_simple(): """Simple testcase.""" @@ -37,13 +47,10 @@ def expected(): return relay.Function([x], y) z = before() - z = relay.ir_pass.infer_type(z) - zz = relay.ir_pass.fuse_ops(z, opt_level=2) - zz = relay.ir_pass.infer_type(zz) - zz = relay.ir_pass.fuse_ops(zz) - zz = relay.ir_pass.infer_type(zz) - after = relay.ir_pass.infer_type(expected()) - assert relay.ir_pass.alpha_equal(zz, after) + zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2)) + zz = run_opt_pass(z, transform.FuseOps()) + after = run_opt_pass(expected(), transform.InferType()) + assert relay.analysis.alpha_equal(zz, after) def test_conv2d_fuse(): @@ -69,7 +76,7 @@ def before(dshape): channels=16) # add can only be fused to z1 z = relay.add(z2, z3) - return relay.Function(relay.ir_pass.free_vars(z), z) + return relay.Function(relay.analysis.free_vars(z), z) def expected(dshape): # segment 0 @@ -111,15 +118,13 @@ def expected(dshape): z2 = relay.Call(f2, [y, relay.var("w3")]) z3 = relay.Call(f3, [y, relay.var("w2"), z2]) z = z3 - return relay.Function(relay.ir_pass.free_vars(z), z) + return relay.Function(relay.analysis.free_vars(z), z) dshape = (1, 16, 64, 64) z = before(dshape) - z = relay.ir_pass.infer_type(z) - zz = relay.ir_pass.fuse_ops(z, opt_level=2) - zz = relay.ir_pass.infer_type(zz) - after = relay.ir_pass.infer_type(expected(dshape)) - assert relay.ir_pass.alpha_equal(zz, after) + zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2)) + after = run_opt_pass(expected(dshape), transform.InferType()) + assert relay.analysis.alpha_equal(zz, after) def test_concatenate(): @@ -131,7 +136,7 @@ def before(dshape): upsampled = relay.nn.upsampling(pooled, scale=2, layout="NCHW") concat = relay.concatenate((upsampled, x), axis=1) out = relay.add(concat, relay.const(1, "float32")) - return relay.Function(relay.ir_pass.free_vars(out), out) + return relay.Function(relay.analysis.free_vars(out), out) def expected(dshape): x = relay.var("x", shape=dshape) @@ -152,14 +157,12 @@ def expected(dshape): dshape = (1, 16, 64, 64) z = before(dshape) - z = relay.ir_pass.infer_type(z) - zz = relay.ir_pass.fuse_ops(z, opt_level=0) - assert not relay.ir_pass.free_vars(zz) - zz = relay.ir_pass.fuse_ops(z, opt_level=2) - zz = relay.ir_pass.infer_type(zz) - assert not relay.ir_pass.free_vars(zz) - after = relay.ir_pass.infer_type(expected(dshape)) - assert relay.ir_pass.alpha_equal(zz, after) + zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=0)) + assert not relay.analysis.free_vars(zz) + zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2)) + assert not relay.analysis.free_vars(zz) + after = run_opt_pass(expected(dshape), transform.InferType()) + assert relay.analysis.alpha_equal(zz, after) def test_tuple_root(): @@ -170,7 +173,7 @@ def before(dshape): pooled = relay.nn.max_pool2d(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0)) upsampled = relay.nn.upsampling(pooled, scale=2, layout="NCHW") out = relay.Tuple((upsampled, x)) - return relay.Function(relay.ir_pass.free_vars(out), out) + return relay.Function(relay.analysis.free_vars(out), out) def expected(dshape): x = relay.var("x", shape=dshape) @@ -189,15 +192,12 @@ def expected(dshape): dshape = (1, 16, 64, 64) z = before(dshape) - z = 
relay.ir_pass.infer_type(z) - zz = relay.ir_pass.fuse_ops(z, opt_level=0) - assert not relay.ir_pass.free_vars(zz) - zz = relay.ir_pass.fuse_ops(z, opt_level=2) - zz = relay.ir_pass.infer_type(zz) - assert not relay.ir_pass.free_vars(zz) - after = relay.ir_pass.infer_type(expected(dshape)) - assert relay.ir_pass.alpha_equal(zz, after) - + zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=0)) + assert not relay.analysis.free_vars(zz) + zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2)) + assert not relay.analysis.free_vars(zz) + after = run_opt_pass(expected(dshape), transform.InferType()) + assert relay.analysis.alpha_equal(zz, after) def test_stop_fusion(): @@ -224,11 +224,9 @@ def expected(dshape): dshape = (10, 20) z = before(dshape) - z = relay.ir_pass.infer_type(z) - z = relay.ir_pass.fuse_ops(z) - z = relay.ir_pass.infer_type(z) - after = relay.ir_pass.infer_type(expected(dshape)) - assert relay.ir_pass.alpha_equal(z, after) + zz = run_opt_pass(z, transform.FuseOps()) + after = run_opt_pass(expected(dshape), transform.InferType()) + assert relay.analysis.alpha_equal(zz, after) def test_fuse_myia_regression(): @@ -261,10 +259,9 @@ def expected(dshape, dtype): dshape = () dtype = 'int64' f = before(dshape, dtype) - f = relay.ir_pass.infer_type(f) - f = relay.ir_pass.fuse_ops(f) - after = relay.ir_pass.infer_type(expected(dshape, dtype)) - assert relay.ir_pass.alpha_equal(f, after) + zz = run_opt_pass(f, transform.FuseOps()) + after = run_opt_pass(expected(dshape, dtype), transform.InferType()) + assert relay.analysis.alpha_equal(zz, after) def test_fuse_tuple_get_elemwise(): @@ -295,14 +292,12 @@ def expected(dim): dim = 10 z = before(dim) - z = relay.ir_pass.infer_type(z) - zz = relay.ir_pass.fuse_ops(z, opt_level=0) - assert not relay.ir_pass.free_vars(zz) - zz = relay.ir_pass.fuse_ops(z, opt_level=2) - zz = relay.ir_pass.infer_type(zz) - assert not relay.ir_pass.free_vars(zz) - after = relay.ir_pass.infer_type(expected(dim)) - assert relay.ir_pass.alpha_equal(zz, after) + zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=0)) + assert not relay.analysis.free_vars(zz) + zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2)) + assert not relay.analysis.free_vars(zz) + after = run_opt_pass(expected(dim), transform.InferType()) + assert relay.analysis.alpha_equal(zz, after) def test_tuple_get_root(): @@ -332,14 +327,12 @@ def expected(dim): dim = 10 z = before(dim) - z = relay.ir_pass.infer_type(z) - zz = relay.ir_pass.fuse_ops(z, opt_level=0) - assert not relay.ir_pass.free_vars(zz) - zz = relay.ir_pass.fuse_ops(z, opt_level=2) - zz = relay.ir_pass.infer_type(zz) - assert not relay.ir_pass.free_vars(zz) - after = relay.ir_pass.infer_type(expected(dim)) - assert relay.ir_pass.alpha_equal(zz, after) + zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=0)) + assert not relay.analysis.free_vars(zz) + zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2)) + assert not relay.analysis.free_vars(zz) + after = run_opt_pass(expected(dim), transform.InferType()) + assert relay.analysis.alpha_equal(zz, after) fuse0 = relay.transform.FuseOps(fuse_opt_level=0) @@ -356,7 +349,7 @@ def before(x): concat = relay.concatenate((y1, y2, y3), axis=1) out_inj = relay.squeeze(concat) out = relay.add(out_inj, relay.const(1, "float32")) - return relay.Function(relay.ir_pass.free_vars(out), out) + return relay.Function(relay.analysis.free_vars(out), out) def expected(p0): f0 = before(p0) @@ -370,8 +363,8 @@ def expected(p0): fuse0(relay.Module.from_expr(orig)) m = 
fuse2(relay.Module.from_expr(orig)) relay.build(m, 'llvm') - after = relay.ir_pass.infer_type(expected(x)) - assert relay.ir_pass.alpha_equal(m[m.entry_func], after) + after = run_opt_pass(expected(x), transform.InferType()) + assert relay.analysis.alpha_equal(m[m.entry_func], after) def test_tuple_consecutive(): @@ -396,7 +389,7 @@ def before(x): out = relay.add(pooled, relay.const(1, "float32")) out2 = relay.add(out, relay.const(1, "float32")) out_tup = relay.Tuple((out, out2)) - return relay.Function(relay.ir_pass.free_vars(out_tup), out_tup) + return relay.Function(relay.analysis.free_vars(out_tup), out_tup) def expected(dshape): p0 = relay.var("p0", shape=dshape) @@ -425,8 +418,8 @@ def expected(dshape): fuse0(relay.Module.from_expr(orig)) m = fuse2(relay.Module.from_expr(orig)) relay.build(m, 'llvm') - after = relay.ir_pass.infer_type(expected(dshape)) - assert relay.ir_pass.alpha_equal(m[m.entry_func], after) + after = run_opt_pass(expected(dshape), transform.InferType()) + assert relay.analysis.alpha_equal(m[m.entry_func], after) def test_inception_like(): @@ -446,16 +439,16 @@ def before(dshape): x = relay.var("x", shape=dshape) in1 = inception_like(x) in2 = inception_like(in1) - return relay.Function(relay.ir_pass.free_vars(in2), in2) + return relay.Function(relay.analysis.free_vars(in2), in2) def expected(dshape): p0 = relay.var("p0", shape=dshape) c = conv(p0) - f0 = relay.Function(relay.ir_pass.free_vars(c), c) + f0 = relay.Function(relay.analysis.free_vars(c), c) p01 = relay.var("p01", shape=dshape) c = conv(p01) - f1 = relay.Function(relay.ir_pass.free_vars(c), c) + f1 = relay.Function(relay.analysis.free_vars(c), c) p02 = relay.var("p02", shape=dshape) p12 = relay.var("p12", shape=dshape) @@ -466,11 +459,11 @@ def expected(dshape): p03 = relay.var("p03", shape=dshape2) c = conv(p03) - f2 = relay.Function(relay.ir_pass.free_vars(c), c) + f2 = relay.Function(relay.analysis.free_vars(c), c) p04 = relay.var("p04", shape=dshape2) c = conv(p04) - f3 = relay.Function(relay.ir_pass.free_vars(c), c) + f3 = relay.Function(relay.analysis.free_vars(c), c) p05 = relay.var("p05", shape=dshape) p15 = relay.var("p15", shape=dshape) @@ -485,15 +478,15 @@ def expected(dshape): c4 = relay.Call(f3, [concat, relay.var("w4")]) out = relay.Call(f_concat2, [c3, c4]) - return relay.Function(relay.ir_pass.free_vars(out), out) + return relay.Function(relay.analysis.free_vars(out), out) dshape = (1, 16, 64, 64) orig = before(dshape) fuse0(relay.Module.from_expr(orig)) m = fuse2(relay.Module.from_expr(orig)) relay.build(m, 'llvm') - after = relay.ir_pass.infer_type(expected(dshape)) - assert relay.ir_pass.alpha_equal(m[m.entry_func], after) + after = run_opt_pass(expected(dshape), transform.InferType()) + assert relay.analysis.alpha_equal(m[m.entry_func], after) def test_fuse_parallel_injective(): @@ -518,14 +511,12 @@ def expected(): return relay.Function([x], y) z = before() - z = relay.ir_pass.infer_type(z) - zz = relay.ir_pass.fuse_ops(z, opt_level=0) - assert not relay.ir_pass.free_vars(zz) - zz = relay.ir_pass.fuse_ops(z, opt_level=2) - zz = relay.ir_pass.infer_type(zz) - assert not relay.ir_pass.free_vars(zz) - after = relay.ir_pass.infer_type(expected()) - assert relay.ir_pass.alpha_equal(zz, after) + zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=0)) + assert not relay.analysis.free_vars(zz) + zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2)) + assert not relay.analysis.free_vars(zz) + after = run_opt_pass(expected(), transform.InferType()) + assert 
relay.analysis.alpha_equal(zz, after) if __name__ == "__main__": diff --git a/tests/python/relay/test_pass_gradient.py b/tests/python/relay/test_pass_gradient.py index 6fece1b0a6dd..400f5d79b1e4 100644 --- a/tests/python/relay/test_pass_gradient.py +++ b/tests/python/relay/test_pass_gradient.py @@ -14,14 +14,23 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. +import numpy as np + import tvm from tvm import relay -from tvm.relay.ir_pass import free_vars, free_type_vars, gradient -from tvm.relay import create_executor +from tvm.relay.analysis import free_vars, free_type_vars +from tvm.relay import create_executor, transform +from tvm.relay.transform import gradient from tvm.relay.prelude import Prelude from tvm.relay.testing import add_nat_definitions, make_nat_expr -import numpy as np + +def run_infer_type(expr): + mod = relay.Module.from_expr(expr) + mod = transform.InferType()(mod) + entry = mod[mod.entry_func] + return entry if isinstance(expr, relay.Function) else entry.body def rand(dtype='float32', *shape): @@ -34,7 +43,7 @@ def test_id(): t = relay.TensorType(shape, dtype) x = relay.var("x", t) func = relay.Function([x], x) - back_func = relay.ir_pass.infer_type(gradient(func)) + back_func = run_infer_type(gradient(func)) assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])])) ex = create_executor() x = rand(dtype, *shape) @@ -49,7 +58,7 @@ def test_add(): t = relay.TensorType(shape, dtype) x = relay.var("x", t) func = relay.Function([x], x + x) - back_func = relay.ir_pass.infer_type(gradient(func)) + back_func = run_infer_type(gradient(func)) assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])])) ex = create_executor() x = rand(dtype, *shape) @@ -65,7 +74,7 @@ def test_temp_add(): x = relay.var("x", t) y = x + x func = relay.Function([x], y + y) - back_func = relay.ir_pass.infer_type(gradient(func)) + back_func = run_infer_type(gradient(func)) assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])])) ex = create_executor() x = rand(dtype, *shape) @@ -80,7 +89,7 @@ def test_sub(): t = relay.TensorType(shape, dtype) x = relay.var("x", t) func = relay.Function([x], x - x) - back_func = relay.ir_pass.infer_type(gradient(func)) + back_func = run_infer_type(gradient(func)) assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])])) ex = create_executor() x = rand(dtype, *shape) @@ -103,7 +112,7 @@ def test_broadcast_add(): x = relay.var("x", t1) y = relay.var("y", t2) func = relay.Function([x, y], x + y) - full_func = relay.ir_pass.infer_type(gradient(func)) + full_func = run_infer_type(gradient(func)) assert full_func.checked_type == relay.FuncType([t1, t2], relay.TupleType([relay.TensorType(expected_forward.shape, dtype), relay.TupleType([t1, t2])])) @@ -130,7 +139,7 @@ def test_broadcast_subtract(): x = relay.var("x", t1) y = relay.var("y", t2) func = relay.Function([x, y], x - y) - full_func = relay.ir_pass.infer_type(gradient(func)) + full_func = run_infer_type(gradient(func)) assert full_func.checked_type == relay.FuncType([t1, t2], relay.TupleType([relay.TensorType(expected_forward.shape, dtype), relay.TupleType([t1, t2])])) @@ -155,7 +164,7 @@ def test_tuple(): relay.TupleGetItem(tup, 0) + relay.TupleGetItem(tup, 1) - relay.TupleGetItem(tup, 2))) - back_func =
relay.ir_pass.infer_type(gradient(func)) + back_func = run_infer_type(gradient(func)) assert back_func.checked_type == relay.FuncType([t, t, t], relay.TupleType([t, relay.TupleType([t, t, t])])) x_nd = rand(dtype, *shape) y_nd = rand(dtype, *shape) @@ -183,7 +192,10 @@ def test_pow(): double = relay.Function([x], x + x) i = relay.var("i", t) func = relay.Function([i], p.nat_iterate(double, make_nat_expr(p, 3))(i)) - back_func = relay.ir_pass.infer_type(gradient(func, mod=mod), mod=mod) + func = gradient(func, mod=mod) + mod[mod.entry_func] = func + m = transform.InferType()(mod) + back_func = m[m.entry_func] assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])])) i_nd = rand(dtype, *shape) ex = create_executor(mod=mod) @@ -203,7 +215,7 @@ def test_ref(): body = relay.Let(u, relay.RefWrite(r, relay.RefRead(r) + relay.RefRead(r)), body) body = relay.Let(r, relay.RefCreate(x), body) func = relay.Function([x], body) - back_func = relay.ir_pass.infer_type(gradient(func)) + back_func = run_infer_type(gradient(func)) assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])])) x_nd = rand(dtype, *shape) ex = create_executor() @@ -218,11 +230,11 @@ def test_square_second_order(): t = relay.TensorType(shape, dtype) x = relay.var("x", t) func = relay.Function([x], x * x) - back_func = relay.ir_pass.infer_type(gradient(func)) + back_func = run_infer_type(gradient(func)) y = relay.var("y", t) back_func_adjusted = relay.Function([y], relay.TupleGetItem(relay.TupleGetItem(back_func(y), 1), 0)) - back_func_adjusted = relay.ir_pass.infer_type(back_func_adjusted) - back_back_func = relay.ir_pass.infer_type(gradient(back_func_adjusted)) + back_func_adjusted = run_infer_type(back_func_adjusted) + back_back_func = run_infer_type(gradient(back_func_adjusted)) assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])])) x_nd = rand(dtype, *shape) ex = create_executor() @@ -237,8 +249,10 @@ def test_if(): cond = relay.var("cond", shape=(), dtype='uint1') net = relay.If(cond, x, y) net = relay.log(net) - net = relay.ir_pass.infer_type(relay.Function(relay.ir_pass.free_vars(net), net)) - back_func = relay.ir_pass.infer_type(relay.ir_pass.gradient(net, mode='higher_order')) + func = relay.Function(free_vars(net), net) + net = run_infer_type(func) + net = gradient(net, mode='higher_order') + net = run_infer_type(net) if __name__ == "__main__": diff --git a/tests/python/relay/test_pass_mac_count.py b/tests/python/relay/test_pass_mac_count.py index a7739a644473..e68c748d1bb1 100644 --- a/tests/python/relay/test_pass_mac_count.py +++ b/tests/python/relay/test_pass_mac_count.py @@ -18,6 +18,16 @@ import numpy as np import tvm from tvm import relay +from tvm.relay import analysis, transform + + +def run_opt_pass(expr, opt_pass): + assert isinstance(opt_pass, transform.Pass) + mod = relay.Module.from_expr(expr) + mod = opt_pass(mod) + entry = mod[mod.entry_func] + return entry if isinstance(expr, relay.Function) else entry.body + def test_gemm(): n = 512 @@ -30,8 +40,8 @@ def test_gemm(): gemm = relay.nn.dense(data1, data2) func = relay.Function([data1, data2], relay.Tuple(tvm.convert([gemm]))) - func = relay.ir_pass.infer_type(func) - compute_count = relay.ir_pass.get_total_mac_number(func) + func = run_opt_pass(func, transform.InferType()) + compute_count = analysis.get_total_mac_number(func) expect_count = n * m * k assert compute_count == expect_count @@ -56,10 +66,9 @@ def test_conv(): 
channels=output_channel, kernel_size=(kh, kw), padding=(h_padding, w_padding)) - func = relay.Function([data, weight], - relay.Tuple(tvm.convert([conv2d]))) - func = relay.ir_pass.infer_type(func) - compute_count = relay.ir_pass.get_total_mac_number(func) + func = relay.Function([data, weight], relay.Tuple(tvm.convert([conv2d]))) + func = run_opt_pass(func, transform.InferType()) + compute_count = analysis.get_total_mac_number(func) expect_count = batch_size * input_channel * oh * ow * output_channel * kh * kw assert compute_count == expect_count @@ -92,11 +101,9 @@ def test_simple_network(): func = relay.Function([data1, data2, weight_conv, weight_dense], relay.Tuple(tvm.convert([conv2d_1, conv2d_2, dense_1, add, flattened]))) - func = relay.ir_pass.infer_type(func) # alter the CONV 2D data layout to test - func = relay.ir_pass.alter_op_layout(func) - func = relay.ir_pass.infer_type(func) - compute_count = relay.ir_pass.get_total_mac_number(func) + func = run_opt_pass(func, transform.AlterOpLayout()) + compute_count = analysis.get_total_mac_number(func) expect_count = 231411712 assert compute_count == expect_count @@ -123,8 +130,8 @@ def test_depthwise_conv2d(): relay.Tuple(tvm.convert([depthwise_conv2d_1, depthwise_conv2d_2, add]))) - func = relay.ir_pass.infer_type(func) - compute_count = relay.ir_pass.get_total_mac_number(func) + func = run_opt_pass(func, transform.InferType()) + compute_count = analysis.get_total_mac_number(func) assert compute_count == 2 * np.prod(dshape) * 3*3 def test_conv_2d_transpose(): @@ -150,8 +157,8 @@ def test_conv_2d_transpose(): padding=(h_padding, w_padding)) func = relay.Function([data, weight], relay.Tuple(tvm.convert([conv2d_transpose]))) - func = relay.ir_pass.infer_type(func) - compute_count = relay.ir_pass.get_total_mac_number(func) + func = run_opt_pass(func, transform.InferType()) + compute_count = analysis.get_total_mac_number(func) expect_count = batch_size * input_channel * oh * ow * output_channel * kh * kw assert compute_count == expect_count diff --git a/tests/python/relay/test_pass_manager.py b/tests/python/relay/test_pass_manager.py index a8f50bdb8f55..930dbe045198 100644 --- a/tests/python/relay/test_pass_manager.py +++ b/tests/python/relay/test_pass_manager.py @@ -21,11 +21,18 @@ from tvm import relay from tvm.relay import ExprFunctor from tvm.relay import Function, Call -from tvm.relay import ir_pass +from tvm.relay import analysis from tvm.relay import transform as _transform from tvm.relay.testing import ctx_list +def run_infer_type(expr): + mod = relay.Module.from_expr(expr) + mod = _transform.InferType()(mod) + entry = mod[mod.entry_func] + return entry if isinstance(expr, relay.Function) else entry.body + + def get_var_func(): shape = (5, 10) tp = relay.TensorType(shape, "float32") @@ -107,9 +114,9 @@ def get_rand(shape, dtype='float32'): def check_func(func, ref_func): - func = ir_pass.infer_type(func) - ref_func = ir_pass.infer_type(ref_func) - assert ir_pass.graph_equal(func, ref_func) + func = run_infer_type(func) + ref_func = run_infer_type(ref_func) + assert analysis.graph_equal(func, ref_func) def test_module_pass(): @@ -493,8 +500,8 @@ def expected(): mod = seq(mod) zz = mod["main"] - zexpected = ir_pass.infer_type(expected()) - assert relay.ir_pass.alpha_equal(zz, zexpected) + zexpected = run_infer_type(expected()) + assert analysis.alpha_equal(zz, zexpected) if __name__ == "__main__": diff --git a/tests/python/relay/test_pass_partial_eval.py b/tests/python/relay/test_pass_partial_eval.py index f2aedd1905d4..6a7f59c91daa 
100644 --- a/tests/python/relay/test_pass_partial_eval.py +++ b/tests/python/relay/test_pass_partial_eval.py @@ -18,12 +18,13 @@ import numpy as np import tvm from tvm import relay -from tvm.relay.ir_pass import alpha_equal, gradient +from tvm.relay.analysis import alpha_equal from tvm.relay.prelude import Prelude from tvm.relay import op, create_executor, transform from tvm.relay import Var, TypeVar, TupleGetItem, Let, Function, const, RefRead, RefWrite, RefCreate from tvm.relay import TensorType, Tuple, If, Module, Clause, PatternConstructor, PatternVar, Match from tvm.relay import GlobalVar, Call +from tvm.relay.transform import gradient from tvm.relay.testing import add_nat_definitions, make_nat_expr def check_eval(expr, expected_result, mod=None, rtol=1e-07): @@ -34,11 +35,19 @@ def check_eval(expr, expected_result, mod=None, rtol=1e-07): np.testing.assert_allclose(result.asnumpy(), expected_result, rtol=rtol) +def run_opt_pass(expr, passes): + passes = passes if isinstance(passes, list) else [passes] + mod = relay.Module.from_expr(expr) + seq = transform.Sequential(passes) + with transform.PassContext(opt_level=3): + mod = seq(mod) + entry = mod[mod.entry_func] + return entry if isinstance(expr, relay.Function) else entry.body + + def tipe(expr): - return transform.OptimizeOnExpr(expr, - [transform.InferType(), - transform.PartialEvaluate(), - transform.InferType()]) + return run_opt_pass(expr, [transform.PartialEvaluate(), + transform.InferType()]) def dcpe(expr, mod=None, grad=False): @@ -52,7 +61,7 @@ def dcpe(expr, mod=None, grad=False): seq = transform.Sequential(passes) mod = seq(mod) return mod[mod.entry_func] - return transform.OptimizeOnExpr(expr, passes) + return run_opt_pass(expr, passes) def test_tuple(): @@ -61,7 +70,7 @@ def test_tuple(): body = TupleGetItem(relay.Tuple([relay.const(4.0), x]), 1) f = Function([x], body, None, [t]) expected = relay.Function([x], x, None, [t]) - expected = transform.OptimizeOnExpr(expected, transform.InferType()) + expected = run_opt_pass(expected, transform.InferType()) assert alpha_equal(dcpe(f), expected) @@ -82,8 +91,7 @@ def test_ref(): body = Let(x, RefWrite(r, RefRead(r) * RefRead(r)), body) body = Let(r, RefCreate(d), body) square = Function([d], body) - expected = transform.OptimizeOnExpr(Function([d], d * d), - transform.InferType()) + expected = run_opt_pass(Function([d], d * d), transform.InferType()) assert alpha_equal(dcpe(square), expected) @@ -95,7 +103,7 @@ def test_empty_ad(): f = Function([d], d) g = dcpe(f, grad=True) expected = Function([d], Tuple([d, Tuple([op.ones_like(d)])])) - expected = transform.OptimizeOnExpr(expected, transform.InferType()) + expected = run_opt_pass(expected, transform.InferType()) assert alpha_equal(g, expected) @@ -114,7 +122,7 @@ def test_ad(): body = Tuple([x, Tuple([grad])]) body = relay.Let(x1, o, body) expected = Function([d], relay.Let(x, m, body)) - expected = transform.OptimizeOnExpr(expected, transform.InferType()) + expected = run_opt_pass(expected, transform.InferType()) assert alpha_equal(g, expected) diff --git a/tests/python/relay/test_pass_quantize.py b/tests/python/relay/test_pass_quantize.py index fe62c3b5cea4..21aa02df7f3a 100644 --- a/tests/python/relay/test_pass_quantize.py +++ b/tests/python/relay/test_pass_quantize.py @@ -19,10 +19,18 @@ import tvm from tvm import relay from tvm.relay import quantize as qtz +from tvm.relay import transform + + +def run_infer_type(expr): + mod = relay.Module.from_expr(expr) + mod = transform.InferType()(mod) + entry = mod[mod.entry_func] 
+ return entry if isinstance(expr, relay.Function) else entry.body def make_dataset(graph, size=100): - args = relay.ir_pass.infer_type(graph).params + args = run_infer_type(graph).params def create_arr(var): ttype = var.type_annotation np_arr = np.random.uniform(-1.0, 1.0, size=ttype.concrete_shape).astype(ttype.dtype) @@ -40,7 +48,7 @@ def create_arr(var): def test_simulated_quantize(): data = relay.var("data", relay.ty.TensorType((3, 4, 5, 6), "float32")) out = qtz._annotate.attach_simulated_quantize(data, 1) - out = relay.ir_pass.infer_type(out) + out = run_infer_type(out) assert out.checked_type == out.args[0].checked_type assert out.args[1].checked_type == relay.ty.TensorType(tuple(), "float32") assert out.args[2].checked_type == relay.ty.TensorType(tuple(), "float32") @@ -59,7 +67,7 @@ def quantize_weight(arr): def make_graph(data): weight = relay.var("conv_weight") out = relay.nn.conv2d(data, weight, kernel_size=(3, 3), padding=(1, 1), channels=c) - out = relay.Function(relay.ir_pass.free_vars(out), out) + out = relay.Function(relay.analysis.free_vars(out), out) return out def make_qgraph(data, weight): @@ -72,7 +80,7 @@ def make_qgraph(data, weight): padding=(1, 1), channels=c, out_dtype='int32') out = out.astype('float32') out = relay.multiply(out, relay.const(0.00024414062)) - out = relay.Function(relay.ir_pass.free_vars(out), out) + out = relay.Function(relay.analysis.free_vars(out), out) return out np.random.seed(42) @@ -84,11 +92,11 @@ def make_qgraph(data, weight): with qtz.qconfig(skip_conv_layers=None, global_scale=4.0, round_for_shift=False, store_lowbit_output=False): qgraph0 = qtz.quantize(graph, params) - qgraph0 = relay.ir_pass.infer_type(qgraph0) + qgraph0 = run_infer_type(qgraph0) conv_weight = quantize_weight(params['conv_weight']) qgraph1 = make_qgraph(data, conv_weight) - qgraph1 = relay.ir_pass.infer_type(qgraph1) + qgraph1 = run_infer_type(qgraph1) graph = relay.create_executor('graph') res0 = graph.evaluate(qgraph0)(dataset[0]['data']) diff --git a/tests/python/relay/test_pass_simplify_inference.py b/tests/python/relay/test_pass_simplify_inference.py index aad1d9fc6cf5..4e62fa6dcb08 100644 --- a/tests/python/relay/test_pass_simplify_inference.py +++ b/tests/python/relay/test_pass_simplify_inference.py @@ -15,7 +15,7 @@ # specific language governing permissions and limitations # under the License. 
from tvm import relay as rly -from tvm.relay.ir_pass import simplify_inference, alpha_equal +from tvm.relay.transform import SimplifyInference def test_simplify_batchnorm(dtype='float32'): def simple_bn(x, gamma, beta, moving_mean, moving_var, @@ -49,10 +49,13 @@ def check(dim, axis, nstep): y2 = simple_bn(y2 + rly.const(1, dtype), gamma, beta, moving_mean, moving_var, epsilon=eps, axis=axis, shape=ttype1.shape) - y1 = rly.ir_pass.infer_type(y1) - y1 = simplify_inference(y1) - assert rly.ir_pass.graph_equal(y1, y2) + mod = rly.Module.from_expr(y1) + simplify = SimplifyInference() + mod = simplify(mod) + y1 = mod["main"].body + + assert rly.analysis.graph_equal(y1, y2) check(2, 1, 1) check(4, 1, 1) diff --git a/tests/python/relay/test_pass_to_a_normal_form.py b/tests/python/relay/test_pass_to_a_normal_form.py index e74168141e63..c12298e465df 100644 --- a/tests/python/relay/test_pass_to_a_normal_form.py +++ b/tests/python/relay/test_pass_to_a_normal_form.py @@ -17,13 +17,23 @@ import numpy as np import tvm from tvm import relay -from tvm.relay.ir_pass import alpha_equal, detect_feature +from tvm.relay.analysis import alpha_equal, detect_feature from tvm.relay import op, create_executor, transform from tvm.relay.prelude import Prelude from tvm.relay.testing import add_nat_definitions, count from tvm.relay.feature import Feature +def run_opt_pass(expr, passes): + passes = passes if isinstance(passes, list) else [passes] + mod = relay.Module.from_expr(expr) + seq = transform.Sequential(passes) + with transform.PassContext(opt_level=3): + mod = seq(mod) + entry = mod[mod.entry_func] + return entry if isinstance(expr, relay.Function) else entry.body + + def check_eval(expr, expected_result, mod=None, rtol=1e-07): ctx = tvm.context("llvm", 0) intrp = create_executor(mod=mod, ctx=ctx, target="llvm") @@ -38,7 +48,7 @@ def test_explicit_bound(): z = op.add(y, y) f = relay.Function([], op.add(z, z)) assert not Feature.fLet in detect_feature(f) - anf = transform.OptimizeOnExpr(f, transform.ToANormalForm()) + anf = run_opt_pass(f, transform.ToANormalForm()) assert Feature.fLet in detect_feature(anf) check_eval(f(), 8.0) check_eval(anf(), 8.0) @@ -52,8 +62,7 @@ def test_order(): x = relay.const(1) val = x + y * z check_eval(val, 7.0) - anf = transform.OptimizeOnExpr(val, [transform.ToANormalForm(), - transform.InferType()]) + anf = run_opt_pass(val, [transform.ToANormalForm(), transform.InferType()]) a = relay.Var('a', relay.IncompleteType()) b = relay.Var('b', relay.IncompleteType()) c = relay.Var('c', relay.IncompleteType()) @@ -65,16 +74,14 @@ def test_order(): expected_output = relay.Let(c, z, expected_output) expected_output = relay.Let(b, y, expected_output) expected_output = relay.Let(a, x, expected_output) - expected_output = transform.OptimizeOnExpr(expected_output, - transform.InferType()) + expected_output = run_opt_pass(expected_output, transform.InferType()) assert alpha_equal(anf, expected_output) def test_if(): cond = relay.const(True) x = relay.If(cond, relay.const(2), relay.const(3)) - anf = transform.OptimizeOnExpr(x, [transform.ToANormalForm(), - transform.InferType()]) + anf = run_opt_pass(x, [transform.ToANormalForm(), transform.InferType()]) a = relay.Var('a', relay.IncompleteType()) b = relay.Var('b', relay.IncompleteType()) c = relay.Var('c', relay.IncompleteType()) @@ -84,8 +91,7 @@ def test_if(): expected_output = relay.If(c, true_branch, false_branch) expected_output = relay.Let(d, expected_output, d) expected_output = relay.Let(c, cond, expected_output) - expected_output = 
transform.OptimizeOnExpr(expected_output, - transform.InferType()) + expected_output = run_opt_pass(expected_output, transform.InferType()) assert alpha_equal(anf, expected_output) @@ -133,7 +139,7 @@ def test_ref(): body = relay.Let(iv, relay.RefRead(i), body) body = relay.Let(i, relay.RefCreate(relay.const(1)), body) check_eval(body, 3) - opt_body = transform.OptimizeOnExpr(body, transform.ToANormalForm()) + opt_body = run_opt_pass(body, transform.ToANormalForm()) check_eval(opt_body, 3) @@ -165,7 +171,7 @@ def test_let(): body = relay.Let(y, x, x + y) body = relay.Let(x, d, body) check_eval(body, 8) - opt_body = transform.OptimizeOnExpr(body, transform.ToANormalForm()) + opt_body = run_opt_pass(body, transform.ToANormalForm()) check_eval(opt_body, 8) @@ -174,7 +180,7 @@ def test_function(): x = relay.Var("x", t) f = relay.Function([x], x + x) d = relay.const(4.0, 'float32') - anf_f = transform.OptimizeOnExpr(f, transform.ToANormalForm()) + anf_f = run_opt_pass(f, transform.ToANormalForm()) assert isinstance(anf_f, relay.Function) check_eval(f(d), 8) check_eval(anf_f(d), 8) diff --git a/tests/python/relay/test_pass_to_graph_normal_form.py b/tests/python/relay/test_pass_to_graph_normal_form.py index 09db48f633d9..9e8c5887ac58 100644 --- a/tests/python/relay/test_pass_to_graph_normal_form.py +++ b/tests/python/relay/test_pass_to_graph_normal_form.py @@ -17,9 +17,15 @@ import numpy as np import tvm from tvm import relay -from tvm.relay import op, create_executor, transform -from tvm.relay.ir_pass import detect_feature -from tvm.relay.feature import Feature +from tvm.relay import op, create_executor, transform, Feature +from tvm.relay.analysis import detect_feature + + +def run_opt_pass(expr, opt_pass): + mod = relay.Module.from_expr(expr) + mod = opt_pass(mod) + entry = mod[mod.entry_func] + return entry if isinstance(expr, relay.Function) else entry.body def check_eval(expr, args, expected_result, mod=None, rtol=1e-07): @@ -40,7 +46,7 @@ def test_implicit_share(): body = relay.Let(z, op.add(y, y), op.add(z, z)) body = relay.Let(y, op.add(x, x), body) f = relay.Function([], relay.Let(x, relay.const(1), body)) - g = transform.OptimizeOnExpr(f, transform.ToGraphNormalForm()) + g = run_opt_pass(f, transform.ToGraphNormalForm()) assert Feature.fLet in detect_feature(f) assert not Feature.fLet in detect_feature(g) check_eval(f, [], 8.0) @@ -54,8 +60,8 @@ def test_round_trip(): body = relay.Let(z, op.add(y, y), op.add(z, z)) body = relay.Let(y, op.add(x, x), body) f = relay.Function([], relay.Let(x, relay.const(1), body)) - g = transform.OptimizeOnExpr(f, transform.ToGraphNormalForm()) - h = transform.OptimizeOnExpr(g, transform.ToANormalForm()) + g = run_opt_pass(f, transform.ToGraphNormalForm()) + h = run_opt_pass(g, transform.ToANormalForm()) assert Feature.fLet in detect_feature(f) assert not Feature.fLet in detect_feature(g) check_eval(f, [], 8.0) diff --git a/tests/python/relay/test_pass_unmatched_cases.py b/tests/python/relay/test_pass_unmatched_cases.py index 4f2bb20ad7d6..776f5a05722d 100644 --- a/tests/python/relay/test_pass_unmatched_cases.py +++ b/tests/python/relay/test_pass_unmatched_cases.py @@ -18,7 +18,7 @@ import tvm from tvm import relay from tvm.relay.prelude import Prelude -from tvm.relay.ir_pass import unmatched_cases +from tvm.relay.analysis import unmatched_cases def test_empty_match_block(): # empty match block will not match anything, so it should return a wildcard pattern diff --git a/tests/python/relay/test_pass_vars.py b/tests/python/relay/test_pass_vars.py index 
2f1ef36e7878..70eb047ad03e 100644 --- a/tests/python/relay/test_pass_vars.py +++ b/tests/python/relay/test_pass_vars.py @@ -16,9 +16,9 @@ # under the License. import tvm from tvm import relay -from tvm.relay.ir_pass import (free_vars, free_type_vars, - bound_vars, bound_type_vars, - all_vars, all_type_vars) +from tvm.relay.analysis import (free_vars, free_type_vars, + bound_vars, bound_type_vars, + all_vars, all_type_vars) def assert_vars_match(actual, expected): assert len(actual) == len(expected) diff --git a/tests/python/relay/test_type_infer.py b/tests/python/relay/test_type_infer.py index 8e047354fafd..29b79283a1fc 100644 --- a/tests/python/relay/test_type_infer.py +++ b/tests/python/relay/test_type_infer.py @@ -17,16 +17,34 @@ """Test that type checker correcly computes types for expressions. """ -import tvm -import numpy as np -from tvm.relay.ir_pass import infer_type from tvm import relay -from tvm.relay import op -from tvm.relay.scope_builder import ScopeBuilder +from tvm.relay import op, transform, analysis + + +def run_infer_type(expr, mod=None): + if not mod: + mod = relay.Module.from_expr(expr) + mod = transform.InferType()(mod) + entry = mod[mod.entry_func] + return entry if isinstance(expr, relay.Function) else entry.body + else: + if isinstance(expr, relay.GlobalVar): + gv = expr.name_hint + else: + func = expr + if not isinstance(expr, relay.Function): + func = relay.Function(analysis.free_vars(expr), expr) + mod[mod.entry_func] = func + gv = "main" + mod = transform.InferType()(mod) + + if isinstance(expr, (relay.GlobalVar, relay.Function)): + return mod[gv] + return mod[gv].body def assert_has_type(expr, typ, mod=relay.module.Module({})): - checked_expr = infer_type(expr, mod) + checked_expr = run_infer_type(expr, mod) checked_type = checked_expr.checked_type if checked_type != typ: raise RuntimeError("Type mismatch %s vs %s" % ( @@ -48,7 +66,7 @@ def test_monomorphic_let(): sb = relay.ScopeBuilder() x = sb.let('x', relay.const(1.0, "float64")) sb.ret(x) - xchecked = relay.ir_pass.infer_type(sb.get()) + xchecked = run_infer_type(sb.get()) assert xchecked.checked_type == relay.scalar_type("float64" ) @@ -94,7 +112,7 @@ def test_dual_op(): t2 = sb.let("t2", relay.add(t1, x)) sb.ret(t2) f = relay.Function([x], sb.get()) - fchecked = relay.ir_pass.infer_type(f) + fchecked = run_infer_type(f) assert fchecked.checked_type == relay.FuncType([tp], tp) @@ -107,7 +125,7 @@ def @f(%x : Tensor[(10, 10), float32]) { tp = relay.TensorType((10, 10)) x = relay.var("x", tp) f = relay.Function([x], relay.log(x)) - fchecked = relay.ir_pass.infer_type(f) + fchecked = run_infer_type(f) assert fchecked.checked_type == relay.FuncType([tp], tp) @@ -145,7 +163,7 @@ def test_incomplete_call(): f = relay.var('f') func = relay.Function([x, f], relay.Call(f, [x]), tt) - ft = relay.ir_pass.infer_type(func) + ft = run_infer_type(func) f_type = relay.FuncType([tt], tt) assert ft.checked_type == relay.FuncType([tt, f_type], tt) @@ -164,7 +182,7 @@ def test_higher_order_argument(): # function even though id_func takes a type parameter ho_call = ho_func(id_func, relay.const(0, 'int32')) - hc = relay.ir_pass.infer_type(ho_call) + hc = run_infer_type(ho_call) expected = relay.scalar_type('int32') assert hc.checked_type == expected @@ -177,7 +195,7 @@ def test_higher_order_return(): b = relay.TypeVar('b') nested_id = relay.Function([], id_func, relay.FuncType([b], b), [b]) - ft = relay.ir_pass.infer_type(nested_id) + ft = run_infer_type(nested_id) assert ft.checked_type == relay.FuncType([], 
relay.FuncType([b], b), [b]) @@ -198,7 +216,7 @@ def test_higher_order_nested(): [b]) expected = relay.FuncType([choice_t], relay.FuncType([b], b), [b]) - ft = relay.ir_pass.infer_type(top) + ft = run_infer_type(top) assert ft.checked_type == expected @@ -206,8 +224,7 @@ def test_tuple(): tp = relay.TensorType((10,)) x = relay.var("x", tp) res = relay.Tuple([x, x]) - assert (relay.ir_pass.infer_type(res).checked_type == - relay.TupleType([tp, tp])) + assert (run_infer_type(res).checked_type == relay.TupleType([tp, tp])) def test_ref(): @@ -215,17 +232,17 @@ def test_ref(): y = relay.var("y", "float32") r = relay.RefCreate(x) st = relay.scalar_type("float32") - assert relay.ir_pass.infer_type(r).checked_type == relay.RefType(st) + assert run_infer_type(r).checked_type == relay.RefType(st) g = relay.RefRead(r) - assert relay.ir_pass.infer_type(g).checked_type == st + assert run_infer_type(g).checked_type == st w = relay.RefWrite(r, y) - assert relay.ir_pass.infer_type(w).checked_type == relay.TupleType([]) + assert run_infer_type(w).checked_type == relay.TupleType([]) def test_free_expr(): x = relay.var("x", "float32") y = relay.add(x, x) - yy = relay.ir_pass.infer_type(y) + yy = run_infer_type(y) assert yy.checked_type == relay.scalar_type("float32") assert x.vid.same_as(yy.args[0].vid) @@ -234,7 +251,7 @@ def test_type_args(): x = relay.var("x", shape=(10, 10)) y = relay.var("y", shape=(1, 10)) z = relay.add(x, y) - ty_z = relay.ir_pass.infer_type(z) + ty_z = run_infer_type(z) ty_args = ty_z.type_args assert len(ty_args) == 2 assert ty_args[0].dtype == "float32" @@ -256,15 +273,15 @@ def test_global_var_recursion(): func = relay.Function([x], relay.Call(gv, [x]), tt) mod[gv] = func - ft = relay.ir_pass.infer_type(gv, mod) - assert mod[ft].checked_type == relay.FuncType([tt], tt) + ft = run_infer_type(gv, mod) + assert ft.checked_type == relay.FuncType([tt], tt) def test_equal(): i = relay.var('i', shape=[], dtype='int32') eq = op.equal(i, relay.const(0, dtype='int32')) func = relay.Function([i], eq) - ft = relay.ir_pass.infer_type(func) + ft = run_infer_type(func) assert ft.checked_type == relay.FuncType([relay.scalar_type('int32')], relay.scalar_type('bool')) @@ -275,8 +292,7 @@ def test_constructor_type(): a = relay.TypeVar('a') x = relay.Var('x', a) - ct = relay.ir_pass.infer_type( - relay.Function([x], constructor(x), box(a), [a]), mod) + ct = run_infer_type(relay.Function([x], constructor(x), box(a), [a]), mod) expected = relay.FuncType([a], box(a), [a]) assert ct.checked_type == expected @@ -288,8 +304,8 @@ def test_constructor_call(): box_unit = constructor(relay.Tuple([])) box_constant = constructor(relay.const(0, 'float32')) - ut = relay.ir_pass.infer_type(box_unit, mod) - ct = relay.ir_pass.infer_type(box_constant, mod) + ut = run_infer_type(box_unit, mod) + ct = run_infer_type(box_constant, mod) assert ut.checked_type == box(relay.TupleType([])) assert ct.checked_type == box(relay.TensorType((), 'float32')) @@ -308,7 +324,7 @@ def test_adt_match(): relay.Clause(relay.PatternWildcard(), relay.Tuple([]))]) - mt = relay.ir_pass.infer_type(match, mod) + mt = run_infer_type(match, mod) assert mt.checked_type == relay.TupleType([]) @@ -328,7 +344,7 @@ def test_adt_match_type_annotations(): relay.Tuple([]))]) func = relay.Function([x], match) - ft = relay.ir_pass.infer_type(func, mod) + ft = run_infer_type(func, mod) assert ft.checked_type == relay.FuncType([tt], relay.TupleType([])) diff --git a/tests/python/relay/test_type_solver.py b/tests/python/relay/test_type_solver.py index 
81f0222c029a..655b5d794005 100644 --- a/tests/python/relay/test_type_solver.py +++ b/tests/python/relay/test_type_solver.py @@ -26,7 +26,7 @@ def make_rel(name, args, num_inputs=None, attrs=None): return relay.ty.TypeRelation(func, args, num_inputs, attrs) def make_solver(): - solver = relay._ir_pass._test_type_solver() + solver = relay._analysis._test_type_solver() solver.Solve = solver("Solve") solver.Unify = solver("Unify") solver.Resolve = solver("Resolve") diff --git a/tests/python/relay/test_typecall.py b/tests/python/relay/test_typecall.py index 4cb8f4f5d2ce..963f2ac46846 100644 --- a/tests/python/relay/test_typecall.py +++ b/tests/python/relay/test_typecall.py @@ -15,7 +15,7 @@ # specific language governing permissions and limitations # under the License. from tvm import relay -from tvm.relay.ir_pass import infer_type +from tvm.relay import transform def test_dup_type(): a = relay.TypeVar("a") @@ -23,7 +23,10 @@ def test_dup_type(): make_id = relay.Function([av], relay.Tuple([av, av]), None, [a]) t = relay.scalar_type("float32") b = relay.Var("b", t) - assert relay.ir_pass.infer_type(make_id(b)).checked_type == relay.TupleType([t, t]) + mod = relay.Module.from_expr(make_id(b)) + mod = transform.InferType()(mod) + inferred = mod[mod.entry_func].body + assert inferred.checked_type == relay.TupleType([t, t]) def test_id_type(): @@ -36,7 +39,9 @@ def test_id_type(): make_id = relay.Var("make_id", relay.FuncType([b], id_type(b), [b])) t = relay.scalar_type("float32") b = relay.Var("b", t) - assert relay.ir_pass.infer_type(make_id(b), mod).checked_type == id_type(t) + mod[mod.entry_func] = relay.Function([], make_id(b)) + mod = transform.InferType()(mod) + assert mod[mod.entry_func].body.checked_type == id_type(t) if __name__ == "__main__": diff --git a/tests/python/unittest/test_graph_tuner_core.py b/tests/python/unittest/test_graph_tuner_core.py index e0d2dc06c192..1c3171944bc9 100644 --- a/tests/python/unittest/test_graph_tuner_core.py +++ b/tests/python/unittest/test_graph_tuner_core.py @@ -43,7 +43,7 @@ def _create_data(target, dshape, dtype, layout): w2 = relay.var("w2_weight") conv2 = relay.nn.conv2d(conv1, w2, channels=32, kernel_size=(3, 3), padding=(1, 1)) out = relay.add(conv1, conv2) - net = relay.Function(relay.ir_pass.free_vars(out), out) + net = relay.Function(relay.analysis.free_vars(out), out) net, params = relay.testing.create_workload(net) tasks = autotvm.task.extract_from_program(net, target=target, diff --git a/tests/python/unittest/test_graph_tuner_utils.py b/tests/python/unittest/test_graph_tuner_utils.py index 0847166412d2..5bbd1c4860c2 100644 --- a/tests/python/unittest/test_graph_tuner_utils.py +++ b/tests/python/unittest/test_graph_tuner_utils.py @@ -51,7 +51,7 @@ def test_has_multiple_inputs(): w0 = relay.var("w0") out2 = relay.nn.conv2d(data, w0) out = relay.add(out1, out2) - net = relay.Function(relay.ir_pass.free_vars(out), out) + net = relay.Function(relay.analysis.free_vars(out), out) net = bind_inputs(net, {"data": (1, 16, 224, 224), "w0": (16, 16, 1, 1)}) target_ops = ["conv2d"] node_list = [] @@ -80,7 +80,7 @@ def _count_node(node): op_name_list.append("Tuple") else: op_name_list.append("null") - relay.ir_pass.post_order_visit(net, _count_node) + relay.analysis.post_order_visit(net, _count_node) expr2graph(net, target_ops, node_dict, node_list) for i, item in enumerate(zip(op_name_list, node_list)): @@ -97,7 +97,7 @@ def test_get_direct_ancestor(): out3 = out2 + relay.expr.const(2.5) w1 = relay.var("w1") out = relay.nn.conv2d(out3, w1) - net = 
relay.Function(relay.ir_pass.free_vars(out), out) + net = relay.Function(relay.analysis.free_vars(out), out) net = bind_inputs(net, {"data": (1, 16, 224, 224), "w0": (16, 16, 1, 1), "w1": (16, 16, 1, 1)}) target_ops = ["conv2d"] node_list = [] @@ -117,7 +117,7 @@ def test_get_in_nodes(): out3 = out2 + relay.expr.const(2.5) w1 = relay.var("w1") out = relay.nn.conv2d(out3, w1) - net = relay.Function(relay.ir_pass.free_vars(out), out) + net = relay.Function(relay.analysis.free_vars(out), out) net = bind_inputs(net, {"data": (1, 16, 224, 224), "w0": (16, 16, 1, 1), "w1": (16, 16, 1, 1)}) target_ops = ["conv2d"] input_names = ["data"] diff --git a/tutorials/frontend/using_external_lib.py b/tutorials/frontend/using_external_lib.py index a33d4eb9dc7a..35b015bffcd3 100644 --- a/tutorials/frontend/using_external_lib.py +++ b/tutorials/frontend/using_external_lib.py @@ -56,7 +56,7 @@ simple_net = relay.nn.conv2d(data=data, weight=weight, kernel_size=(3,3), channels=out_channels, padding=(1, 1)) simple_net = relay.nn.batch_norm(simple_net, bn_gamma, bn_beta, bn_mmean, bn_mvar)[0] simple_net = relay.nn.relu(simple_net) -simple_net = relay.Function(relay.ir_pass.free_vars(simple_net), simple_net) +simple_net = relay.Function(relay.analysis.free_vars(simple_net), simple_net) data_shape = (batch_size, 3, 224, 224) net, params = testing.create_workload(simple_net) diff --git a/vta/python/vta/top/graphpack.py b/vta/python/vta/top/graphpack.py index 6f901833ea15..f7d7be8c8047 100644 --- a/vta/python/vta/top/graphpack.py +++ b/vta/python/vta/top/graphpack.py @@ -18,9 +18,17 @@ """A Relay implementation of graph packing.""" from tvm import relay -from tvm.relay import op +from tvm.relay import op, transform from tvm.relay import ExprMutator +def run_opt_pass(expr, opt_pass): + """Execute a relay pass.""" + assert isinstance(opt_pass, transform.Pass) + mod = relay.Module.from_expr(expr) + mod = opt_pass(mod) + entry = mod[mod.entry_func] + return entry if isinstance(expr, relay.Function) else entry.body + def _to_shape(shape): return tuple(int(sh) for sh in shape) @@ -231,7 +239,7 @@ def get_subgraph(expr, start_name, stop_name): """ bitpack_start = op.op.get('annotation.bitpack_start') bitpack_end = op.op.get('annotation.bitpack_end') - anf = relay.ir_pass.to_a_normal_form(expr) + anf = run_opt_pass(expr, transform.ToANormalForm()) def _recursion(anf, start_found, stop_found): """ Helper to obtain the subgraph.
""" @@ -262,7 +270,7 @@ def _recursion(anf, start_found, stop_found): assert stop_found return anf annotated = _recursion(anf, False, False) - return relay.ir_pass.infer_type(relay.ir_pass.to_graph_normal_form(annotated)) + return run_opt_pass(annotated, transform.ToGraphNormalForm()) def graph_pack(expr, bfactor, @@ -299,10 +307,10 @@ def graph_pack(expr, """ assert isinstance(expr, relay.Function) expr = get_subgraph(expr, start_name, stop_name) - expr = relay.ir_pass.infer_type(expr) + expr = run_opt_pass(expr, transform.InferType()) packer = ExprPack( bfactor, cfactor, weight_bits) expr = packer.visit(expr) assert not packer.start_pack - return relay.ir_pass.infer_type(expr) + return run_opt_pass(expr, transform.InferType()) diff --git a/vta/scripts/tune_resnet.py b/vta/scripts/tune_resnet.py index 21aa96cd350f..43bc6acc15d5 100644 --- a/vta/scripts/tune_resnet.py +++ b/vta/scripts/tune_resnet.py @@ -139,7 +139,6 @@ def compile_network(opt, env, target): env.WGT_WIDTH, start_name=opt.start_name, stop_name=opt.stop_name) - relay_prog = relay.ir_pass.fold_constant(relay_prog) return relay_prog, params diff --git a/vta/tutorials/autotvm/tune_relay_vta.py b/vta/tutorials/autotvm/tune_relay_vta.py index bdeb6c5d03e2..9f734bc65d92 100644 --- a/vta/tutorials/autotvm/tune_relay_vta.py +++ b/vta/tutorials/autotvm/tune_relay_vta.py @@ -103,7 +103,6 @@ def compile_network(env, target, model, start_pack, stop_pack): env.WGT_WIDTH, start_name=start_pack, stop_name=stop_pack) - relay_prog = relay.ir_pass.fold_constant(relay_prog) return relay_prog, params diff --git a/vta/tutorials/frontend/deploy_resnet_on_vta.py b/vta/tutorials/frontend/deploy_resnet_on_vta.py index 271630e69558..3e252172444a 100644 --- a/vta/tutorials/frontend/deploy_resnet_on_vta.py +++ b/vta/tutorials/frontend/deploy_resnet_on_vta.py @@ -172,7 +172,6 @@ env.WGT_WIDTH, start_name=start_pack, stop_name=stop_pack) - relay_prog = relay.ir_pass.fold_constant(relay_prog) # Compile Relay program with AlterOpLayout disabled with relay.build_config(opt_level=3, disabled_pass={"AlterOpLayout"}):