diff --git a/include/tvm/relay/op_attr_types.h b/include/tvm/relay/op_attr_types.h index 88e948f5d72a5..8898954721680 100644 --- a/include/tvm/relay/op_attr_types.h +++ b/include/tvm/relay/op_attr_types.h @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -105,9 +106,8 @@ using TShapeDataDependant = bool; */ using FTVMCompute = runtime::TypedPackedFunc< Array(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target)>; + const Array& inputs, + const Type& out_type)>; /*! * \brief Build the computation schedule for @@ -123,6 +123,16 @@ using FTVMSchedule = runtime::TypedPackedFunc< const Array& outs, const Target& target)>; +/*! + * \brief Generate the strategy of operators. This function is a generic + * function and can be re-defined for different targets. + * + * The function signature of generic function is: + * OpStrategy(const Attrs& attrs, const Array& inputs, + * const Type& out_type, const Target& target) + */ +using FTVMStrategy = GenericFunc; + /*! * \brief Alternate the layout of operators or replace the * operator with other expressions. This function will be invoked @@ -136,7 +146,8 @@ using FTVMSchedule = runtime::TypedPackedFunc< using FTVMAlterOpLayout = runtime::TypedPackedFunc< Expr(const Attrs& attrs, const Array& args, - const Array& tinfos)>; + const Array& tinfos, + const Type& out_type)>; /*! * \brief Convert the layout of operators or replace the @@ -191,9 +202,7 @@ using FForwardRewrite = runtime::TypedPackedFunc< * \brief Gradient for a specific op. * * \param orig_call the original Expr. - * * \param output_grad the gradient of the Expr. - * * \return the gradient for each parameters. */ using FPrimalGradient = runtime::TypedPackedFunc(const Expr& orig_call, @@ -207,7 +216,7 @@ enum AnyCodegenStrategy { kVariableDimensions }; -/* \brief A runtime representation of shape. */ +/*! \brief A runtime representation of shape. */ using Shape = Array; using FShapeFunc = runtime::TypedPackedFunc< @@ -215,6 +224,174 @@ using FShapeFunc = runtime::TypedPackedFunc< const Array& inputs, const Array& out_ndims)>; +/*! + * \brief Operator implementation in TVM. + */ +class OpImplementNode : public Object { + public: + /*! \brief Compute function */ + FTVMCompute fcompute; + /*! \brief Schedule function */ + FTVMSchedule fschedule; + /*! \brief Priority level */ + Integer plevel; + + void VisitAttrs(tvm::AttrVisitor* v) { + v->Visit("plevel", &plevel); + } + + static constexpr const char* _type_key = "relay.OpImplement"; + TVM_DECLARE_FINAL_OBJECT_INFO(OpImplementNode, Object); +}; + +/*! + * \brief Operator implementation class. + */ +class OpImplement : public ObjectRef { + public: + /*! \brief default constructor */ + OpImplement() {} + /*! \brief constructor from node pointer */ + explicit OpImplement(ObjectPtr n) : ObjectRef(n) {} + /*! + * \brief access the internal node container + * \return the pointer to the internal node container + */ + inline const OpImplementNode* operator->() const; + /*! + * \brief Invoke the operator compute function. + * \param attrs The attribute of the primitive + * \param inputs The input tensors. + * \param out_type The output type information. + * \return The output compute description of the operator. + */ + Array Compute(const Attrs& attrs, + const Array& inputs, + const Type& out_type); + /*! + * \brief Build the computation schedule. + * \param attrs The attribute of the node. + * \param outs The output tensors. + * \param target The build target. + * \return The computation schedule. + */ + te::Schedule Schedule(const Attrs& attrs, + const Array& outs, + const Target& target); +}; + +/*! + * \brief Specialized implementations for operators under certain conditions. + */ +class OpSpecializationNode : public Object { + public: + /*! \brief List of implementations. */ + Array implements; + /*! \brief Condition to enable the specialization. + * Could be undefined to represent generic case. */ + te::SpecializedCondition condition; + + void VisitAttrs(tvm::AttrVisitor* v) { + v->Visit("condition", &condition); + v->Visit("implements", &implements); + } + + static constexpr const char* _type_key = "relay.OpSpecialization"; + TVM_DECLARE_FINAL_OBJECT_INFO(OpSpecializationNode, ExprNode); +}; + +/*! + * \brief Operator specialization class. + */ +class OpSpecialization : public ObjectRef { + public: + OpSpecialization() {} + explicit OpSpecialization(ObjectPtr n) : ObjectRef(n) {} + /*! + * \brief access the internal node container + * \return the pointer to the internal node container + */ + inline const OpSpecializationNode* operator->() const; + /*! + * \brief access the internal node container + * \return the pointer to the internal node container + */ + inline OpSpecializationNode* operator->(); + /*! + * \brief Add an implementation. + * \param compute Compute function + * \param schedule Schedule function + * \param plevel Priority level of this implemntation. + */ + void AddImplement(FTVMCompute fcompute, FTVMSchedule fschedule, + int plevel); +}; + +/*! + * \brief Operator strategy to choose implementation. + */ +class OpStrategyNode : public Object { + public: + /*! \brief List of operator specializations. */ + Array specializations; + + void VisitAttrs(tvm::AttrVisitor* v) { + v->Visit("specializations", &specializations); + } + + static constexpr const char* _type_key = "relay.OpStrategy"; + TVM_DECLARE_FINAL_OBJECT_INFO(OpStrategyNode, ExprNode); +}; + +/*! + * \brief Operator strategy class. + */ +class OpStrategy : public ObjectRef { + public: + /*! \brief default constructor */ + OpStrategy() {} + /*! \brief constructor from node pointer */ + explicit OpStrategy(ObjectPtr n) : ObjectRef(n) {} + /*! + * \brief access the internal node container + * \return the pointer to the internal node container + */ + inline const OpStrategyNode* operator->() const; + /*! + * \brief access the internal node container + * \return the pointer to the internal node container + */ + inline OpStrategyNode* operator->(); + /*! + * \brief Add an implementation. + * \param compute Compute function + * \param schedule Schedule function + * \param plevel Priority level of this implementation. + */ + void AddImplement(FTVMCompute fcompute, FTVMSchedule fschedule, int plevel); +}; + +// implementations +inline const OpImplementNode* OpImplement::operator->() const { + return static_cast(get()); +} + +inline const OpSpecializationNode* OpSpecialization::operator->() const { + return static_cast(get()); +} + +inline OpSpecializationNode* OpSpecialization::operator->() { + return static_cast(get_mutable()); +} + +inline const OpStrategyNode* OpStrategy::operator->() const { + return static_cast(get()); +} + +inline OpStrategyNode* OpStrategy::operator->() { + return static_cast(get_mutable()); +} + } // namespace relay } // namespace tvm #endif // TVM_RELAY_OP_ATTR_TYPES_H_ diff --git a/include/tvm/te/schedule.h b/include/tvm/te/schedule.h index e99b54a86565f..2a88f4c8f7e9d 100644 --- a/include/tvm/te/schedule.h +++ b/include/tvm/te/schedule.h @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -742,6 +743,55 @@ class SingletonNode : public IterVarRelationNode { TVM_DECLARE_FINAL_OBJECT_INFO(SingletonNode, IterVarRelationNode); }; +class SpecializedConditionNode; + +/*! + * \brief Specialized condition to enable op specialization + */ +class SpecializedCondition : public ObjectRef { + public: + SpecializedCondition() {} + explicit SpecializedCondition(ObjectPtr n) : ObjectRef(n) {} + /*! + * \brief Get the current specialized condition. + * \return The current specialized condition. + */ + TVM_DLL static SpecializedCondition Current(); + + const SpecializedConditionNode* operator->() const; + + using ContainerType = SpecializedConditionNode; + class Internal; + private: + // enable with syntax. + friend class Internal; + friend class With; + /*! \brief Push a new specialized condition onto the thread local stack. */ + TVM_DLL void EnterWithScope(); + /*! \brief Pop a specialized condition off the thread local context stack. */ + TVM_DLL void ExitWithScope(); +}; + +/*! \brief Container for specialization conditions. */ +class SpecializedConditionNode : public Object { + public: + /*! + * \brief List of conditions in conjunctive joint form (CNF). + * Each condition should be a simple expression, e.g., n > 16, m % 8 == 0, etc., + * where n, m are tvm::Var that represents a dimension in the tensor shape. + */ + Array clauses; + + void VisitAttrs(AttrVisitor* v) { + v->Visit("clauses", &clauses); + } + + static SpecializedCondition make(Array conditions); + + static constexpr const char* _type_key = "SpecializedCondition"; + TVM_DECLARE_FINAL_OBJECT_INFO(SpecializedConditionNode, Object); +}; + // implementations inline const StageNode* Stage::operator->() const { @@ -765,6 +815,11 @@ inline const IterVarRelationNode* IterVarRelation::operator->() const { inline const IterVarAttrNode* IterVarAttr::operator->() const { return static_cast(get()); } + +inline const SpecializedConditionNode* SpecializedCondition::operator->() const { + return static_cast(get()); +} + } // namespace te } // namespace tvm #endif // TVM_TE_SCHEDULE_H_ diff --git a/python/tvm/__init__.py b/python/tvm/__init__.py index 65cb67266de69..c1b80b887ebfe 100644 --- a/python/tvm/__init__.py +++ b/python/tvm/__init__.py @@ -50,7 +50,7 @@ from .target import build_config # tvm.te -from .te import decl_tensor_intrin, create_schedule, tag_scope +from .te import decl_tensor_intrin, create_schedule, tag_scope, current_specialization # tvm.testing from . import testing diff --git a/python/tvm/autotvm/__init__.py b/python/tvm/autotvm/__init__.py index cf8362ad83685..eab4ddfeaf7df 100644 --- a/python/tvm/autotvm/__init__.py +++ b/python/tvm/autotvm/__init__.py @@ -41,8 +41,8 @@ from .measure import measure_option, MeasureInput, MeasureResult, MeasureErrorNo, \ LocalBuilder, LocalRunner, RPCRunner from .tuner import callback -from .task import template, get_config, create, ConfigSpace, ConfigEntity, \ - register_topi_compute, register_topi_schedule, \ +from .task import get_config, create, ConfigSpace, ConfigEntity, \ + register_topi_compute, register_topi_schedule, register_customized_task, \ DispatchContext, FallbackContext, ApplyHistoryBest as apply_history_best, \ ApplyGraphBest as apply_graph_best from .env import GLOBAL_SCOPE diff --git a/python/tvm/autotvm/graph_tuner/base_graph_tuner.py b/python/tvm/autotvm/graph_tuner/base_graph_tuner.py index b02c289cb10f5..489a97f10d5df 100644 --- a/python/tvm/autotvm/graph_tuner/base_graph_tuner.py +++ b/python/tvm/autotvm/graph_tuner/base_graph_tuner.py @@ -25,7 +25,7 @@ import tvm from tvm import autotvm, relay from tvm.autotvm.task import get_config -from tvm.autotvm.task.topi_integration import deserialize_args, serialize_args +from tvm.autotvm.task.topi_integration import serialize_args from tvm.autotvm.record import encode, load_from_file from tvm.autotvm.measure import MeasureResult, MeasureInput @@ -35,18 +35,17 @@ from ._base import INVALID_LAYOUT_TIME -# Setup topi_op_name -> layout function -# NOTE: To add more ops, change the following dictionary. -OP2LAYOUT = { - "topi_nn_conv2d": topi.nn.conv2d_infer_layout, - "topi_nn_depthwise_conv2d_nchw": topi.nn.depthwise_conv2d_infer_layout, -} +def get_infer_layout(task_name): + if task_name.startswith("conv2d"): + return topi.nn.conv2d_infer_layout + elif task_name.startswith("depthwise_conv2d"): + return topi.nn.depthwise_conv2d_infer_layout + else: + raise ValueError("Cannot find infer layout for task %s" % task_name) - -@autotvm.template +@autotvm.register_customized_task("layout_transform") def layout_transform(*args): """Autotvm layout transform template.""" - args = deserialize_args(args) cfg = get_config() cfg.add_flop(-1) data = args[0] @@ -82,7 +81,7 @@ def __init__(self, graph, input_shapes, records, target_ops, Each row of this file is an encoded record pair. Otherwise, it is an iterator. - target_ops : List of str + target_ops : List of relay.op.Op Target tuning operators. target : str or tvm.target @@ -104,7 +103,7 @@ def __init__(self, graph, input_shapes, records, target_ops, self._layout_transform_perf_records = {} self._layout_transform_interlayer_cost = {} self._input_shapes = input_shapes - self._target_ops = [op.__name__ for op in target_ops] + self._target_ops = target_ops self._name = name self._max_sch_num = max_sch_num @@ -212,7 +211,7 @@ def _fetch_cfg(self): node_entry["record_candidates"] = cache_dict[workload] continue record_candidates = [] - infer_layout_func = OP2LAYOUT[node_entry["topi_op"][0]] + infer_layout_func = get_infer_layout(node_entry["topi_op"][0]) layout_tracking_dict = {} for record in cfg_dict[workload]: in_measure, out_measure = record @@ -264,7 +263,7 @@ def _iterate_layout_transform(self, callback): if node_entry["op"] in self._target_ops: o_idx = key - o_infer_layout_func = OP2LAYOUT[node_entry["topi_op"][0]] + o_infer_layout_func = get_infer_layout(node_entry["topi_op"][0]) o_wkl = node_entry["workloads"][0] i_topi_op = in_node_entry["topi_op"][0] i_wkl = in_node_entry["workloads"][0] @@ -273,14 +272,14 @@ def _iterate_layout_transform(self, callback): pivot += 1 i_topi_op = in_node_entry["topi_op"][pivot] i_wkl = in_node_entry["workloads"][pivot] - i_infer_layout_func = OP2LAYOUT[i_topi_op] + i_infer_layout_func = get_infer_layout(i_topi_op) else: o_idx = target_input_idx if i <= target_input_pos: continue - o_infer_layout_func = OP2LAYOUT[node_entry["topi_op"][0]] + o_infer_layout_func = get_infer_layout(node_entry["topi_op"][0]) o_wkl = node_entry["workloads"][target_input_pos] - i_infer_layout_func = OP2LAYOUT[node_entry["topi_op"][i]] + i_infer_layout_func = get_infer_layout(node_entry["topi_op"][i]) i_wkl = node_entry["workloads"][i] if (i_idx, o_idx) in pair_tracker: diff --git a/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py b/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py index 7648322d3b182..5c598b5b12600 100644 --- a/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py +++ b/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py @@ -18,8 +18,6 @@ """API for graph traversing.""" import threading -import topi - import tvm from tvm import relay, autotvm from tvm.relay import transform @@ -30,13 +28,6 @@ from .utils import has_multiple_inputs, is_boundary_node, is_skipped_node -# Setup relay op base name -> topi compute functions -# NOTE: To add more ops, change the following dictionary. -OP2COMPUTE = { - "conv2d" : [topi.nn.conv2d, topi.nn.depthwise_conv2d_nchw], -} - - def expr2graph(expr, target_ops, node_dict, node_list): """Convert relay expr to graph data structure and fetch workloads of target operators. @@ -46,8 +37,8 @@ def expr2graph(expr, target_ops, node_dict, node_list): expr : tvm.relay.Expr.Function Input relay function expression. - target_ops: List of str - List of target relay base op name + target_ops: List of relay.op.Op + List of target relay ops node_dict : dictionary from tvm.relay.Expr to int Dictionary to record node index @@ -59,14 +50,10 @@ def expr2graph(expr, target_ops, node_dict, node_list): "name": str, "workloads": [tuple], "topi_op": [function]} """ env = TaskExtractEnv.get(allow_duplicate=True) - topi_funcs = [] - for op_name in target_ops: - if op_name not in OP2COMPUTE: - raise RuntimeError("Not supported relay op in graph tuner: %s" - % op_name) - topi_funcs += OP2COMPUTE[op_name] - env.reset(topi_funcs) - # pylint: disable=not-context-manager + env.reset(target_ops) + # TODO(@kevinthesun, @icemelon9): Currently graph tuning pass relies on the fact + # that # autotvm tasks == # ops. But this won't be true after having relay op + # strategy. We need to find a solution to fix this. with env: _expr2graph_impl(expr, target_ops, node_dict, node_list) task_pos = 0 @@ -75,8 +62,7 @@ def expr2graph(expr, target_ops, node_dict, node_list): task_name, args = env.task_collection[task_pos] task = autotvm.task.create(task_name, args, target="llvm", - target_host=None, - template_key='direct') + target_host=None) node_entry["workloads"] = [task.workload] node_entry["topi_op"] = [task_name] task_pos += 1 @@ -101,8 +87,8 @@ def _traverse_expr(node): "op": "null", "name": None} if isinstance(node, Call): - op_name = node.op.name.split(".")[-1] - node_entry["op"] = op_name + op = node.op + node_entry["op"] = node.op for arg in node.args: in_node_idx = node_dict[arg] if isinstance(arg, (Tuple, TupleGetItem)): @@ -118,12 +104,12 @@ def _traverse_expr(node): node_entry["types"].append(tupe_type) else: raise RuntimeError("Unsupported output type %s in operator %s" - % (type(out_type), op_name)) + % (type(out_type), op.name)) # Utilize tracing target to fetch workload with topo-order. # Since we only need workload, dummy target can be used to # create task. - if op_name in target_ops: + if op in target_ops: params = [] for i, input_idx in enumerate(node_entry["inputs"]): input_node_entry = node_list[input_idx[0]] @@ -133,7 +119,7 @@ def _traverse_expr(node): "operators with input node of type " "relay.expr.Var/Constant/Call. Now " "find a target op %s with input type %s" - % (op_name, str(type(input_node_entry["node"])))) + % (op, str(type(input_node_entry["node"])))) free_var = relay.Var("var_%d" % i, input_type) params.append(free_var) call = relay.Call(node.op, params, node.attrs) @@ -155,11 +141,9 @@ def _traverse_expr(node): _expr2graph_impl(node, target_ops, node_dict, node_list) return elif isinstance(node, TupleGetItem): - node_entry["op"] = "TupleGetItem" in_node_idx = node_dict[node.tuple_value] node_entry["inputs"].append([in_node_idx, node.index, 0]) elif isinstance(node, Tuple): - node_entry["op"] = "Tuple" for tuple_item in node: in_node_idx = node_dict[tuple_item] if isinstance(tuple_item, TupleGetItem): diff --git a/python/tvm/autotvm/record.py b/python/tvm/autotvm/record.py index fbf4a08f7b0c7..2ea288ed3426f 100644 --- a/python/tvm/autotvm/record.py +++ b/python/tvm/autotvm/record.py @@ -28,6 +28,7 @@ import os import itertools from collections import OrderedDict +import numpy as np from .. import build, lower, target as _target @@ -152,6 +153,7 @@ def clean_json_to_python(x): config = ConfigEntity.from_json_dict(config) inp = MeasureInput(tgt, tsk, config) result = MeasureResult(*[tuple(x) if isinstance(x, list) else x for x in row["r"]]) + config.cost = np.mean(result.costs) return inp, result if protocol == 'pickle': @@ -160,6 +162,7 @@ def clean_json_to_python(x): task_tuple = pickle.loads(base64.b64decode(items[1].encode())) config = pickle.loads(base64.b64decode(items[2].encode())) result = pickle.loads(base64.b64decode(items[3].encode())) + config.cost = np.mean(result.costs) tsk = task.Task(task_tuple[0], task_tuple[1]) tsk.workload = task_tuple[3] diff --git a/python/tvm/autotvm/task/__init__.py b/python/tvm/autotvm/task/__init__.py index f249f6bacb906..29313d4b5491b 100644 --- a/python/tvm/autotvm/task/__init__.py +++ b/python/tvm/autotvm/task/__init__.py @@ -22,12 +22,13 @@ of typical tasks of interest. """ -from .task import Task, create, register, template, get_config, args_to_workload +from .task import Task, create, get_config, args_to_workload, \ + register_customized_task from .space import ConfigSpace, ConfigEntity from .code_hash import attach_code_hash, attach_code_hash_to_arg -from .dispatcher import dispatcher, DispatchContext, ApplyConfig, ApplyHistoryBest, \ +from .dispatcher import DispatchContext, ApplyConfig, ApplyHistoryBest, \ FallbackContext, clear_fallback_cache, ApplyGraphBest from .topi_integration import register_topi_compute, register_topi_schedule, \ - TaskExtractEnv + TaskExtractEnv, get_workload from .relay_integration import extract_from_program, extract_from_multiple_program diff --git a/python/tvm/autotvm/task/dispatcher.py b/python/tvm/autotvm/task/dispatcher.py index 28a9fbba28340..4297e23950e2f 100644 --- a/python/tvm/autotvm/task/dispatcher.py +++ b/python/tvm/autotvm/task/dispatcher.py @@ -152,79 +152,6 @@ def __exit__(self, ptype, value, trace): DispatchContext.current = self._old_ctx -def dispatcher(fworkload): - """Wrap a workload dispatcher function. - - Parameters - ---------- - fworkload : function - The workload extraction function from arguments. - - Returns - ------- - fdispatcher : function - A wrapped dispatcher function, which will - dispatch based on DispatchContext and - the current workload. - """ - dispatch_dict = {} - func_name = fworkload.__name__ - - def register(key, func=None, override=False): - """Register template function. - - Parameters - ---------- - key : str or List of str - The template key to identify the template - under this dispatcher. - func : function - The function to be registered. - The first argument of the function is always - cfg returned by DispatchContext, - the rest arguments are the same as the fworkload. - override : bool - Whether override existing registration. - - Returns - ------- - The register function if necessary. - """ - if isinstance(key, str): - key = [key] - - def _do_reg(myf): - for x in key: - if x in dispatch_dict and not override: - raise ValueError( - "Key %s is already registered for %s" % (x, func_name)) - dispatch_dict[x] = myf - return myf - - if func: - return _do_reg(func) - return _do_reg - - def dispatch_func(func, *args, **kwargs): - """The wrapped dispatch function""" - tgt = _target.Target.current() - workload = func(*args, **kwargs) - cfg = DispatchContext.current.query(tgt, workload) - if cfg.is_fallback and not cfg.template_key: - # first try 'direct' template - if 'direct' in dispatch_dict: - return dispatch_dict['direct'](cfg, *args, **kwargs) - # otherwise pick a random template - for v in dispatch_dict.values(): - return v(cfg, *args, **kwargs) - else: - return dispatch_dict[cfg.template_key](cfg, *args, **kwargs) - - fdecorate = decorate(fworkload, dispatch_func) - fdecorate.register = register - return fdecorate - - class ApplyConfig(DispatchContext): """Apply a deterministic config entity for all queries. @@ -336,7 +263,8 @@ def _query_inside(self, target, workload): if key in self._best_user_defined: return self._best_user_defined[key] if key in self.best_by_model: - return self.best_by_model[key][0].config + inp, _ = self.best_by_model[key] + return inp.config # then try matching by target key for k in target.keys: @@ -344,13 +272,16 @@ def _query_inside(self, target, workload): if key in self._best_user_defined: return self._best_user_defined[key] if key in self.best_by_targetkey: - return self.best_by_targetkey[key][0].config + inp, _ = self.best_by_targetkey[key] + return inp.config return None def update(self, target, workload, cfg): model = target.model key = (model, workload) + # assume user provided config is the best + cfg.cost = 0 self._best_user_defined[key] = cfg for k in target.keys: @@ -483,8 +414,12 @@ def _query_inside(self, target, workload): """ if self._counter < len(self._records): cfg = self._records[self._counter][0].config + wkl = self._records[self._counter][0].task.workload + if workload is not None: + assert wkl == workload self._counter += 1 - self.update(target, workload, cfg) + self.update(target, wkl, cfg) + cfg.workload = wkl return cfg key = (str(target), workload) if key not in self._global_cfg_dict: diff --git a/python/tvm/autotvm/task/relay_integration.py b/python/tvm/autotvm/task/relay_integration.py index b39c8d446c7f1..fda646c053f52 100644 --- a/python/tvm/autotvm/task/relay_integration.py +++ b/python/tvm/autotvm/task/relay_integration.py @@ -21,7 +21,6 @@ """ import threading -import warnings import logging @@ -55,8 +54,7 @@ def _lower(mod, compiler.lower(mod, target=target) -def extract_from_program(mod, params, ops, target, target_host=None, - template_keys=None): +def extract_from_program(mod, params, target, target_host=None, ops=None): """ Extract tuning tasks from a relay program. This function is the single program version of extract_from_multiple_program. @@ -67,27 +65,22 @@ def extract_from_program(mod, params, ops, target, target_host=None, The module or function to tune params: dict of str to numpy array The associated parameters of the program - ops: List of relay op - List of relay ops to be tuned target: tvm.target.Target The compilation target target_host: tvm.target.Target The host compilation target - template_keys: dict of topi op to str - The tuning template keys map for schedules, default to None. - Example: {topi.nn.conv2d: 'direct'} + ops: List of relay.op.Op + List of relay ops to be tuned Returns ------- task: Array of autotvm.task.Task collected tasks """ - return extract_from_multiple_program([mod], [params], ops, target, target_host, - template_keys) + return extract_from_multiple_program([mod], [params], target, target_host, ops) -def extract_from_multiple_program(mods, params, ops, target, target_host=None, - template_keys=None): +def extract_from_multiple_program(mods, params, target, target_host=None, ops=None): """ Extract tuning tasks from multiple relay programs. This function collects tuning tasks by building a list of programs @@ -99,15 +92,12 @@ def extract_from_multiple_program(mods, params, ops, target, target_host=None, The list of modules or functions to tune params: List of dict of str to numpy array The associated parameters of the programs - ops: List of relay op - List of relay ops to be tuned target: tvm.target.Target The compilation target target_host: tvm.target.Target The host compilation target - template_keys: dict of topi op to str - The tuning template keys map for schedules, default to None. - Example: {topi.nn.conv2d: 'direct'} + ops: List of relay.op.Op + List of relay ops to be tuned Returns ------- @@ -115,36 +105,13 @@ def extract_from_multiple_program(mods, params, ops, target, target_host=None, collected tasks """ # pylint: disable=import-outside-toplevel - import tvm.relay.op from tvm import relay import topi env = TaskExtractEnv.get() - # NOTE: To add more ops, you only need to change the following lists - # relay op -> topi compute - OP2TOPI = { - tvm.relay.op.nn.conv2d: [topi.nn.conv2d, topi.nn.depthwise_conv2d_nchw, - topi.nn.group_conv2d_nchw, - topi.nn.conv2d_NCHWc, - topi.nn.conv2d_NCHWc_int8], - tvm.relay.op.nn.conv2d_transpose: [topi.nn.conv2d_transpose_nchw], - tvm.relay.op.nn.dense: [topi.nn.dense], - tvm.relay.op.nn.batch_matmul: [topi.nn.batch_matmul], - tvm.relay.op.nn.deformable_conv2d: [topi.nn.deformable_conv2d_nchw], - tvm.relay.op.nn.conv1d_transpose: [topi.nn.conv1d_transpose_ncw], - tvm.relay.op.nn.conv3d: [topi.nn.conv3d], - } - - topi_funcs = [] - for op_name in ops: - if op_name in OP2TOPI: - topi_funcs.extend(OP2TOPI[op_name]) - else: - warnings.warn("Op %s is not tunable, ignored" % op_name) - # run compiler to collect all TOPI calls during compilation - env.reset(topi_funcs) + env.reset(ops) with env: # disable logger temporarily old_state = logger.disabled @@ -164,24 +131,12 @@ def extract_from_multiple_program(mods, params, ops, target, target_host=None, logger.disabled = old_state - # convert *topi op to template key* map to *task name to template key* map - task_name_to_keys = {} - if template_keys is not None: - for op in template_keys.keys(): - if op in env.topi_to_task: - task_name_to_keys[env.topi_to_task[op]] = template_keys[op] - else: - logger.warning("Invalid template key, fallback to direct") - task_name_to_keys[env.topi_to_task[op]] = 'direct' - # create tasks for target tasks = [] for task_name, args in env.get_tasks(): try: - key = task_name_to_keys[task_name] if task_name in task_name_to_keys else 'direct' tsk = create(task_name, args, - target=target, target_host=target_host, - template_key=key) + target=target, target_host=target_host) tasks.append(tsk) except topi.InvalidShapeError: logger.warning("Invalid shape during AutoTVM task creation") diff --git a/python/tvm/autotvm/task/space.py b/python/tvm/autotvm/task/space.py index d83a248c4ece8..36af70eb99fa6 100644 --- a/python/tvm/autotvm/task/space.py +++ b/python/tvm/autotvm/task/space.py @@ -612,9 +612,9 @@ def __init__(self): self._entity_map = OrderedDict() # name -> entity self._constraints = [] self.errors = [] - self.template_key = None self.code_hash = None self.flop = 0 + self.cost = None self.is_fallback = False @staticmethod @@ -790,7 +790,7 @@ def get(self, index): for name, space in self.space_map.items(): entities[name] = space[t % len(space)] t //= len(space) - ret = ConfigEntity(index, self.code_hash, self.template_key, entities, self._constraints) + ret = ConfigEntity(index, self.code_hash, entities, self._constraints) return ret def __iter__(self): @@ -830,17 +830,14 @@ class ConfigEntity(ConfigSpace): index of this config in space code_hash: str hash of schedule code - template_key : str - The specific template key entity_map: dict map name to transform entity constraints : list List of constraints """ - def __init__(self, index, code_hash, template_key, entity_map, constraints): + def __init__(self, index, code_hash, entity_map, constraints): super(ConfigEntity, self).__init__() self.index = index - self.template_key = template_key self._collect = False self._entity_map = entity_map self._space_map = None @@ -891,7 +888,6 @@ def to_json_dict(self): """ ret = {} ret['i'] = int(self.index) - ret['t'] = self.template_key ret['c'] = self.code_hash entity_map = [] for k, v in self._entity_map.items(): @@ -926,7 +922,6 @@ def from_json_dict(json_dict): """ index = json_dict["i"] code_hash = json_dict["c"] - template_key = json_dict["t"] constraints = [] entity_map = OrderedDict() @@ -944,11 +939,10 @@ def from_json_dict(json_dict): raise RuntimeError("Invalid config knob type: " + knob_type) entity_map[str(key)] = entity - return ConfigEntity(index, code_hash, template_key, entity_map, constraints) + return ConfigEntity(index, code_hash, entity_map, constraints) def __repr__(self): - return "%s,%s,%s,%d" % (str(self._entity_map)[12:-1], self.template_key, - self.code_hash, self.index) + return "%s,%s,%d" % (str(self._entity_map)[12:-1], self.code_hash, self.index) class FallbackConfigEntity(ConfigSpace): @@ -1062,4 +1056,4 @@ def __setitem__(self, name, entity): self._entity_map[name] = entity def __repr__(self): - return "%s,%s,%s" % (str(self._entity_map)[12:-1], self.template_key, self.code_hash) + return "%s,%s" % (str(self._entity_map)[12:-1], self.code_hash) diff --git a/python/tvm/autotvm/task/task.py b/python/tvm/autotvm/task/task.py index 9ff8b24fcb5dc..7fbc94e6732fc 100644 --- a/python/tvm/autotvm/task/task.py +++ b/python/tvm/autotvm/task/task.py @@ -14,7 +14,7 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -# pylint: disable=unused-variable +# pylint: disable=unused-variable,not-callable """Definition of task function. Task can be constructed from tuple of func, args, and kwargs. @@ -24,10 +24,10 @@ import numpy as np -from ... import tensor, expr, container, target as _target +from ... import tensor, expr, container, placeholder, target as _target -from ..util import get_const_int, get_const_tuple, get_func_name -from .dispatcher import DispatchContext, ApplyConfig, dispatcher +from ..util import get_const_int, get_const_tuple +from .dispatcher import DispatchContext, ApplyConfig from .space import ConfigSpace def _raise_error(*args, **kwargs): # pylint: disable=unused-argument @@ -35,6 +35,39 @@ def _raise_error(*args, **kwargs): # pylint: disable=unused-argument "of this task is registered in another python file " "which is not imported in this run") + +def serialize_args(args): + """serialize arguments of a topi function to a hashable tuple. + + Parameters + ---------- + args: list of hashable or Tensor + """ + ret = [] + for t in args: + if isinstance(t, tensor.Tensor): + ret.append(('TENSOR', get_const_tuple(t.shape), t.dtype)) + else: + ret.append(t) + return tuple(ret) + + +def deserialize_args(args): + """The inverse function of :code:`serialize_args`. + + Parameters + ---------- + args: list of hashable or Tensor + """ + ret = [] + for t in args: + if isinstance(t, tuple) and t[0] == 'TENSOR': + ret.append(placeholder(shape=t[1], dtype=t[2])) + else: + ret.append(t) + return ret + + class Task(object): """A Tunable Task @@ -116,43 +149,134 @@ def __repr__(self): self.name, self.args, self.kwargs, self.workload ) -TASK_TABLE = { -} +TASK_TABLE = {} + +class TopiTemplate(object): + """Topi template that holds the topi compute and schedule function""" + def __init__(self): + self.compute = None + self.schedule = None + self.customized_func = None + + def __call__(self, *args, **kwargs): + args = deserialize_args(args) + if self.customized_func is None: + return self._default_func(*args, **kwargs) + assert callable(self.customized_func) + return self.customized_func(*args, **kwargs) + + def _default_func(self, *args, **kwargs): + assert callable(self.compute) and callable(self.schedule) + out = self.compute(*args, **kwargs) + arg_bufs = [out] + self.get_inputs(out) + s = self.schedule([out]) + return s, arg_bufs + + def get_inputs(self, out): + inputs = [] + queue = [out] + while queue: + t = queue.pop(0) + if isinstance(t.op, tensor.PlaceholderOp): + inputs.append(t) + else: + queue.extend(t.op.input_tensors) + return inputs + +def register_task_compute(name, func=None): + """Register compute function to autotvm task + + Parameters + ---------- + name: str + The task name + + func: None or callable + If it is None, return a decorator. + If is callable, decorate this function. -def register(name, func=None, override=False): - """Register a task function. + Returns + ------- + decorator: callable + A decorator + """ + def _do_reg(f): + if name not in TASK_TABLE: + TASK_TABLE[name] = TopiTemplate() + tmpl = TASK_TABLE[name] + if tmpl.compute is not None: + raise ValueError("Compute is already registered in autoTVM task %s" % name) + tmpl.compute = f + return f + if func: + return _do_reg(func) + return _do_reg + +def register_task_schedule(name, func=None): + """Register schedule function to autotvm task Parameters ---------- - name : str - The name to identify the task. - func : callable - The function to be registered. - override : bool - Whether override existing registration. + name: str + The task name + + func: None or callable + If it is None, return a decorator. + If is callable, decorate this function. Returns ------- - func: callable - The registered function + decorator: callable + A decorator """ - def _do_reg(myf): - if name in TASK_TABLE and not override: - raise ValueError( - "Key %s is already registered" % name) - TASK_TABLE[name] = myf - return myf + def _do_reg(f): + if name not in TASK_TABLE: + TASK_TABLE[name] = TopiTemplate() + tmpl = TASK_TABLE[name] + if tmpl.schedule is not None: + raise ValueError("Schedule is already registered in autoTVM task %s" % name) + tmpl.schedule = f + return f if func: return _do_reg(func) return _do_reg -def create(func_name, args, target, target_host=None, template_key=None): +def register_customized_task(name, func=None): + """Register a customized function to autotvm task. + + Parameters + ---------- + name: str + The task name + + func: None or callable + If it is None, return a decorator. + If is callable, decorate this function. + + Returns + ------- + decorator: callable + A decorator + """ + def _do_reg(f): + if name not in TASK_TABLE: + TASK_TABLE[name] = TopiTemplate() + tmpl = TASK_TABLE[name] + if tmpl.customized_func is not None: + raise ValueError("Customized func is already registered in autoTVM task %s" % name) + tmpl.customized_func = f + return f + if func: + return _do_reg(func) + return _do_reg + +def create(task_name, args, target, target_host=None): """Create a tuning task and initialize its search space Parameters ---------- - func_name : str or callable - The task function + task_name : str + The AutoTVM task name args : List Positional arguments target : Target @@ -165,30 +289,18 @@ def create(func_name, args, target, target_host=None, template_key=None): tsk: Task a task object """ - if callable(func_name): - # register this function if it is not registered before - func = func_name - func_name = func.func_name if hasattr(func, 'func_name') else func.__name__ - if func_name in TASK_TABLE: - assert func == TASK_TABLE[func_name], "Find name conflict in task registration. " \ - "Consider to choose another name for this task" - else: - register(func_name, func=func) - - func = TASK_TABLE[func_name] - ret = Task(func_name, args) + ret = Task(task_name, args) if isinstance(target, str): target = _target.create(target) # init config space ret.config_space = ConfigSpace() - ret.config_space.template_key = template_key or "" ctx = ApplyConfig(ret.config_space) with ctx: with target: - sch, _ = func(*args) + sch, _ = ret.func(*args) ret.config_space.code_hash = getattr(sch, 'code_hash', None) ret.workload = ctx.workload @@ -198,7 +310,7 @@ def create(func_name, args, target, target_host=None, template_key=None): return ret -def args_to_workload(x, topi_compute_func=None): +def args_to_workload(x, task_name=None): """Convert argument list to hashable workload tuple. This function will convert list to tuple, tvm node to python value and flatten tvm.tensor.Tensor to a tuple @@ -207,8 +319,8 @@ def args_to_workload(x, topi_compute_func=None): ---------- x: primitive hashable types or tensor.Tensor The original value - topi_compute_func: topi compute function - The function name will be added as first element of the workload tuple + task_name: str + The AutoTVM task name Returns ------- @@ -227,76 +339,76 @@ def args_to_workload(x, topi_compute_func=None): workload = 0 else: raise RuntimeError('Do not support type "%s" in argument. Consider to use' - 'primitive types or tvm.tir.Var only' % type(x)) - return (get_func_name(topi_compute_func), ) + workload if topi_compute_func else workload - -def template(func): - """ - Decorate a function as a tunable schedule template - - Parameters - ---------- - func: callable - A callable template function. - Its argument should be hashable values. - Its return value should be a Tuple(Schedule, Array of Tensor) - - Returns - ------- - func: callable - The decorated function - - Examples - -------- - The following code is a tunable template for a blocked matrix multiplication - - .. code-block:: python - - @autotvm.template - def matmul(N, L, M, dtype): - A = tvm.placeholder((N, L), name='A', dtype=dtype) - B = tvm.placeholder((L, M), name='B', dtype=dtype) - - k = tvm.reduce_axis((0, L), name='k') - C = tvm.compute((N, M), lambda i, j: tvm.sum(A[i, k] * B[k, j], axis=k), name='C') - s = tvm.create_schedule(C.op) - - # schedule - y, x = s[C].op.axis - k = s[C].op.reduce_axis[0] - - ##### define space begin ##### - cfg = autotvm.get_config() - cfg.define_split("tile_y", y, num_outputs=2) - cfg.define_split("tile_x", x, num_outputs=2) - ##### define space end ##### - - # schedule according to config - yo, yi = cfg["tile_y"].apply(s, C, y) - xo, xi = cfg["tile_x"].apply(s, C, x) - - s[C].reorder(yo, xo, k, yi, xi) + 'primitive types or tvm.expr.Var only' % type(x)) + return tuple((task_name, ) + workload) if task_name else workload - return s, [A, B, C] - """ - # pylint: disable=unused-variable - - fname = get_func_name(func) - - @register(fname) - @dispatcher - def config_dispatcher(*args, **kwargs): - assert not kwargs, "Do not support kwargs in template function call" - return (fname, ) + args_to_workload(args) - - @config_dispatcher.register("") - def template_call(cfg, *args, **kwargs): - assert not kwargs, "Do not support kwargs in template function call" - with ApplyConfig(cfg): - return func(*args, **kwargs) - - config_dispatcher.func_name = fname - return config_dispatcher +# def template(func): +# """ +# Decorate a function as a tunable schedule template +# +# Parameters +# ---------- +# func: callable +# A callable template function. +# Its argument should be hashable values. +# Its return value should be a Tuple(Schedule, Array of Tensor) +# +# Returns +# ------- +# func: callable +# The decorated function +# +# Examples +# -------- +# The following code is a tunable template for a blocked matrix multiplication +# +# .. code-block:: python +# +# @autotvm.template +# def matmul(N, L, M, dtype): +# A = tvm.placeholder((N, L), name='A', dtype=dtype) +# B = tvm.placeholder((L, M), name='B', dtype=dtype) +# +# k = tvm.reduce_axis((0, L), name='k') +# C = tvm.compute((N, M), lambda i, j: tvm.sum(A[i, k] * B[k, j], axis=k), name='C') +# s = tvm.create_schedule(C.op) +# +# # schedule +# y, x = s[C].op.axis +# k = s[C].op.reduce_axis[0] +# +# ##### define space begin ##### +# cfg = autotvm.get_config() +# cfg.define_split("tile_y", y, num_outputs=2) +# cfg.define_split("tile_x", x, num_outputs=2) +# ##### define space end ##### +# +# # schedule according to config +# yo, yi = cfg["tile_y"].apply(s, C, y) +# xo, xi = cfg["tile_x"].apply(s, C, x) +# +# s[C].reorder(yo, xo, k, yi, xi) +# +# return s, [A, B, C] +# """ +# # pylint: disable=unused-variable +# +# fname = get_func_name(func) +# +# @register(fname) +# @dispatcher +# def config_dispatcher(*args, **kwargs): +# assert not kwargs, "Do not support kwargs in template function call" +# return (fname, ) + args_to_workload(args) +# +# @config_dispatcher.register("") +# def template_call(cfg, *args, **kwargs): +# assert not kwargs, "Do not support kwargs in template function call" +# with ApplyConfig(cfg): +# return func(*args, **kwargs) +# +# config_dispatcher.func_name = fname +# return config_dispatcher def get_config(): """Get current config object diff --git a/python/tvm/autotvm/task/topi_integration.py b/python/tvm/autotvm/task/topi_integration.py index 3d3a1d3d3a4e3..29796df142717 100644 --- a/python/tvm/autotvm/task/topi_integration.py +++ b/python/tvm/autotvm/task/topi_integration.py @@ -26,6 +26,7 @@ See tvm/topi/python/topi/arm_cpu/depthwise_conv2d.py for example usage. """ +<<<<<<< HEAD import tvm.te._ffi_api from ... import tensor, placeholder @@ -68,6 +69,13 @@ def deserialize_args(args): else: ret.append(t) return ret +======= +from tvm import target as _target + +from ... import _api_internal, tensor +from .task import args_to_workload, DispatchContext, \ + register_task_compute, register_task_schedule, serialize_args +>>>>>>> relay op strategy # Task extractor for relay program @@ -77,250 +85,49 @@ class TaskExtractEnv: registered = None def __init__(self, allow_duplicate=False): - # pylint: disable=import-outside-toplevel - import topi - - # topi compute -> autotvm task name - self.topi_to_task = { - topi.nn.conv2d: "topi_nn_conv2d", - topi.nn.depthwise_conv2d_nchw: "topi_nn_depthwise_conv2d_nchw", - topi.nn.group_conv2d_nchw: "topi_nn_group_conv2d_nchw", - topi.nn.conv2d_transpose_nchw: "topi_nn_conv2d_transpose_nchw", - topi.nn.conv2d_NCHWc: "topi_x86_conv2d_NCHWc", - topi.nn.conv2d_NCHWc_int8: "topi_x86_conv2d_NCHWc_int8", - topi.nn.dense: "topi_nn_dense", - topi.nn.batch_matmul: "topi_nn_batch_matmul", - topi.nn.bitserial_conv2d_nchw: "topi_nn_bitserial_conv2d_nchw", - topi.nn.bitserial_conv2d_nhwc: "topi_nn_bitserial_conv2d_nhwc", - topi.nn.bitserial_dense: "topi_nn_bitserial_dense", - topi.nn.deformable_conv2d_nchw: "topi_nn_deformable_conv2d_nchw", - topi.nn.conv1d_transpose_ncw: "topi_nn_conv1d_transpose_ncw", - topi.nn.conv3d: "topi_nn_conv3d", - } - - self.topi_to_schedule = { - topi.nn.conv2d: [topi.generic.schedule_conv2d_nchw, - topi.generic.schedule_conv2d_nhwc], - topi.nn.depthwise_conv2d_nchw: [topi.generic.schedule_depthwise_conv2d_nchw, - topi.generic.schedule_depthwise_conv2d_nhwc], - topi.nn.group_conv2d_nchw: [topi.generic.schedule_group_conv2d_nchw], - topi.nn.conv2d_transpose_nchw: [topi.generic.schedule_conv2d_transpose_nchw], - topi.nn.conv2d_NCHWc: [topi.generic.schedule_conv2d_NCHWc], - topi.nn.conv2d_NCHWc_int8: [topi.generic.schedule_conv2d_NCHWc_int8], - topi.nn.dense: [topi.generic.schedule_dense], - topi.nn.batch_matmul: [topi.generic.schedule_batch_matmul], - topi.nn.bitserial_conv2d_nchw: [topi.generic.schedule_bitserial_conv2d_nchw], - topi.nn.bitserial_conv2d_nhwc: [topi.generic.schedule_bitserial_conv2d_nhwc], - topi.nn.bitserial_dense: [topi.generic.schedule_bitserial_dense], - topi.nn.deformable_conv2d_nchw: [topi.generic.schedule_deformable_conv2d_nchw], - topi.nn.conv1d_transpose_ncw: [topi.generic.schedule_conv1d_transpose_ncw], - topi.nn.conv3d: [topi.generic.schedule_conv3d_ndhwc], - } - - # function reflection for tracing - self.func_to_reflection = { - topi.nn.conv2d: lambda x: setattr(topi.nn, 'conv2d', x), - topi.nn.conv2d_NCHWc: lambda x: setattr(topi.nn, 'conv2d_NCHWc', x), - topi.nn.conv2d_NCHWc_int8: lambda x: setattr(topi.nn, 'conv2d_NCHWc_int8', x), - topi.nn.depthwise_conv2d_nchw: lambda x: setattr(topi.nn, 'depthwise_conv2d_nchw', x), - topi.nn.group_conv2d_nchw: lambda x: setattr(topi.nn, 'group_conv2d_nchw', x), - topi.nn.conv2d_transpose_nchw: lambda x: setattr(topi.nn, 'conv2d_transpose_nchw', x), - topi.nn.dense: lambda x: setattr(topi.nn, 'dense', x), - topi.nn.batch_matmul: lambda x: setattr(topi.nn, 'batch_matmul', x), - topi.nn.bitserial_conv2d_nchw: lambda x: setattr(topi.nn, 'bitserial_conv2d_nchw', x), - topi.nn.bitserial_conv2d_nhwc: lambda x: setattr(topi.nn, 'bitserial_conv2d_nhwc', x), - topi.nn.bitserial_dense: lambda x: setattr(topi.nn, 'bitserial_dense', x), - topi.nn.deformable_conv2d_nchw: lambda x: setattr(topi.nn, 'deformable_conv2d_nchw', x), - topi.nn.conv1d_transpose_ncw: lambda x: setattr(topi.nn, 'conv1d_transpose_ncw', x), - topi.nn.conv3d: lambda x: setattr(topi.nn, 'conv3d', x), - } - self.allow_duplicate = allow_duplicate - self._register_topi_task() self.task_collection = [] - self.wanted_topi_funcs = list(self.topi_to_task.keys()) + self.wanted_relay_ops = None self.modified_funcs = [] + self.tracing = False def __enter__(self): self.task_collection = [] - self.modified_funcs = [] - - for topi_compute in self.wanted_topi_funcs: - def _local_scope(compute_func): - """start a scope to hold the local function in for loop""" - - def _tracing_wrapper(*args, **kwargs): - assert not kwargs, "Do not support extracting tuning tasks when " \ - "kwargs is used in TOPI function call. " \ - "Please modify it to use only positional args." - key = (self.topi_to_task[compute_func], serialize_args(args)) - if self.allow_duplicate or key not in self.task_collection: - self.task_collection.append(key) - - return compute_func(*args, **kwargs) - - self.func_to_reflection[compute_func](_tracing_wrapper) - self.modified_funcs.append(compute_func) - - _local_scope(topi_compute) + self.tracing = True return self def __exit__(self, exc_type, exc_val, exc_tb): - # revert modification - for func in self.modified_funcs: - self.func_to_reflection[func](func) - - def _register_topi_task(self): - """register tuning wrapper for topi function""" - # pylint: disable=import-outside-toplevel - import topi - - # Avoid double registration for certain targets - if TaskExtractEnv.registered: - return - TaskExtractEnv.registered = True - - # Tuning wrapper for topi functions - @register("topi_nn_conv2d") - def _topi_nn_conv2d(*args, **kwargs): - assert not kwargs, "Do not support kwargs in template function call" - args = deserialize_args(args) - A, W = args[:2] - layout = args[-2] - C = topi.nn.conv2d(*args, **kwargs) - if layout == 'NCHW': - s = topi.generic.schedule_conv2d_nchw([C]) - elif layout == 'HWCN': - s = topi.generic.schedule_conv2d_hwcn([C]) - elif layout == 'NHWC': - s = topi.generic.schedule_conv2d_nhwc([C]) - else: - raise ValueError("Unsupported layout {}".format(layout)) - return s, [A, W, C] + self.tracing = False - @register("topi_nn_depthwise_conv2d_nchw") - def _topi_nn_depthwise_conv2d_nchw(*args, **kwargs): - assert not kwargs, "Do not support kwargs in template function call" - args = deserialize_args(args) - A, W = args[:2] - C = topi.nn.depthwise_conv2d_nchw(*args, **kwargs) - s = topi.generic.schedule_depthwise_conv2d_nchw([C]) - return s, [A, W, C] - - @register("topi_nn_group_conv2d_nchw") - def _topi_nn_group_conv2d_nchw(*args, **kwargs): - assert not kwargs, "Do not support kwargs in template function call" - args = deserialize_args(args) - A, W = args[:2] - C = topi.nn.group_conv2d_nchw(*args, **kwargs) - s = topi.generic.schedule_group_conv2d_nchw([C]) - return s, [A, W, C] - - @register("topi_nn_conv2d_transpose_nchw") - def _topi_nn_conv2d_transpose_nchw(*args, **kwargs): - assert not kwargs, "Do not support kwargs in template function call" - args = deserialize_args(args) - A, W = args[:2] - C = topi.nn.conv2d_transpose_nchw(*args, **kwargs) - s = topi.generic.schedule_conv2d_transpose_nchw([C]) - return s, [A, W, C] - - @register("topi_nn_conv1d_transpose_ncw") - def _topi_nn_conv1d_transpose_ncw(*args, **kwargs): - assert not kwargs, "Do not support kwargs in template function call" - args = deserialize_args(args) - A, W = args[:2] - C = topi.nn.conv1d_transpose_ncw(*args, **kwargs) - s = topi.generic.schedule_conv1d_transpose_ncw([C]) - return s, [A, W, C] - - @register("topi_nn_conv3d") - def _topi_nn_conv3d(*args, **kwargs): - assert not kwargs, "Do not support kwargs in template function call" - args = deserialize_args(args) - A, W = args[:2] - C = topi.nn.conv3d(*args, **kwargs) - s = topi.generic.schedule_conv3d_ndhwc([C]) - return s, [A, W, C] - - @register("topi_nn_dense") - def _topi_nn_dense(*args, **kwargs): - assert not kwargs, "Do not support kwargs in template function call" - args = deserialize_args(args) - if len(args) > 2: - data, weight, bias = args[:3] - else: - data, weight = args - bias = None - C = topi.nn.dense(*args, **kwargs) - s = topi.generic.schedule_dense([C]) - if bias is not None: - return s, [data, weight, bias, C] - return s, [data, weight, C] - - @register("topi_nn_batch_matmul") - def _topi_nn_batch_matmul(*args, **kwargs): - assert not kwargs, "Do not support kwargs in template function call" - args = deserialize_args(args) - A, B = args - C = topi.nn.batch_matmul(A, B) - s = topi.generic.schedule_batch_matmul([C]) - return s, [A, B, C] - - @register("topi_nn_bitserial_conv2d_nhwc") - def _topi_bitserial_conv2d_nhwc(*args, **kwargs): - args = deserialize_args(args) - C = topi.nn.bitserial_conv2d_nhwc(*args, **kwargs) - s = topi.generic.nn.schedule_bitserial_conv2d_nhwc([C]) - A, W = args[:2] - return s, [A, W, C] - - @register("topi_nn_bitserial_conv2d_nchw") - def _topi_bitserial_conv2d_nchw(*args, **kwargs): - args = deserialize_args(args) - C = topi.nn.bitserial_conv2d_nchw(*args, **kwargs) - s = topi.generic.nn.schedule_bitserial_conv2d_nchw([C]) - A, W = args[:2] - return s, [A, W, C] - - @register("topi_nn_bitserial_dense") - def _topi_nn_bitserial_dense(*args, **kwargs): - assert not kwargs, "Do not support kwargs in template function call" - args = deserialize_args(args) - A, W = args[:2] - C = topi.nn.bitserial_dense(*args, **kwargs) - s = topi.generic.schedule_bitserial_dense([C]) - return s, [A, W, C] - - @register("topi_nn_deformable_conv2d_nchw") - def _topi_nn_deformable_conv2d_nchw(*args, **kwargs): - assert not kwargs, "Do not support kwargs in template function call" - args = deserialize_args(args) - A, Offset, W = args[:3] - C = topi.nn.deformable_conv2d_nchw(*args, **kwargs) - s = topi.generic.schedule_deformable_conv2d_nchw([C]) - return s, [A, Offset, W, C] - - @register("topi_nn_conv2d_NCHWc") - def _topi_nn_conv2d_NCHWc(*args, **kwargs): - assert not kwargs, "Do not support kwargs in template function call" - args = deserialize_args(args) - A, W = args[:2] - C = topi.nn.conv2d_NCHWc(*args, **kwargs) - s = topi.generic.schedule_conv2d_NCHWc([C]) - return s, [A, W, C] - - def reset(self, wanted_topi_funcs): + def reset(self, wanted_relay_ops=None): """Reset task collections Parameters ---------- - wanted_topi_funcs: List of function - The topi function to be extracted + wanted_relay_ops: List of relay.op.Op + The relay ops to be extracted """ self.task_collection = [] - self.wanted_topi_funcs = wanted_topi_funcs + self.wanted_relay_ops = wanted_relay_ops + + def add_task(self, task_name, args): + """Add AutoTVM task + + Parameters + ---------- + task_name: str + AutoTVM task name. + + args: tuple + Arguments to the TOPI function. + + cond: SpecializedCondition + Specialized condition to enable the TOPI template. + """ + key = (task_name, serialize_args(args)) + if self.allow_duplicate or key not in self.task_collection: + self.task_collection.append(key) def get_tasks(self): """Get collected tasks @@ -355,7 +162,7 @@ def get(allow_duplicate=False): return TaskExtractEnv.current -def register_topi_compute(topi_compute, target_keys, template_keys, func=None, override=False): +def register_topi_compute(task_name, func=None): """Register a tunable template for a topi compute function. After the registration, this topi compute will become a configuration dispatcher. It uses @@ -366,15 +173,9 @@ def register_topi_compute(topi_compute, target_keys, template_keys, func=None, o Parameters ---------- - topi_compute: GenericFunc - The topi compute function that will be overloaded - target_keys: str or list of str - The compilation target. The same as the argument of GenericFunc.register. - template_keys: str or list of str - The template key. - We might have several strategies for a single operator (e.g. direct, im2col, winograd). - The template key is used to identity the algorithm strategy. - Every operator must have a "direct" template, which is used by default. + task_name: str + The AutoTVM task name + func: None or callable If it is None, return a decorator. If is callable, decorate this function. @@ -388,6 +189,7 @@ def register_topi_compute(topi_compute, target_keys, template_keys, func=None, o -------- See tvm/topi/python/topi/arm_cpu/depthwise_conv2d.py for example usage. """ +<<<<<<< HEAD def _decorator(f): targets = [target_keys] if isinstance(target_keys, str) else target_keys for target_key in targets: @@ -436,14 +238,50 @@ def template_call(cfg, *args, **kwargs): return [op.output(i) for i in range(len(node))] return f +======= + def _decorate(topi_compute): + @register_task_compute(task_name) + def wrapper(*args, **kwargs): + """wrapper function for topi compute""" + assert not kwargs, "Do not support kwargs in template function call" + task_env = TaskExtractEnv.current + if task_env is not None and task_env.tracing: + task_env.add_task(task_name, args) + workload = args_to_workload(args, task_name) + tgt = _target.current_target() + cfg = DispatchContext.current.query(tgt, workload) + node = topi_compute(cfg, *args) + + # attach workload to return op + op = node.op + attrs = {} + for k, v in node.op.attrs.items(): + attrs[k] = v + attrs['workload'] = workload + if isinstance(op, tensor.ComputeOp): + op = _api_internal._ComputeOp( + op.name, op.tag, attrs, op.axis, op.body) + elif isinstance(op, tensor.ExternOp): + op = _api_internal._ExternOp( + op.name, op.tag, attrs, + op.inputs, op.input_placeholders, + op.output_placeholders, op.body) + else: + raise RuntimeError("Unsupported op type: " + str(type(op))) +>>>>>>> relay op strategy - if func: - _decorator(func) + if isinstance(node, tensor.Tensor): + return op.output(0) + return [op.output(i) for i in range(len(node))] + + return wrapper - return _decorator + if func: + return _decorate(func) + return _decorate -def register_topi_schedule(topi_schedule, target_keys, template_keys, func=None, override=False): +def register_topi_schedule(task_name, func=None): """Register a tunable template for a topi schedule function. After the registration. This topi schedule will become a configuration dispatcher. It dispatches @@ -452,17 +290,13 @@ def register_topi_schedule(topi_schedule, target_keys, template_keys, func=None, Note that this function will try to find "workload" from all the ComputeOp in the input. You can attach "workload" to your compute op by using :any:`register_topi_compute`. + The task name need to match with the task name of the corresponding topi compute function. + Parameters ---------- - topi_schedule: GenericFunc - The topi schedule function that will be overloaded - target_keys: str or list of str - The compilation target - template_keys: str or list of str - The template key. - We might have several strategies for a single operator (e.g. direct, im2col, winograd). - The template key is used to identity the algorithm strategy. - Every operator must have a "direct" template, which is used by default. + task_name: str + The AutoTVM task name + func: None or callable If it is None, return a decorator. If is callable, decorate this function. @@ -476,49 +310,33 @@ def register_topi_schedule(topi_schedule, target_keys, template_keys, func=None, -------- See tvm/topi/python/topi/arm_cpu/depthwise_conv2d.py for example usage. """ - def _decorator(f): - targets = [target_keys] if isinstance(target_keys, str) else target_keys - for target_key in targets: - if target_key not in _REGISTERED_DISPATCHER: - _REGISTERED_DISPATCHER[target_key] = {} - if topi_schedule not in _REGISTERED_DISPATCHER[target_key]: - @topi_schedule.register(target_key) - @dispatcher - def config_dispatcher(outs, *args, **kwargs): - """override topi call as a workload dispatcher""" - def traverse(tensors): - """traverse all ops to find attached workload""" - for t in tensors: - op = t.op - if 'workload' in op.attrs: - return op.attrs['workload'] - wkl = traverse(op.input_tensors) - if wkl: - return wkl - return None - - outs = [outs] if isinstance(outs, tensor.Tensor) else outs - workload = traverse(outs) - - if workload is None: - raise RuntimeError("Cannot find workload in attribute of this schedule") - - return args_to_workload(workload) - - _REGISTERED_DISPATCHER[target_key][topi_schedule] = config_dispatcher - - config_dispatcher = _REGISTERED_DISPATCHER[target_key][topi_schedule] - - @config_dispatcher.register(template_keys, override=override) - def template_call(cfg, outs, *args, **kwargs): - """call the schedule func""" - if f == topi_schedule.fdefault: - return f(outs, *args, **kwargs) - return f(cfg, outs, *args, **kwargs) - - return f - + def _decorate(topi_schedule): + @register_task_schedule(task_name) + def wrapper(outs, *args, **kwargs): + """wrapper function for topi schedule""" + workload = get_workload(outs) + if workload is None: + raise RuntimeError("Cannot find workload in attribute of this schedule") + tgt = _target.current_target() + cfg = DispatchContext.current.query(tgt, workload) + return topi_schedule(cfg, outs, *args, **kwargs) + return wrapper if func: - _decorator(func) - - return _decorator + return _decorate(func) + return _decorate + + +def get_workload(outs): + """Retrieve the workload from outputs""" + def traverse(tensors): + """traverse all ops to find attached workload""" + for t in tensors: + op = t.op + if 'workload' in op.attrs: + return args_to_workload(op.attrs['workload']) + wkl = traverse(op.input_tensors) + if wkl: + return wkl + return None + outs = [outs] if isinstance(outs, tensor.Tensor) else outs + return traverse(outs) diff --git a/python/tvm/autotvm/tophub.py b/python/tvm/autotvm/tophub.py index e1a7d86695f26..ce0be70e4a15f 100644 --- a/python/tvm/autotvm/tophub.py +++ b/python/tvm/autotvm/tophub.py @@ -189,7 +189,7 @@ def download_package(tophub_location, package_name): # global cache for load_reference_log REFERENCE_LOG_CACHE = {} -def load_reference_log(backend, model, workload_name, template_key): +def load_reference_log(backend, model, workload_name): """ Load reference log from TopHub to support fallback in template. Template will use these reference logs to choose fallback config. @@ -201,8 +201,6 @@ def load_reference_log(backend, model, workload_name, template_key): The name of the device model workload_name: str The name of the workload. (The first item in the workload tuple) - template_key: str - The template key """ backend = _alias(backend) @@ -211,7 +209,7 @@ def load_reference_log(backend, model, workload_name, template_key): filename = os.path.join(AUTOTVM_TOPHUB_ROOT_PATH, package_name) global REFERENCE_LOG_CACHE - key = (backend, model, workload_name, template_key) + key = (backend, model, workload_name) if key not in REFERENCE_LOG_CACHE: tmp = [] @@ -233,8 +231,7 @@ def load_reference_log(backend, model, workload_name, template_key): model = max(counts.items(), key=lambda k: k[1])[0] for inp, res in load_from_file(filename): - if (model == inp.target.model and inp.task.workload[0] == workload_name and - inp.config.template_key == template_key): + if model == inp.target.model and inp.task.workload[0] == workload_name: tmp.append((inp, res)) REFERENCE_LOG_CACHE[key] = tmp diff --git a/python/tvm/autotvm/tuner/xgboost_cost_model.py b/python/tvm/autotvm/tuner/xgboost_cost_model.py index 882b0ad19dd50..305244808a33a 100644 --- a/python/tvm/autotvm/tuner/xgboost_cost_model.py +++ b/python/tvm/autotvm/tuner/xgboost_cost_model.py @@ -219,8 +219,7 @@ def fit_log(self, records, plan_size): # filter data, only pick the data with a same task data = [] for inp, res in records: - if inp.task.name == self.task.name and \ - inp.config.template_key == self.task.config_space.template_key: + if inp.task.name == self.task.name: data.append((inp, res)) logger.debug("XGB load %d entries from history log file", len(data)) diff --git a/python/tvm/relay/backend/compile_engine.py b/python/tvm/relay/backend/compile_engine.py index 4eedd23faa1c3..e07baf20e54bd 100644 --- a/python/tvm/relay/backend/compile_engine.py +++ b/python/tvm/relay/backend/compile_engine.py @@ -14,18 +14,38 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. +# pylint: disable=len-as-condition,no-else-return """Backend code generation engine.""" from __future__ import absolute_import +import hashlib +import numpy as np +import tvm +from topi import tag from ..base import register_relay_node, Object +from ... import _api_internal from ... import target as _target +from ..._ffi.function import register_func +from ... import autotvm from .. import expr as _expr +from .. import op as _op +from .. import ty as _ty +from ..expr_functor import ExprVisitor from . import _backend @register_relay_node class CachedFunc(Object): """Low-level tensor function to back a relay primitive function. """ + def __init__(self, target, func_name, inputs, outputs, schedule=None, + lowered_funcs=None, shape_func_param_states=None): + if lowered_funcs is None: + lowered_funcs = [] + if shape_func_param_states is None: + shape_func_param_states = [] + self.__init_handle_by_constructor__( + _backend._make_CachedFunc, target, func_name, inputs, outputs, + schedule, lowered_funcs, shape_func_param_states) @register_relay_node @@ -63,6 +83,316 @@ def _get_cache_key(source_func, target): return source_func +def get_shape(shape): + """Convert the shape to correct dtype and vars.""" + ret = [] + for dim in shape: + if isinstance(dim, tvm.expr.IntImm): + val = int(dim) + assert val <= np.iinfo(np.int32).max + ret.append(tvm.expr.IntImm("int32", val)) + elif isinstance(dim, tvm.expr.Any): + ret.append(tvm.var("any_dim", "int32")) + else: + ret.append(dim) + return ret + + +def get_valid_implements(op, attrs, inputs, out_type, target): + """Get all valid implementations from the op strategy. + + Note that this function doesn't support op that has symbolic input shapes. + + Parameters + ---------- + op : relay.op.Op + Relay operator. + + attrs : object + The op attribute. + + inputs : list of tvm.Tensor + Input tensors to the op. + + out_type : relay.Type + The output type. + + target : tvm.Target + The target to compile the op. + + Returns + ------- + ret : list of relay.op.OpImplement + The list of op implementations. + """ + fstrategy = op.get_attr("FTVMStrategy") + assert fstrategy is not None, "%s doesn't have FTVMStrategy registered" % op.name + with target: + strategy = fstrategy(attrs, inputs, out_type, target) + ret = [] + for spec in strategy.specializations: + if spec.condition: + # check if all the clauses in the specialized condition are true + flag = True + for clause in spec.condition.clauses: + clause = tvm.ir_pass.Simplify(clause) + if isinstance(clause, tvm.expr.IntImm) and clause.value: + continue + flag = False + break + if flag: + for impl in spec.implements: + ret.append(impl) + else: + for impl in spec.implements: + ret.append(impl) + return ret + + +def select_implement(op, attrs, inputs, out_type, target, use_autotvm=True): + """Select the best implement from the op strategy. + + If use_autotvm is True, it'll first try to find the best implementation + based on AutoTVM profile results. If no AutoTVM profile result is found, + it'll choose the implementation with highest plevel. + + If use_autotvm is False, it'll directly choose the implementation with + highest plevel. + + Note that this function doesn't support op that has symbolic input shapes. + + Parameters + ---------- + op : relay.op.Op + Relay operator. + + attrs : object + The op attribute. + + inputs : list[tvm.Tensor] + Input tensors to the op. + + out_type : relay.Type + The output type. + + target : tvm.Target + The target to compile the op. + + use_autotvm : bool + Whether query AutoTVM to pick the best. + + Returns + ------- + ret : tuple(relay.op.OpImplement, list[tvm.Tensor]) + The best op implementation and the corresponding output tensors. + """ + all_impls = get_valid_implements(op, attrs, inputs, out_type, target) + + best_plevel_impl = None + for impl in all_impls: + if best_plevel_impl is None or int(impl.plevel) > int(best_plevel_impl.plevel): + best_plevel_impl = impl + if not use_autotvm: + outs = best_plevel_impl.compute(attrs, inputs, out_type) + return best_plevel_impl, outs + + outputs = {} + best_autotvm_impl = None + best_cfg = None + dispatch_ctx = autotvm.task.DispatchContext.current + for impl in all_impls: + outs = impl.compute(attrs, inputs, out_type) + outputs[impl] = outs + workload = autotvm.task.get_workload(outs) + if workload is None: + continue + cfg = dispatch_ctx.query(target, workload) + if cfg.cost is None: + # It's a fallback config + continue + if best_cfg is None or best_cfg.cost > cfg.cost: + best_autotvm_impl = impl + best_cfg = cfg + if best_autotvm_impl: + return best_autotvm_impl, outputs[best_autotvm_impl] + return best_plevel_impl, outputs[best_plevel_impl] + + +class ScheduleGetter(ExprVisitor): + """Get the schedule given a fused Relay function""" + + MAX_FUNC_NAME_LENGTH = 80 + + def __init__(self, target): + super().__init__() + self.target = target + self.master_op = None + self.master_attrs = None + self.master_op_pattern = 0 + self.master_implement = None + self.func_name = "" + self.scalars = [] + self._device_copy_op = _op.get("device_copy") + + def create(self, prim_func): + """Get the schedule and create the cached function""" + assert isinstance(prim_func, _expr.Function) + assert prim_func.is_primitive() + + def create_tensors(typ, tensors): + if isinstance(typ, _ty.TensorType): + tensors.append(tvm.placeholder(get_shape(typ.shape), typ.dtype)) + else: + assert isinstance(typ, _ty.TupleType) + for field in typ.fields: + create_tensors(field, tensors) + + inputs = [] + for param in prim_func.params: + tensors = [] + create_tensors(param.checked_type, tensors) + self.memo_map[param] = tensors + inputs.extend(tensors) + self.func_name = "fused" + outputs = self.visit(prim_func.body) + if len(self.func_name) > ScheduleGetter.MAX_FUNC_NAME_LENGTH: + hash_digest = int(hashlib.sha1(self.func_name).hexdigest(), 16) + self.func_name = "%s_%s" % ( + self.func_name[:ScheduleGetter.MAX_FUNC_NAME_LENGTH], hash_digest) + + assert self.master_op is not None + tensor_outs = [] + for tensor in outputs: + if not isinstance(tensor.op, tvm.tensor.PlaceholderOp): + tensor_outs.append(tensor) + sch = None + if not isinstance(self.master_attrs, _op.op_attrs.DeviceCopyAttrs): + # print('master op:', self.master_op.name) + sch = self.master_implement.schedule(self.master_attrs, tensor_outs, self.target) + for scalar in self.scalars: + sch[scalar].compute_inline() + return CachedFunc(self.target, self.func_name, inputs, outputs, sch) + + def visit_var(self, var): + assert False, "Found free variable " + var.name_hint + + def visit_constant(self, const): + assert len(const.data.shape) == 0, "Constant is not scalar" + dtype = const.data.dtype + data = const.data.asnumpy() + def fcompute(): + if dtype.startswith("int"): + return tvm.expr.IntImm(dtype, int(data)) + elif dtype.startswith("uint"): + return tvm.expr.UIntImm(dtype, int(data)) + elif dtype.startswith("float"): + return tvm.expr.FloatImm(dtype, float(data)) + else: + assert False, "not handled" + return tvm.expr.Expr() + value = tvm.compute((), fcompute, name="compile_engine_const", tag=tag.BROADCAST) + self.scalars.append(value.op) + return [value] + + def visit_call(self, call): + inputs = [] + count_tuple = 0 + for arg in call.args: + if isinstance(arg.checked_type, _ty.TupleType): + count_tuple += 1 + inputs.extend(self.visit(arg)) + assert count_tuple <= 1, "Only allow function with a single tuple input" + ret_type = call.checked_type + if isinstance(ret_type, _ty.TensorType): + ret_type = _ty.TensorType(get_shape(ret_type.shape), ret_type.dtype) + elif isinstance(ret_type, _ty.TupleType): + new_fields = [] + for field in ret_type.fields: + if isinstance(field, _ty.TensorType): + new_fields.append(_ty.TensorType(get_shape(field.shape), field.dtype)) + else: + new_fields.append(field) + ret_type = _ty.TupleType(new_fields) + assert isinstance(call.op, _op.Op) + op = call.op + + # disable AutoTVM tracing if op is not in wanted list + env = autotvm.task.TaskExtractEnv.current + reenable_tracing = False + if env is not None and env.tracing: + if env.wanted_relay_ops is not None and op not in env.wanted_relay_ops: + env.tracing = False + reenable_tracing = True + + if op == self._device_copy_op: + copy_input = inputs[0] + outputs = [_api_internal._Tensor(copy_input.shape, copy_input.dtype, + None, 0)] + else: + is_dyn = call.checked_type.is_dynamic() + for arg in call.args: + is_dyn = is_dyn or arg.checked_type.is_dynamic() + + if not is_dyn: + best_impl, outputs = select_implement( + op, call.attrs, inputs, ret_type, self.target) + else: + # TODO(@icemelon9): Allow tvm to generate multiple kernels for dynamic shapes + # for dynamic case, we currently use the implementation with highest plevel + best_impl, outputs = select_implement( + op, call.attrs, inputs, ret_type, self.target, use_autotvm=False) + op_pattern = op.get_attr("TOpPattern") + if op_pattern >= _op.OpPattern.COMM_REDUCE: + assert self.master_op is None or self.master_op_pattern < _op.OpPattern.COMM_REDUCE, \ + "Two complicated op in a primitive function master=%s current=%s" % ( + self.master_op, op) + if op_pattern >= self.master_op_pattern: + self.master_op = op + self.master_attrs = call.attrs + self.master_op_pattern = op_pattern + self.master_implement = best_impl + if len(outputs) > 1: + assert isinstance(call.checked_type, _ty.TupleType) + assert len(call.checked_type.fields) == len(outputs) + if op == self._device_copy_op: + self.func_name += "__copy" + else: + self.func_name += "_" + op.name + + # re-enable AutoTVM tracing + if reenable_tracing: + env.tracing = True + + return outputs + + def visit_let(self, let): + val = self.visit(let.value) + assert let.var not in self.memo_map + self.memo_map[let.var] = val + return self.visit(let.body) + + def visit_tuple(self, tup): + fields = [] + for field in tup.fields: + assert isinstance(field.checked_type, _ty.TensorType), "Only allow Tuple of Tensor" + res = self.visit(field) + assert len(res) == 1 + fields.append(res[0]) + return fields + + def visit_tuple_getitem(self, t): + tup = self.visit(t.tuple) + assert len(tup) == len(t.tuple.checked_type.fields) + assert t.index >= 0 + assert t.index < tup.size() + return [tup[t.index]] + + +@register_func("relay.backend.create_schedule") +def create_schedule(src_func, target): + return ScheduleGetter(target).create(src_func) + + @register_relay_node class CompileEngine(Object): """CompileEngine to get lowered code. diff --git a/python/tvm/relay/expr.py b/python/tvm/relay/expr.py index 39e68b8333ffb..22d89050298ce 100644 --- a/python/tvm/relay/expr.py +++ b/python/tvm/relay/expr.py @@ -277,6 +277,12 @@ def set_params(self, params): return _expr.FunctionSetParams(self, params) + def is_primitive(self): + return int(self.get_attribute("Primitive")) == 1 + + def get_attribute(self, name): + return _expr.FunctionGetAttr(self, name) + def set_attribute(self, name, ref): return _expr.FunctionSetAttr(self, name, ref) diff --git a/python/tvm/relay/expr_functor.py b/python/tvm/relay/expr_functor.py index f492c743173c8..8d6923920979e 100644 --- a/python/tvm/relay/expr_functor.py +++ b/python/tvm/relay/expr_functor.py @@ -131,22 +131,22 @@ class ExprVisitor(ExprFunctor): The default behavior recursively traverses the AST. """ - def visit_tuple(self, t): - for x in t.fields: + def visit_tuple(self, tup): + for x in tup.fields: self.visit(x) - def visit_call(self, c): - self.visit(c.op) - for a in c.args: + def visit_call(self, call): + self.visit(call.op) + for a in call.args: self.visit(a) - def visit_var(self, v): + def visit_var(self, var): pass - def visit_let(self, l): - self.visit(l.var) - self.visit(l.value) - self.visit(l.body) + def visit_let(self, let): + self.visit(let.var) + self.visit(let.value) + self.visit(let.body) def visit_function(self, f): self.visit(f.body) diff --git a/python/tvm/relay/memory_alloc.py b/python/tvm/relay/memory_alloc.py index d61c6f1d6fbab..f8e981121031e 100644 --- a/python/tvm/relay/memory_alloc.py +++ b/python/tvm/relay/memory_alloc.py @@ -28,8 +28,8 @@ def is_primitive(call): - return hasattr(call.op, 'attrs') and hasattr(call.op.attrs, 'Primitive') and \ - int(call.op.attrs.Primitive) == 1 + return hasattr(call, 'op') and hasattr(call.op, 'attrs') and \ + hasattr(call.op.attrs, 'Primitive') and int(call.op.attrs.Primitive) == 1 # TODO(@jroesch): port to c++ and unify with existing code class LinearizeRetType: diff --git a/python/tvm/relay/op/__init__.py b/python/tvm/relay/op/__init__.py index bcd58ba5b1b12..8c22e35dfe6cf 100644 --- a/python/tvm/relay/op/__init__.py +++ b/python/tvm/relay/op/__init__.py @@ -17,9 +17,10 @@ #pylint: disable=wildcard-import, redefined-builtin """Relay core operators.""" # operator defs -from .op import get, register, register_schedule, register_compute, register_gradient, \ +from .op import get, register, register_compute, register_gradient, \ register_pattern, register_alter_op_layout, register_legalize, \ - schedule_injective, Op, OpPattern, debug + Op, OpPattern, debug +from . import strategy # Operators from .reduce import * diff --git a/python/tvm/relay/op/_algorithm.py b/python/tvm/relay/op/_algorithm.py index 09746be13e302..e1e6fd3a1139c 100644 --- a/python/tvm/relay/op/_algorithm.py +++ b/python/tvm/relay/op/_algorithm.py @@ -18,48 +18,14 @@ # pylint: disable=invalid-name,unused-argument from __future__ import absolute_import -import topi -from topi.util import get_const_int -from ..op import OpPattern, register_compute, register_schedule, register_pattern - - -@register_schedule("argsort") -def schedule_argsort(_, outs, target): - """Schedule definition of argsort""" - with target: - return topi.generic.schedule_argsort(outs) - - -@register_compute("argsort") -def compute_argsort(attrs, inputs, _, target): - """Compute definition of argsort""" - axis = get_const_int(attrs.axis) - is_ascend = bool(get_const_int(attrs.is_ascend)) - dtype = attrs.dtype - return [topi.argsort(inputs[0], axis=axis, is_ascend=is_ascend, dtype=dtype)] - +from . import strategy +from .op import OpPattern, register_pattern +from .op import register_strategy +# argsort +register_strategy("argsort", strategy.argsort_strategy) register_pattern("argsort", OpPattern.OPAQUE) - -@register_schedule("topk") -def schedule_topk(_, outs, target): - """Schedule definition of argsort""" - with target: - return topi.generic.schedule_topk(outs) - - -@register_compute("topk") -def compute_topk(attrs, inputs, _, target): - """Compute definition of argsort""" - k = get_const_int(attrs.k) - axis = get_const_int(attrs.axis) - ret_type = attrs.ret_type - is_ascend = bool(get_const_int(attrs.is_ascend)) - dtype = attrs.dtype - out = topi.topk(inputs[0], k, axis, ret_type, is_ascend, dtype) - out = out if isinstance(out, list) else [out] - return out - - +# topk +register_strategy("topk", strategy.topk_strategy) register_pattern("topk", OpPattern.OPAQUE) diff --git a/python/tvm/relay/op/_reduce.py b/python/tvm/relay/op/_reduce.py index 43f71c0aa6791..3103520bdfef1 100644 --- a/python/tvm/relay/op/_reduce.py +++ b/python/tvm/relay/op/_reduce.py @@ -17,33 +17,21 @@ """Backend compiler related feature registration""" from __future__ import absolute_import -import topi - from topi.util import get_const_int, get_const_tuple from . import op as _reg from ...api import convert from ...hybrid import script - -def _schedule_reduce(_, outs, target): - """Generic schedule for reduce""" - with target: - return topi.generic.schedule_reduce(outs) - - -_reg.register_schedule("argmax", _schedule_reduce) -_reg.register_schedule("argmin", _schedule_reduce) -_reg.register_schedule("sum", _schedule_reduce) -_reg.register_schedule("all", _schedule_reduce) -_reg.register_schedule("any", _schedule_reduce) -_reg.register_schedule("max", _schedule_reduce) -_reg.register_schedule("min", _schedule_reduce) -_reg.register_schedule("prod", _schedule_reduce) -_reg.register_schedule("mean", _schedule_reduce) -_reg.register_schedule("variance", _schedule_reduce) -_reg.register_schedule("nn.cross_entropy", _schedule_reduce) -_reg.register_schedule("nn.cross_entropy_with_logits", _schedule_reduce) - +_reg.register_strategy_reduce("argmax") +_reg.register_strategy_reduce("argmin") +_reg.register_strategy_reduce("sum") +_reg.register_strategy_reduce("all") +_reg.register_strategy_reduce("any") +_reg.register_strategy_reduce("max") +_reg.register_strategy_reduce("min") +_reg.register_strategy_reduce("prod") +_reg.register_strategy_reduce("mean") +_reg.register_strategy_reduce("variance") def _create_axis_record(attrs, inputs): axes = attrs.axis if attrs.axis is None else list(get_const_tuple(attrs.axis)) diff --git a/python/tvm/relay/op/_tensor.py b/python/tvm/relay/op/_tensor.py index c1d02bd56d1b5..ebcb8e36aa651 100644 --- a/python/tvm/relay/op/_tensor.py +++ b/python/tvm/relay/op/_tensor.py @@ -19,101 +19,99 @@ from __future__ import absolute_import import topi from topi.util import get_const_tuple -from .op import register_compute, register_schedule, register_pattern, register_shape_func -from .op import schedule_injective, OpPattern +from .op import register_compute, register_shape_func +from .op import register_strategy_broadcast, register_strategy_injective +from .op import register_pattern, OpPattern from ...hybrid import script from ...api import convert -schedule_broadcast = schedule_injective -schedule_elemwise = schedule_injective - -register_schedule("log", schedule_broadcast) -register_schedule("cos", schedule_broadcast) -register_schedule("sin", schedule_broadcast) -register_schedule("atan", schedule_broadcast) -register_schedule("exp", schedule_broadcast) -register_schedule("erf", schedule_broadcast) -register_schedule("sqrt", schedule_broadcast) -register_schedule("rsqrt", schedule_broadcast) -register_schedule("sigmoid", schedule_broadcast) -register_schedule("floor", schedule_broadcast) -register_schedule("ceil", schedule_broadcast) -register_schedule("trunc", schedule_broadcast) -register_schedule("round", schedule_broadcast) -register_schedule("sign", schedule_broadcast) -register_schedule("abs", schedule_broadcast) -register_schedule("tanh", schedule_broadcast) -register_schedule("logical_not", schedule_broadcast) -register_schedule("bitwise_not", schedule_broadcast) -register_schedule("negative", schedule_broadcast) -register_schedule("copy", schedule_broadcast) - -register_schedule("add", schedule_broadcast) -register_schedule("subtract", schedule_broadcast) -register_schedule("multiply", schedule_broadcast) -register_schedule("divide", schedule_broadcast) -register_schedule("floor_divide", schedule_broadcast) -register_schedule("power", schedule_injective) -register_schedule("mod", schedule_broadcast) -register_schedule("floor_mod", schedule_broadcast) -register_schedule("logical_and", schedule_broadcast) -register_schedule("logical_or", schedule_broadcast) -register_schedule("bitwise_and", schedule_broadcast) -register_schedule("bitwise_or", schedule_broadcast) -register_schedule("bitwise_xor", schedule_broadcast) -register_schedule("equal", schedule_broadcast) -register_schedule("not_equal", schedule_broadcast) -register_schedule("less", schedule_broadcast) -register_schedule("less_equal", schedule_broadcast) -register_schedule("greater", schedule_broadcast) -register_schedule("greater_equal", schedule_broadcast) -register_schedule("maximum", schedule_injective) -register_schedule("minimum", schedule_injective) -register_schedule("right_shift", schedule_injective) -register_schedule("left_shift", schedule_injective) -register_schedule("shape_of", schedule_injective) + +register_strategy_broadcast("log") +register_strategy_broadcast("cos") +register_strategy_broadcast("sin") +register_strategy_broadcast("atan") +register_strategy_broadcast("exp") +register_strategy_broadcast("erf") +register_strategy_broadcast("sqrt") +register_strategy_broadcast("rsqrt") +register_strategy_broadcast("sigmoid") +register_strategy_broadcast("floor") +register_strategy_broadcast("ceil") +register_strategy_broadcast("trunc") +register_strategy_broadcast("round") +register_strategy_broadcast("sign") +register_strategy_broadcast("abs") +register_strategy_broadcast("tanh") +register_strategy_broadcast("add") +register_strategy_broadcast("subtract") +register_strategy_broadcast("multiply") +register_strategy_broadcast("divide") +register_strategy_broadcast("floor_divide") +register_strategy_broadcast("power") +register_strategy_broadcast("copy") +register_strategy_broadcast("logical_not") +register_strategy_broadcast("logical_and") +register_strategy_broadcast("logical_or") +register_strategy_broadcast("bitwise_not") +register_strategy_broadcast("bitwise_and") +register_strategy_broadcast("bitwise_or") +register_strategy_broadcast("bitwise_xor") +register_strategy_broadcast("negative") +register_strategy_broadcast("mod") +register_strategy_broadcast("floor_mod") +register_strategy_broadcast("equal") +register_strategy_broadcast("not_equal") +register_strategy_broadcast("less") +register_strategy_broadcast("less_equal") +register_strategy_broadcast("greater") +register_strategy_broadcast("greater_equal") +register_strategy_injective("maximum") +register_strategy_injective("minimum") +register_strategy_injective("right_shift") +register_strategy_injective("left_shift") +register_strategy_injective("shape_of") # zeros @register_compute("zeros") -def zeros_compute(attrs, inputs, output_type, target): +def zeros_compute(attrs, inputs, output_type): assert not inputs return [topi.full(output_type.shape, output_type.dtype, 0.0)] -register_schedule("zeros", schedule_broadcast) +register_strategy_broadcast("zeros") register_pattern("zeros", OpPattern.ELEMWISE) # zeros_like @register_compute("zeros_like") -def zeros_like_compute(attrs, inputs, output_type, target): +def zeros_like_compute(attrs, inputs, output_type): assert len(inputs) == 1 return [topi.full_like(inputs[0], 0.0)] -register_schedule("zeros_like", schedule_broadcast) +register_strategy_broadcast("zeros_like") # ones @register_compute("ones") -def ones_compute(attrs, inputs, output_type, target): +def ones_compute(attrs, inputs, output_type): assert not inputs return [topi.full(output_type.shape, output_type.dtype, 1.0)] -register_schedule("ones", schedule_broadcast) +register_strategy_broadcast("ones") register_pattern("ones", OpPattern.ELEMWISE) # ones_like @register_compute("ones_like") -def ones_like(attrs, inputs, output_type, target): +def ones_like_compute(attrs, inputs, output_type): assert len(inputs) == 1 return [topi.full_like(inputs[0], 1.0)] -register_schedule("ones_like", schedule_broadcast) +register_strategy_broadcast("ones_like") # clip @register_compute("clip") -def clip_compute(attrs, inputs, output_type, target): +def clip_compute(attrs, inputs, output_type): assert len(inputs) == 1 return [topi.clip(inputs[0], attrs.a_min, attrs.a_max)] -register_schedule("clip", schedule_elemwise) +register_strategy_injective("clip") @script def _cast_shape_function(x): @@ -198,6 +196,7 @@ def elemwise_shape_func(attrs, inputs, _): register_shape_func("floor_mod", False, broadcast_shape_func) register_shape_func("logical_and", False, broadcast_shape_func) register_shape_func("logical_or", False, broadcast_shape_func) +register_shape_func("bitwise_not", False, broadcast_shape_func) register_shape_func("bitwise_and", False, broadcast_shape_func) register_shape_func("bitwise_or", False, broadcast_shape_func) register_shape_func("bitwise_xor", False, broadcast_shape_func) diff --git a/python/tvm/relay/op/_transform.py b/python/tvm/relay/op/_transform.py index e6053b887d38c..ccc53cc6ef1da 100644 --- a/python/tvm/relay/op/_transform.py +++ b/python/tvm/relay/op/_transform.py @@ -21,52 +21,74 @@ import topi from topi.util import get_const_int, get_const_tuple from . import op as _reg -from ._reduce import _schedule_reduce +from . import strategy from .op import OpPattern from ...hybrid import script from ...api import convert -schedule_injective = _reg.schedule_injective -schedule_broadcast = _reg.schedule_injective -schedule_concatenate = _reg.schedule_concatenate - - -_reg.register_schedule("collapse_sum_like", _schedule_reduce) -_reg.register_schedule("broadcast_to", schedule_broadcast) -_reg.register_schedule("broadcast_to_like", schedule_broadcast) -_reg.register_schedule("expand_dims", schedule_broadcast) -_reg.register_schedule("squeeze", schedule_injective) -_reg.register_schedule("reshape", schedule_injective) -_reg.register_schedule("reshape_like", schedule_injective) -_reg.register_schedule("full", schedule_injective) -_reg.register_schedule("full_like", schedule_injective) -_reg.register_schedule("arange", schedule_injective) -_reg.register_schedule("reverse", schedule_injective) -_reg.register_schedule("repeat", schedule_broadcast) -_reg.register_schedule("tile", schedule_broadcast) -_reg.register_schedule("cast", schedule_injective) -_reg.register_schedule("cast_like", schedule_injective) -_reg.register_schedule("reinterpret", schedule_injective) -_reg.register_schedule("strided_slice", schedule_injective) -_reg.register_schedule("strided_set", schedule_injective) -_reg.register_schedule("slice_like", schedule_injective) -_reg.register_schedule("split", schedule_injective) -_reg.register_schedule("take", schedule_injective) -_reg.register_schedule("transpose", schedule_injective) -_reg.register_schedule("where", schedule_broadcast) -_reg.register_schedule("stack", schedule_injective) -_reg.register_schedule("concatenate", schedule_concatenate) -_reg.register_schedule("_contrib_reverse_reshape", schedule_injective) -_reg.register_schedule("gather_nd", schedule_injective) -_reg.register_schedule("sequence_mask", schedule_injective) -_reg.register_schedule("one_hot", schedule_injective) +_reg.register_strategy_broadcast("broadcast_to") +_reg.register_strategy_broadcast("broadcast_to_like") +_reg.register_strategy_broadcast("expand_dims") +_reg.register_strategy_broadcast("repeat") +_reg.register_strategy_broadcast("tile") +_reg.register_strategy_broadcast("where") +_reg.register_strategy_injective("squeeze") +_reg.register_strategy_injective("reshape") +_reg.register_strategy_injective("reshape_like") +_reg.register_strategy_injective("full") +_reg.register_strategy_injective("full_like") +_reg.register_strategy_injective("arange") +_reg.register_strategy_injective("reverse") +_reg.register_strategy_injective("cast") +_reg.register_strategy_injective("cast_like") +_reg.register_strategy_injective("reinterpret") +_reg.register_strategy_injective("strided_slice") +_reg.register_strategy_injective("slice_like") +_reg.register_strategy_injective("split") +_reg.register_strategy_injective("take") +_reg.register_strategy_injective("transpose") +_reg.register_strategy_injective("stack") +_reg.register_strategy_injective("_contrib_reverse_reshape") +_reg.register_strategy_injective("gather_nd") +_reg.register_strategy_injective("sequence_mask") +_reg.register_strategy_injective("one_hot") +_reg.register_strategy_reduce("collapse_sum_like") + +# concatenate +_reg.register_schedule("concatenate", strategy.schedule_concatenate) + +# strided_set +@_reg.register_compute("strided_set") +def compute_strided_set(attrs, inputs, output_type): + """Compute definition of strided_set""" + return [topi.strided_set(inputs[0], inputs[1], inputs[2], inputs[3], inputs[4])] +_reg.register_strategy_injective("strided_set") # layout_transform -_reg.register_schedule("layout_transform", schedule_injective) +_reg.register_strategy_injective("layout_transform") _reg.register_pattern("layout_transform", OpPattern.INJECTIVE) -# shape func +# argwhere +@_reg.register_compute("argwhere") +def compute_argwhere(attrs, inputs, output_type): + """Compute definition of argwhere""" + output_shape = [] + for s in output_type.shape: + if hasattr(s, "value"): + output_shape.append(s) + else: + # see Any, replace it with a var + output_shape.append(tvm.var("any_dim", "int32")) + new_output_type = tvm.relay.ty.TensorType(output_shape, "int32") + return [topi.argwhere(new_output_type, inputs[0])] + +_reg.register_schedule("argwhere", strategy.schedule_argwhere) + +##################### +# Shape functions # +##################### + @script def _arange_shape_func(start, stop, step): out = output_tensor((1,), "int64") @@ -284,31 +306,6 @@ def argwhere_shape_func(attrs, inputs, out_ndims): return [_argwhere_shape_func_5d(inputs[0])] return ValueError("Does not support rank higher than 5 in argwhere") -@_reg.register_schedule("argwhere") -def schedule_argwhere(_, outs, target): - """Schedule definition of argwhere""" - with target: - return topi.generic.schedule_argwhere(outs) - - -@_reg.register_compute("argwhere") -def compute_argwhere(attrs, inputs, output_type, _): - """Compute definition of argwhere""" - output_shape = [] - for s in output_type.shape: - if hasattr(s, "value"): - output_shape.append(s) - else: - # see Any, replace it with a var - output_shape.append(tvm.var("any_dim", "int32")) - new_output_type = tvm.relay.ty.TensorType(output_shape, "int32") - return [topi.argwhere(new_output_type, inputs[0])] - -@_reg.register_compute("strided_set") -def compute_strided_set(attrs, inputs, output_type, _): - """Compute definition of strided_set""" - return [topi.strided_set(inputs[0], inputs[1], inputs[2], inputs[3], inputs[4])] - @script def _layout_transform_shape_func(data_shape, out_layout_len, diff --git a/python/tvm/relay/op/annotation/annotation.py b/python/tvm/relay/op/annotation/annotation.py index 586c300856017..5fcc112787a3c 100644 --- a/python/tvm/relay/op/annotation/annotation.py +++ b/python/tvm/relay/op/annotation/annotation.py @@ -19,7 +19,7 @@ from tvm.runtime import TVMContext as _TVMContext from . import _make -from ..op import register_schedule, schedule_injective +from .. import op as reg def on_device(data, device): @@ -79,7 +79,7 @@ def checkpoint(data): """ return _make.checkpoint(data) -register_schedule("annotation.checkpoint", schedule_injective) +reg.register_strategy_injective("annotation.checkpoint") def compiler_begin(data, compiler): diff --git a/python/tvm/relay/op/contrib/_contrib.py b/python/tvm/relay/op/contrib/_contrib.py index 4b55880244113..16f22f1363c94 100644 --- a/python/tvm/relay/op/contrib/_contrib.py +++ b/python/tvm/relay/op/contrib/_contrib.py @@ -18,29 +18,19 @@ """Backend compiler related feature registration""" from __future__ import absolute_import -import topi from .. import op as reg -from ..op import schedule_injective, OpPattern +from .. import strategy +from ..op import OpPattern # adaptive_max_pool2d -@reg.register_schedule("contrib.adaptive_max_pool2d") -def schedule_adaptive_max_pool2d(_, outs, target): - """Schedule definition of adaptive_max_pool2d""" - with target: - return topi.generic.schedule_adaptive_pool(outs) - +reg.register_schedule("contrib.adaptive_max_pool2d", strategy.schedule_adaptive_pool) reg.register_pattern("contrib.adaptive_max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE) # adaptive_avg_pool2d -@reg.register_schedule("contrib.adaptive_avg_pool2d") -def schedule_adaptive_avg_pool2d(_, outs, target): - """Schedule definition of adaptive_avg_pool2d""" - with target: - return topi.generic.schedule_adaptive_pool(outs) - +reg.register_schedule("contrib.adaptive_avg_pool2d", strategy.schedule_adaptive_pool) reg.register_pattern("contrib.adaptive_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE) # relay.contrib.ndarray_size -reg.register_schedule("contrib.ndarray_size", schedule_injective) +reg.register_strategy_injective("contrib.ndarray_size") diff --git a/python/tvm/relay/op/image/_image.py b/python/tvm/relay/op/image/_image.py index 89fde6dc17383..14a7080d59868 100644 --- a/python/tvm/relay/op/image/_image.py +++ b/python/tvm/relay/op/image/_image.py @@ -20,13 +20,10 @@ import topi from .. import op as reg -from ..op import schedule_injective # resize -reg.register_schedule("image.resize", schedule_injective) - @reg.register_compute("image.resize") -def compute_resize(attrs, inputs, out_type, target): +def compute_resize(attrs, inputs, out_type): size = attrs.size layout = attrs.layout method = attrs.method @@ -34,12 +31,12 @@ def compute_resize(attrs, inputs, out_type, target): out_dtype = attrs.out_dtype return [topi.image.resize(inputs[0], size, layout, method, coord_trans, out_dtype)] +reg.register_strategy_injective("image.resize") -# crop and resize -reg.register_schedule("image.crop_and_resize", schedule_injective) +# crop and resize @reg.register_compute("image.crop_and_resize") -def compute_crop_and_resize(attrs, inputs, out_type, target): +def compute_crop_and_resize(attrs, inputs, out_type): crop_size = attrs.crop_size layout = attrs.layout method = attrs.method @@ -48,3 +45,5 @@ def compute_crop_and_resize(attrs, inputs, out_type, target): return [topi.image.crop_and_resize(inputs[0], inputs[1], inputs[2], crop_size, layout, method, extrapolation_value, out_dtype)] + +reg.register_strategy_injective("image.crop_and_resize") diff --git a/python/tvm/relay/op/nn/_nn.py b/python/tvm/relay/op/nn/_nn.py index 3fdafd5b86281..4e0443fde59ff 100644 --- a/python/tvm/relay/op/nn/_nn.py +++ b/python/tvm/relay/op/nn/_nn.py @@ -21,253 +21,79 @@ import topi from topi.util import get_const_tuple from .. import op as reg -from ..op import OpPattern, schedule_injective +from .. import strategy +from ..op import OpPattern from .._tensor import elemwise_shape_func from ....api import convert from ....hybrid import script # relu -reg.register_schedule("nn.relu", schedule_injective) +reg.register_strategy_broadcast("nn.relu") reg.register_pattern("nn.relu", OpPattern.ELEMWISE) -# softmax -@reg.register_schedule("nn.softmax") -def schedule_softmax(_, outputs, target): - """Schedule definition of softmax""" - with target: - return topi.generic.schedule_softmax(outputs) - +# softmax +reg.register_schedule("nn.softmax", strategy.schedule_softmax) reg.register_pattern("nn.softmax", OpPattern.OPAQUE) -schedule_broadcast = schedule_injective - - -@reg.register_schedule("nn.log_softmax") -def schedule_log_softmax(_, outputs, target): - """Schedule definition of log_softmax""" - with target: - return topi.generic.schedule_softmax(outputs) - +# log_softmax +reg.register_schedule("nn.log_softmax", strategy.schedule_softmax) reg.register_pattern("nn.log_softmax", OpPattern.OPAQUE) # dense -@reg.register_compute("nn.dense") -def compute_dense(attrs, inputs, out_type, target): - """Compute definition of dense""" - out_dtype = attrs.out_dtype - out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype - return [topi.nn.dense(inputs[0], inputs[1], None, out_dtype)] - - -@reg.register_schedule("nn.dense") -def schedule_dense(attrs, outputs, target): - """Schedule definition of dense""" - with target: - return topi.generic.schedule_dense(outputs) - - +reg.register_strategy("nn.dense", strategy.dense_strategy) reg.register_pattern("nn.dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE) +# fifo_buffer @reg.register_compute('nn.fifo_buffer') -def compute_fifo_buffer(attrs, inputs, out_type, target): +def compute_fifo_buffer(attrs, inputs, out_type): return [topi.nn.fifo_buffer(inputs[0], inputs[1], axis=attrs.get_int('axis'))] - -@reg.register_schedule('nn.fifo_buffer') -def schedule_fifo_buffer(attrs, outputs, target): - with target: - return topi.generic.schedule_injective(outputs) - - +reg.register_strategy_injective("nn.fifo_buffer") reg.register_pattern("nn.fifo_buffer", OpPattern.OPAQUE) # batch_matmul -@reg.register_compute("nn.batch_matmul") -def compute_batch_matmul(attrs, inputs, out_type, target): - """Compute definition of batch_matmul""" - with target: - return [topi.nn.batch_matmul(inputs[0], inputs[1])] - - -@reg.register_schedule("nn.batch_matmul") -def schedule_batch_matmul(attrs, outputs, target): - """Schedule definition of batch_matmul""" - with target: - return topi.generic.schedule_batch_matmul(outputs) - - +reg.register_strategy("nn.batch_matmul", strategy.batch_matmul_strategy) reg.register_pattern("nn.batch_matmul", reg.OpPattern.OUT_ELEMWISE_FUSABLE) + # sparse_dense @reg.register_compute("nn.sparse_dense") -def compute_sparse_dense(attrs, inputs, out_type, target): +def compute_sparse_dense(attrs, inputs, out_type): """Compute definition of sparse_dense""" return [topi.nn.sparse_dense(inputs[0], inputs[1], inputs[2], inputs[3])] -@reg.register_schedule("nn.sparse_dense") -def schedule_sparse_dense(attrs, outputs, target): - """Schedule definition of batch_matmul""" - with target: - return topi.generic.schedule_sparse_dense(outputs) - +reg.register_schedule("nn.sparse_dense", strategy.schedule_sparse_dense) reg.register_pattern("nn.sparse_dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE) + # sparse_transpose @reg.register_compute("nn.sparse_transpose") -def compute_sparse_transpose(attrs, inputs, out_type, target): +def compute_sparse_transpose(attrs, inputs, out_type): """Compute definition of sparse_transpose""" return topi.nn.sparse_transpose(inputs[0], inputs[1], inputs[2]) -@reg.register_schedule("nn.sparse_transpose") -def schedule_sparse_transpose(attrs, outputs, target): - """Schedule definition of batch_matmul""" - with target: - return topi.generic.schedule_sparse_transpose(outputs) - +reg.register_schedule("nn.sparse_transpose", strategy.schedule_sparse_transpose) reg.register_pattern("nn.sparse_transpose", reg.OpPattern.OUT_ELEMWISE_FUSABLE) -# Conv1D -@reg.register_compute("nn.conv1d") -def compute_conv1d(attrs, inputs, out_type, target): - """Compute definition of conv1d""" - strides = get_const_tuple(attrs.strides) - padding = get_const_tuple(attrs.padding) - dilation = get_const_tuple(attrs.dilation) - layout = attrs.data_layout - out_dtype = attrs.out_dtype - out_dtype = (inputs[0].dtype if out_dtype in ("same", "") - else out_dtype) - - assert layout in ["NCW", "NWC"] - if dilation[0] < 1: - raise ValueError("dilation should be a positive value") - - return [topi.nn.conv1d(inputs[0], inputs[1], strides, padding, dilation, layout, out_dtype)] - - -@reg.register_schedule("nn.conv1d") -def schedule_conv1d(attrs, outs, target): - """Schedule definition of conv1d""" - layout = attrs.data_layout - - with target: - if layout == "NCW": - return topi.generic.schedule_conv1d_ncw(outs) - elif layout == "NCW": - return topi.generic.schedule_conv1d_nwc(outs) - raise ValueError("No compatible schedule") - - +# conv1d +reg.register_strategy("nn.conv1d", strategy.conv1d_strategy) reg.register_pattern("nn.conv1d", OpPattern.OUT_ELEMWISE_FUSABLE) # conv2d -def _find_conv2d_op(op): - """Find the op with conv2d in its tag by traversing.""" - if 'conv2d' in op.tag: - return op - for tensor in op.input_tensors: - op_ = _find_conv2d_op(tensor.op) - if op_ is not None: - return op_ - return None - -@reg.register_compute("nn.conv2d") -def compute_conv2d(attrs, inputs, out_type, target): - """Compute definition of conv2d""" - padding = get_const_tuple(attrs.padding) - strides = get_const_tuple(attrs.strides) - dilation = get_const_tuple(attrs.dilation) - groups = attrs.groups - layout = attrs.data_layout - kernel_layout = attrs.kernel_layout - out_dtype = attrs.out_dtype - out_dtype = (inputs[0].dtype if out_dtype in ("same", "") - else out_dtype) - - assert layout in ["NCHW", "NHWC", "NCHW4c", "HWCN"] - (dilation_h, dilation_w) = dilation - if dilation_h < 1 or dilation_w < 1: - raise ValueError("dilation should be positive value") - - def _get_out_depth(): - weight_shape = get_const_tuple(inputs[1].shape) - # NHWC layout - if kernel_layout.startswith("HW"): - return weight_shape[2] * weight_shape[3] - # NCHW layout. - # in ARM CPU contrib_spatial_pack schedule, we will prepack weight layout - if len(weight_shape) == 4: - return weight_shape[0] * weight_shape[1] - else: - assert len(weight_shape) == 5 - C, M, _, _, VC = weight_shape - return C * VC * M - - if groups == 1: - out = topi.nn.conv2d( - inputs[0], inputs[1], strides, padding, - dilation, layout, out_dtype) - elif layout == "NCHW" and _get_out_depth() == groups: - out = topi.nn.depthwise_conv2d_nchw( - inputs[0], inputs[1], strides, padding, dilation, out_dtype) - elif layout == "NHWC" and kernel_layout == "HWOI" and _get_out_depth() == groups: - out = topi.nn.depthwise_conv2d_nhwc( - inputs[0], inputs[1], strides, padding, dilation, out_dtype) - elif layout in ['NCHW', 'NCHW4c']: - out = topi.nn.group_conv2d_nchw(inputs[0], inputs[1], strides, padding, dilation, groups, - out_dtype) - else: - raise ValueError("not support arbitrary group number for now") - return [out] - - -@reg.register_schedule("nn.conv2d") -def schedule_conv2d(attrs, outs, target): - """Schedule definition of conv2d""" - groups = attrs.groups - layout = attrs.data_layout - kernel_layout = attrs.kernel_layout - - with target: - if groups == 1 and layout == "NCHW": - return topi.generic.schedule_conv2d_nchw(outs) - elif groups == 1 and layout == "NCHW4c": - return topi.generic.schedule_conv2d_nchw(outs) - elif groups == 1 and layout == "NHWC": - return topi.generic.schedule_conv2d_nhwc(outs) - elif groups == 1 and layout == "HWCN": - return topi.generic.schedule_conv2d_hwcn(outs) - elif groups != 1: - # collect in_channels to distinguish depthwise and group conv2d - op = _find_conv2d_op(outs[0].op) - assert op is not None - - is_depthwise = 'depthwise' in op.tag - if is_depthwise: - if layout == "NCHW": - # TODO(leyuan, merrymercy, Huyuwei): fold depthwise topi into conv2d. - return topi.generic.schedule_depthwise_conv2d_nchw(outs) - if layout == "NHWC" and kernel_layout == "HWOI": - return topi.generic.schedule_depthwise_conv2d_nhwc(outs) - else: - if layout in ["NCHW", "NCHW4c"]: - return topi.generic.schedule_group_conv2d_nchw(outs) - raise ValueError("No compatible schedule") - +reg.register_strategy("nn.conv2d", strategy.conv2d_strategy) +reg.register_pattern("nn.conv2d", OpPattern.OUT_ELEMWISE_FUSABLE) @reg.register_alter_op_layout("nn.conv2d") -def alter_op_layout_conv2d(attrs, inputs, tinfos): +def alter_op_layout_conv2d(attrs, inputs, tinfos, out_type): """Alternate the layout of conv2d""" - # pylint: disable=import-outside-toplevel - from ... import op - return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, op) + return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type) @reg.register_legalize("nn.conv2d") def legalize_conv2d(attrs, inputs, types): @@ -289,7 +115,6 @@ def legalize_conv2d(attrs, inputs, types): """ return topi.nn.conv2d_legalize(attrs, inputs, types) - @reg.register_convert_op_layout("nn.conv2d") def convert_conv2d(attrs, inputs, tinfos, desired_layout): """Convert Layout pass registration for conv2d op. @@ -330,82 +155,10 @@ def convert_conv2d(attrs, inputs, tinfos, desired_layout): return relay.nn.conv2d(data, weight, **new_attrs) return None -reg.register_pattern("nn.conv2d", OpPattern.OUT_ELEMWISE_FUSABLE) - # conv2d_transpose -@reg.register_compute("nn.conv2d_transpose") -def compute_conv2d_transpose(attrs, inputs, out_dtype, target): - """Compute definition of conv2d_transpose""" - padding = get_const_tuple(attrs.padding) - strides = get_const_tuple(attrs.strides) - dilation = get_const_tuple(attrs.dilation) - groups = attrs.groups - layout = attrs.data_layout - out_dtype = attrs.out_dtype - out_dtype = (inputs[0].dtype if out_dtype in ("same", "") - else out_dtype) - assert layout == "NCHW", "only support nchw for now" - assert dilation == (1, 1), "not support dilate now" - assert groups == 1, "only support groups == 1 for now" - out = topi.nn.conv2d_transpose_nchw( - inputs[0], inputs[1], strides, padding, out_dtype) - output_padding = get_const_tuple(attrs.output_padding) - out = topi.nn.pad(out, - [0, 0, 0, 0], [0, 0, output_padding[0], output_padding[1]]) - return [out] - - -@reg.register_compute("nn.conv3d") -def compute_conv3d(attrs, inputs, out_type, target): - """Compute definition of conv3d""" - padding = get_const_tuple(attrs.padding) - strides = get_const_tuple(attrs.strides) - dilation = get_const_tuple(attrs.dilation) - groups = attrs.groups - layout = attrs.data_layout - out_dtype = attrs.out_dtype - out_dtype = (inputs[0].dtype if out_dtype in ("same", "") - else out_dtype) - - assert layout in ["NCDHW", "NDHWC"] - (dilation_d, dilation_h, dilation_w) = dilation - if dilation_d < 1 or dilation_h < 1 or dilation_w < 1: - raise ValueError("dilation should be positive value") - - if groups == 1: - out = topi.nn.conv3d( - inputs[0], inputs[1], strides, padding, - dilation, layout, out_dtype) - else: - raise ValueError("not support arbitrary group number for now") - return [out] - - -@reg.register_schedule("nn.conv3d") -def schedule_conv3d(attrs, outs, target): - """Schedule definition of conv3d""" - groups = attrs.groups - layout = attrs.data_layout - - with target: - if groups == 1 and layout == "NCDHW": - return topi.generic.schedule_conv3d_ncdhw(outs) - elif groups == 1 and layout == "NDHWC": - return topi.generic.schedule_conv3d_ndhwc(outs) - - raise ValueError("No compatible schedule") - - -reg.register_pattern("nn.conv3d", OpPattern.OUT_ELEMWISE_FUSABLE) - - -@reg.register_schedule("nn.conv2d_transpose") -def schedule_conv2d_transpose(attrs, outs, target): - """Schedule definition of conv2d_transpose""" - with target: - return topi.generic.schedule_conv2d_transpose_nchw(outs) - +reg.register_strategy("nn.conv2d_transpose", strategy.conv2d_transpose_strategy) +reg.register_pattern("nn.conv2d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE) @reg.register_legalize("nn.conv2d_transpose") def legalize_conv2d_transpose(attrs, inputs, types): @@ -427,202 +180,102 @@ def legalize_conv2d_transpose(attrs, inputs, types): """ return topi.nn.conv2d_transpose_legalize(attrs, inputs, types) -reg.register_pattern("nn.conv2d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE) - -# conv1d_transpose -@reg.register_compute("nn.conv1d_transpose") -def compute_conv1d_transpose(attrs, inputs, out_dtype, target): - """Compute definition of conv1d_transpose""" - padding = get_const_tuple(attrs.padding) - strides = get_const_tuple(attrs.strides) - dilation = get_const_tuple(attrs.dilation) - groups = attrs.groups - layout = attrs.data_layout - out_dtype = attrs.out_dtype - out_dtype = (inputs[0].dtype if out_dtype in ("same", "") - else out_dtype) - assert layout == "NCW", "conv1d_transpose ncw only supported" - assert dilation == (1,), "conv1d_transpose dilation is not supported" - assert groups == 1, "conv1d_transpose groups == 1 only supported" - out = topi.nn.conv1d_transpose_ncw( - inputs[0], inputs[1], strides, padding, out_dtype) - output_padding = get_const_tuple(attrs.output_padding) - out = topi.nn.pad(out, - [0, 0, 0], [0, 0, output_padding[0]]) - return [out] +# conv3d +reg.register_strategy("nn.conv3d", strategy.conv3d_strategy) +reg.register_pattern("nn.conv3d", OpPattern.OUT_ELEMWISE_FUSABLE) -@reg.register_schedule("nn.conv1d_transpose") -def schedule_conv1d_transpose(attrs, outs, target): - """Schedule definition of conv1d_transpose""" - with target: - return topi.generic.schedule_conv1d_transpose_ncw(outs) +# conv1d_transpose +reg.register_strategy("nn.conv1d_transpose", strategy.conv1d_transpose_strategy) reg.register_pattern("nn.conv1d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE) + # bias_add -reg.register_schedule("nn.bias_add", schedule_injective) +reg.register_strategy_injective("nn.bias_add") reg.register_pattern("nn.bias_add", OpPattern.BROADCAST) # max_pool1d -@reg.register_schedule("nn.max_pool1d") -def schedule_max_pool1d(attrs, outs, target): - """Schedule definition of max_pool1d""" - layout = attrs.layout - with target: - return topi.generic.schedule_pool(outs, layout) - - +reg.register_schedule("nn.max_pool1d", strategy.schedule_pool) reg.register_pattern("nn.max_pool1d", OpPattern.OUT_ELEMWISE_FUSABLE) # max_pool2d -@reg.register_schedule("nn.max_pool2d") -def schedule_max_pool2d(attrs, outs, target): - """Schedule definition of max_pool2d""" - layout = attrs.layout - with target: - return topi.generic.schedule_pool(outs, layout) - - +reg.register_schedule("nn.max_pool2d", strategy.schedule_pool) reg.register_pattern("nn.max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE) # max_pool3d -@reg.register_schedule("nn.max_pool3d") -def schedule_max_pool3d(attrs, outs, target): - """Schedule definition of max_pool3d""" - layout = attrs.layout - with target: - return topi.generic.schedule_pool(outs, layout) - - +reg.register_schedule("nn.max_pool3d", strategy.schedule_pool) reg.register_pattern("nn.max_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE) # avg_pool1d -@reg.register_schedule("nn.avg_pool1d") -def schedule_avg_pool1d(attrs, outs, target): - """Schedule definition of avg_pool1d""" - layout = attrs.layout - with target: - return topi.generic.schedule_pool(outs, layout) - - +reg.register_schedule("nn.avg_pool1d", strategy.schedule_pool) reg.register_pattern("nn.avg_pool1d", OpPattern.OUT_ELEMWISE_FUSABLE) # avg_pool2d -@reg.register_schedule("nn.avg_pool2d") -def schedule_avg_pool2d(attrs, outs, target): - """Schedule definition of avg_pool2d""" - layout = attrs.layout - with target: - return topi.generic.schedule_pool(outs, layout) - +reg.register_schedule("nn.avg_pool2d", strategy.schedule_pool) reg.register_pattern("nn.avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE) # avg_pool3d -@reg.register_schedule("nn.avg_pool3d") -def schedule_avg_pool3d(attrs, outs, target): - """Schedule definition of avg_pool3d""" - layout = attrs.layout - with target: - return topi.generic.schedule_pool(outs, layout) - - +reg.register_schedule("nn.avg_pool3d", strategy.schedule_pool) reg.register_pattern("nn.avg_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE) # max_pool2d_grad -@reg.register_schedule("nn.max_pool2d_grad") -def schedule_max_pool2d_grad(attrs, outs, target): - """Schedule definition of max_pool2d_grad""" - with target: - return topi.generic.schedule_pool_grad(outs) - - +reg.register_schedule("nn.max_pool2d_grad", strategy.schedule_pool_grad) reg.register_pattern("nn.max_pool2d_grad", OpPattern.OUT_ELEMWISE_FUSABLE) # avg_pool2d_grad -@reg.register_schedule("nn.avg_pool2d_grad") -def schedule_avg_pool2d_grad(attrs, outs, target): - """Schedule definition of avg_pool2d_grad""" - with target: - return topi.generic.schedule_pool_grad(outs) - - +reg.register_schedule("nn.avg_pool2d_grad", strategy.schedule_pool_grad) reg.register_pattern("nn.avg_pool2d_grad", OpPattern.OUT_ELEMWISE_FUSABLE) # global_max_pool2d -@reg.register_schedule("nn.global_max_pool2d") -def schedule_global_max_pool2d(_, outs, target): - """Schedule definition of global_max_pool2d""" - with target: - return topi.generic.schedule_adaptive_pool(outs) - - +reg.register_schedule("nn.global_max_pool2d", strategy.schedule_adaptive_pool) reg.register_pattern("nn.global_max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE) # global_avg_pool2d -@reg.register_schedule("nn.global_avg_pool2d") -def schedule_global_avg_pool2d(_, outs, target): - """Schedule definition of global_avg_pool2d""" - with target: - return topi.generic.schedule_adaptive_pool(outs) - - +reg.register_schedule("nn.global_avg_pool2d", strategy.schedule_adaptive_pool) reg.register_pattern("nn.global_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE) # leaky_relu -reg.register_schedule("nn.leaky_relu", schedule_broadcast) +reg.register_strategy_broadcast("nn.leaky_relu") reg.register_pattern("nn.leaky_relu", OpPattern.ELEMWISE) + # prelu -reg.register_schedule("nn.prelu", schedule_broadcast) +reg.register_strategy_broadcast("nn.prelu") reg.register_pattern("nn.prelu", OpPattern.BROADCAST) + # flatten -reg.register_schedule("nn.batch_flatten", schedule_broadcast) +reg.register_strategy_broadcast("nn.batch_flatten") reg.register_pattern("nn.batch_flatten", OpPattern.INJECTIVE) # lrn @reg.register_compute("nn.lrn") -def compute_lrn(attrs, inputs, out_dtype, target): +def compute_lrn(attrs, inputs, out_dtype): """Compute definition of lrn""" assert len(inputs) == 1 return [topi.nn.lrn(inputs[0], attrs.size, attrs.axis, attrs.alpha, attrs.beta, attrs.bias)] - -@reg.register_schedule("nn.lrn") -def schedule_lrn(attrs, outs, target): - """Schedule definition of lrn""" - with target: - return topi.generic.schedule_lrn(outs) - - +reg.register_schedule("nn.lrn", strategy.schedule_lrn) reg.register_pattern("nn.lrn", OpPattern.OPAQUE) # upsampling -reg.register_schedule("nn.upsampling", reg.schedule_injective) - - -def schedule_upsampling(_, outs, target): - """Schedule definition of upsampling""" - with target: - return topi.generic.schedule_injective(outs) - @reg.register_compute("nn.upsampling") -def compute_upsampling(attrs, inputs, out_dtype, target): +def compute_upsampling(attrs, inputs, out_dtype): scale_h = attrs.scale_h scale_w = attrs.scale_w layout = attrs.layout @@ -630,16 +283,12 @@ def compute_upsampling(attrs, inputs, out_dtype, target): align_corners = attrs.align_corners return [topi.nn.upsampling(inputs[0], scale_h, scale_w, layout, method, align_corners)] -# upsampling3d -reg.register_schedule("nn.upsampling3d", reg.schedule_injective) +reg.register_strategy_injective("nn.upsampling") -def schedule_upsampling3d(_, outs, target): - """Schedule definition of upsampling3d""" - with target: - return topi.generic.schedule_injective(outs) +# upsampling3d @reg.register_compute("nn.upsampling3d") -def compute_upsampling3d(attrs, inputs, out_dtype, target): +def compute_upsampling3d(attrs, inputs, out_dtype): scale_d = attrs.scale_d scale_h = attrs.scale_h scale_w = attrs.scale_w @@ -649,12 +298,14 @@ def compute_upsampling3d(attrs, inputs, out_dtype, target): return [topi.nn.upsampling3d(inputs[0], scale_d, scale_h, scale_w, layout, method,\ coordinate_transformation_mode)] +reg.register_strategy_injective("nn.upsampling3d") + + # pad -reg.register_schedule("nn.pad", schedule_broadcast) +reg.register_strategy_broadcast("nn.pad") -# mirror_pad -reg.register_schedule("nn.mirror_pad", schedule_broadcast) +# mirror_pad @reg.register_compute("nn.mirror_pad") def compute_mirror_pad(attrs, inputs, out_dtype, target): pad_before, pad_after = list(zip(*attrs.pad_width)) @@ -662,284 +313,78 @@ def compute_mirror_pad(attrs, inputs, out_dtype, target): out = topi.nn.mirror_pad(inputs[0], pad_before=pad_before, pad_after=pad_after, mode=mode) return [out] -# winograd related operators -@reg.register_compute("nn.contrib_conv2d_winograd_without_weight_transform") -def compute_contrib_conv2d_winograd_without_weight_transform(attrs, inputs, out_dtype, target): - """Compute definition of conv2d_winograd_without_weight_transform""" - # pylint: disable=assignment-from-no-return - padding = attrs.get_int_tuple("padding") - strides = attrs.get_int_tuple("strides") - dilation = attrs.get_int_tuple("dilation") - groups = attrs.get_int("groups") - data_layout = attrs.get_str("data_layout") - out_dtype = attrs.get_str("out_dtype") - tile_size = attrs.get_int("tile_size") - out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype - assert dilation == (1, 1), "Do not support dilate now" - assert groups == 1, "Do not supoort arbitrary group number" - - out = topi.nn.conv2d_winograd_without_weight_transform( - inputs[0], inputs[1], strides, padding, dilation, data_layout, - out_dtype, tile_size) - - return [out] - - -@reg.register_schedule("nn.contrib_conv2d_winograd_without_weight_transform") -def schedule_contrib_conv2d_winograd_without_weight_transform(attrs, outs, target): - """Schedule definition of conv2d_winograd_without_weight_transform""" - with target: - return topi.generic.schedule_conv2d_winograd_without_weight_transform(outs) +reg.register_strategy_broadcast("nn.mirror_pad") +# conv2d_winograd related operators +reg.register_strategy("nn.contrib_conv2d_winograd_without_weight_transform", + strategy.conv2d_winograd_without_weight_transfrom_strategy) reg.register_pattern("nn.contrib_conv2d_winograd_without_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE) @reg.register_compute("nn.contrib_conv2d_winograd_weight_transform") -def compute_contrib_conv2d_winograd_weight_transform(attrs, inputs, out_dtype, target): +def compute_contrib_conv2d_winograd_weight_transform(attrs, inputs, out_dtype): """Compute definition of contrib_conv2d_winograd_weight_transform""" out = topi.nn.conv2d_winograd_weight_transform( inputs[0], attrs.get_int('tile_size')) return [out] - -@reg.register_schedule("nn.contrib_conv2d_winograd_weight_transform") -def schedule_contrib_conv2d_winograd_weight_transform(attrs, outs, target): - """Schedule definition of contrib_conv2d_winograd_weight_transform""" - with target: - return topi.generic.schedule_conv2d_winograd_weight_transform(outs) - - +reg.register_schedule("nn.contrib_conv2d_winograd_weight_transform", + strategy.schedule_conv2d_winograd_weight_transform) reg.register_pattern("nn.contrib_conv2d_winograd_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE) - -# winograd nnpack related operators -@reg.register_compute("nn.contrib_conv2d_winograd_nnpack_without_weight_transform") -def compute_contrib_conv2d_winograd_nnpack_without_weight_transform( - attrs, inputs, out_dtype, target): - """Compute definition of conv2d_winograd_nnpack_without_weight_transform""" - # pylint: disable=assignment-from-no-return - padding = attrs.get_int_tuple("padding") - strides = attrs.get_int_tuple("strides") - dilation = attrs.get_int_tuple("dilation") - groups = attrs.get_int("groups") - data_layout = attrs.get_str("data_layout") - out_dtype = attrs.get_str("out_dtype") - out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype - assert dilation == (1, 1), "Do not support dilate now" - assert groups == 1, "Do not supoort arbitrary group number" - - # No bias - out = topi.nn.conv2d_winograd_nnpack_without_weight_transform( - inputs[0], inputs[1], None, strides, padding, dilation, data_layout, - out_dtype) - - return [out] - - -@reg.register_schedule("nn.contrib_conv2d_winograd_nnpack_without_weight_transform") -def schedule_contrib_conv2d_winograd_nnpack_without_weight_transform(attrs, outs, target): - """Schedule definition of conv2d_winograd_nnpack_without_weight_transform""" - with target: - return topi.generic.schedule_conv2d_winograd_nnpack_without_weight_transform(outs) - - -reg.register_pattern("nn.contrib_conv2d_winograd_nnpack_without_weight_transform", - OpPattern.OPAQUE) - - @reg.register_compute("nn.contrib_conv2d_winograd_nnpack_weight_transform") -def compute_contrib_conv2d_winograd_nnpack_weight_transform(attrs, inputs, out_dtype, target): +def compute_contrib_conv2d_winograd_nnpack_weight_transform(attrs, inputs, out_dtype): """Compute definition of contrib_conv2d_winograd_nnpack_weight_transform""" convolution_algorithm = attrs.get_int('convolution_algorithm') out = topi.nn.conv2d_winograd_nnpack_weight_transform( inputs[0], convolution_algorithm, out_dtype) return [out] - -@reg.register_schedule("nn.contrib_conv2d_winograd_nnpack_weight_transform") -def schedule_contrib_conv2d_winograd_nnpack_weight_transform(attrs, outs, target): - """Schedule definition of contrib_conv2d_winograd_nnpack_weight_transform""" - with target: - return topi.generic.schedule_conv2d_winograd_nnpack_weight_transform(outs) - - +reg.register_schedule("nn.contrib_conv2d_winograd_nnpack_weight_transform", + strategy.schedule_conv2d_winograd_nnpack_weight_transform) reg.register_pattern("nn.contrib_conv2d_winograd_nnpack_weight_transform", OpPattern.OPAQUE) -@reg.register_compute("nn.contrib_conv2d_NCHWc") -def compute_contrib_conv2d_NCHWc(attrs, inputs, out_dtype, target): - """Compute definition of conv2d NCHWc""" - # pylint: disable=assignment-from-no-return - padding = attrs.get_int_tuple("padding") - strides = attrs.get_int_tuple("strides") - dilation = attrs.get_int_tuple("dilation") - data_layout = attrs.get_str("data_layout") - out_layout = attrs.get_str("out_layout") - out_dtype = attrs.get_str("out_dtype") - out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype - - out = topi.nn.conv2d_NCHWc(inputs[0], inputs[1], strides, padding, dilation, - data_layout, out_layout, out_dtype) - return [out] - - -@reg.register_schedule("nn.contrib_conv2d_NCHWc") -def schedule_contrib_conv2d_NCHWc(attrs, outs, target): - """Schedule definition of contrib_conv2d_NCHWc""" - with target: - return topi.generic.schedule_conv2d_NCHWc(outs) - - +# conv2d_NCHWc +reg.register_strategy("nn.contrib_conv2d_NCHWc", strategy.conv2d_NCHWc_strategy) reg.register_pattern("nn.contrib_conv2d_NCHWc", OpPattern.OUT_ELEMWISE_FUSABLE) - -@reg.register_compute("nn.contrib_conv2d_NCHWc_int8") -def compute_contrib_conv2d_NCHWc_int8(attrs, inputs, out_dtype, target): - """Compute definition of conv2d NCHWc""" - # pylint: disable=assignment-from-no-return - padding = attrs.get_int_tuple("padding") - strides = attrs.get_int_tuple("strides") - dilation = attrs.get_int_tuple("dilation") - data_layout = attrs.get_str("data_layout") - out_layout = attrs.get_str("out_layout") - out_dtype = attrs.get_str("out_dtype") - out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype - - out = topi.nn.conv2d_NCHWc_int8(inputs[0], inputs[1], strides, padding, dilation, - data_layout, out_layout, out_dtype) - return [out] - - -@reg.register_schedule("nn.contrib_conv2d_NCHWc_int8") -def schedule_contrib_conv2d_NCHWc_int8(attrs, outs, target): - """Schedule definition of contrib_conv2d_NCHWc_int8""" - with target: - return topi.generic.schedule_conv2d_NCHWc_int8(outs) - - -reg.register_pattern("nn.contrib_conv2d_NCHWc_int8", - OpPattern.OUT_ELEMWISE_FUSABLE) - - -@reg.register_compute("nn.contrib_depthwise_conv2d_NCHWc") -def compute_contrib_depthwise_conv2d_NCHWc(attrs, inputs, out_dtype, target): - """Compute definition of depthwise conv2d NCHWc""" - # pylint: disable=assignment-from-no-return - padding = attrs.get_int_tuple("padding") - strides = attrs.get_int_tuple("strides") - dilation = attrs.get_int_tuple("dilation") - data_layout = attrs.get_str("data_layout") - out_layout = attrs.get_str("out_layout") - out_dtype = attrs.get_str("out_dtype") - out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype - - out = topi.nn.depthwise_conv2d_NCHWc(inputs[0], inputs[1], strides, padding, dilation, - data_layout, out_layout, out_dtype) - return [out] - - -@reg.register_schedule("nn.contrib_depthwise_conv2d_NCHWc") -def schedule_contrib_depthwise_conv2d_NCHWc(attrs, outs, target): - """Schedule definition of contrib_conv2d_NCHWc""" - with target: - return topi.generic.schedule_depthwise_conv2d_NCHWc(outs) - - +# depthwise_conv2d_NCHWc +reg.register_strategy("nn.contrib_depthwise_conv2d_NCHWc", + strategy.depthwise_conv2d_NCHWc_strategy) reg.register_pattern("nn.contrib_depthwise_conv2d_NCHWc", OpPattern.OUT_ELEMWISE_FUSABLE) -@reg.register_compute("nn.deformable_conv2d") -def compute_deformable_conv2d(attrs, inputs, out_dtype, target): - """Compute definition of deformable_conv2d""" - padding = get_const_tuple(attrs.padding) - strides = get_const_tuple(attrs.strides) - dilation = get_const_tuple(attrs.dilation) - deformable_groups = attrs.deformable_groups - groups = attrs.groups - out_dtype = attrs.out_dtype - out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype - with target: - out = topi.nn.deformable_conv2d_nchw(inputs[0], inputs[1], inputs[2], strides, padding, - dilation, deformable_groups, groups, out_dtype) - return [out] - - -@reg.register_schedule("nn.deformable_conv2d") -def schedule_deformable_conv2d(attrs, outs, target): - """Schedule definition of deformable_conv2d""" - with target: - return topi.generic.schedule_deformable_conv2d_nchw(outs) - - +# deformable_conv2d +reg.register_strategy("nn.deformable_conv2d", strategy.deformable_conv2d_strategy) reg.register_pattern("nn.deformable_conv2d", OpPattern.OUT_ELEMWISE_FUSABLE) +# bitpack @reg.register_compute("nn.bitpack") -def compute_bitpack(attrs, inputs, out_dtype, target): +def compute_bitpack(attrs, inputs, out_dtype): """Compute definition for bitpack""" bits = attrs.bits pack_axis = attrs.pack_axis bit_axis = attrs.bit_axis pack_type = attrs.pack_type name = attrs.name - with target: - out = topi.nn.bitpack(inputs[0], bits, pack_axis, bit_axis, pack_type, - name) + out = topi.nn.bitpack(inputs[0], bits, pack_axis, bit_axis, pack_type, name) return [out] -@reg.register_schedule("nn.bitpack") -def schedule_bitpack(attrs, outs, target): - with target: - return topi.generic.schedule_bitpack(outs) - +reg.register_schedule("nn.bitpack", strategy.schedule_bitpack) reg.register_pattern("nn.bitpack", OpPattern.INJECTIVE) -@reg.register_compute("nn.bitserial_conv2d") -def compute_bitserial_conv2d(attrs, inputs, out_dtype, target): - """Compute definition for bitserial conv2d.""" - padding = get_const_tuple(attrs.padding) - strides = get_const_tuple(attrs.strides) - activation_bits = attrs.activation_bits - weight_bits = attrs.weight_bits - layout = attrs.data_layout - pack_dtype = attrs.pack_dtype - out_dtype = attrs.out_dtype - unipolar = attrs.unipolar - if layout == 'NCHW': - with target: - out = topi.nn.bitserial_conv2d_nchw( - inputs[0], inputs[1], strides, padding, activation_bits, - weight_bits, pack_dtype, out_dtype, unipolar) - elif layout == 'NHWC': - with target: - out = topi.nn.bitserial_conv2d_nhwc( - inputs[0], inputs[1], strides, padding, activation_bits, - weight_bits, pack_dtype, out_dtype, unipolar) - else: - raise ValueError("Data layout not supported.") - - return [out] - - -@reg.register_schedule("nn.bitserial_conv2d") -def schedule_bitserial_conv2d(attrs, outs, target): - """Schedule definition for bitserial conv2d.""" - layout = attrs.data_layout - if layout == 'NCHW': - with target: - return topi.generic.schedule_bitserial_conv2d_nchw(outs) - elif layout == 'NHWC': - with target: - return topi.generic.schedule_bitserial_conv2d_nhwc(outs) - else: - raise ValueError("Data layout not supported.") +# bitserial_conv2d +reg.register_strategy("nn.bitserial_conv2d", strategy.bitserial_conv2d_strategy) +reg.register_pattern("nn.bitserial_conv2d", OpPattern.OUT_ELEMWISE_FUSABLE) @reg.register_legalize("nn.bitserial_conv2d") def legalize_bitserial_conv2d(attrs, inputs, types): @@ -962,79 +407,58 @@ def legalize_bitserial_conv2d(attrs, inputs, types): return topi.nn.bitserial_conv2d_legalize(attrs, inputs, types) -reg.register_pattern("nn.bitserial_conv2d", OpPattern.OUT_ELEMWISE_FUSABLE) - - # bitserial_dense -@reg.register_compute("nn.bitserial_dense") -def compute_bitserial_dense(attrs, inputs, out_type, target): - """Compute definition of bitserial_dense""" - data_bits = attrs.data_bits - weight_bits = attrs.weight_bits - pack_dtype = attrs.pack_dtype - out_dtype = attrs.out_dtype - out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype - unipolar = attrs.unipolar - return [ - topi.nn.bitserial_dense( - inputs[0], - inputs[1], - data_bits, - weight_bits, - pack_dtype, - out_dtype, - unipolar) - ] - - -@reg.register_schedule("nn.bitserial_dense") -def schedule_bitserial_dense(attrs, outputs, target): - """Schedule definition of bitserial_dense""" - with target: - return topi.generic.schedule_bitserial_dense(outputs) - - +reg.register_strategy("nn.bitserial_dense", strategy.bitserial_dense_strategy) reg.register_pattern("nn.bitserial_dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE) -reg.register_pattern("nn.cross_entropy", OpPattern.OPAQUE) - +# cross_entropy @reg.register_compute("nn.cross_entropy") -def compute_cross_entropy(attrs, inputs, out_dtype, target): +def compute_cross_entropy(attrs, inputs, out_dtype): x, y = inputs return [-topi.sum(topi.log(x) * y) / x.shape[0]] +reg.register_strategy_reduce("nn.cross_entropy") +reg.register_pattern("nn.cross_entropy", OpPattern.OPAQUE) -reg.register_pattern("nn.cross_entropy_with_logits", OpPattern.OPAQUE) +# cross_entropy_with_logits @reg.register_compute("nn.cross_entropy_with_logits") -def compute_cross_entropy_with_logits(attrs, inputs, out_dtype, target): +def compute_cross_entropy_with_logits(attrs, inputs, out_dtype): x, y = inputs return [-topi.sum(x * y) / x.shape[0]] +reg.register_strategy_reduce("nn.cross_entropy_with_logits") +reg.register_pattern("nn.cross_entropy_with_logits", OpPattern.OPAQUE) + +# depth_to_space @reg.register_compute("nn.depth_to_space") -def compute_depth_to_space(attrs, inputs, out_dtype, target): +def compute_depth_to_space(attrs, inputs, out_dtype): block_size = attrs.block_size layout = attrs.layout mode = attrs.mode return [topi.nn.depth_to_space(inputs[0], block_size, layout=layout, mode=mode)] -reg.register_schedule("nn.depth_to_space", schedule_injective) +reg.register_strategy_injective("nn.depth_to_space") reg.register_pattern("nn.depth_to_space", OpPattern.INJECTIVE) +# space_to_depth @reg.register_compute("nn.space_to_depth") -def compute_space_to_depth(attrs, inputs, out_dtype, target): +def compute_space_to_depth(attrs, inputs, out_dtype): block_size = attrs.block_size layout = attrs.layout return [topi.nn.space_to_depth(inputs[0], block_size, layout=layout)] -reg.register_schedule("nn.space_to_depth", schedule_injective) +reg.register_strategy_injective("nn.space_to_depth") reg.register_pattern("nn.space_to_depth", OpPattern.INJECTIVE) -# shape func +##################### +# Shape functions # +##################### + @script def _conv2d_NCHWc_shape_func(dshape, kshape, strides, padding, dilation, oc_bn): out = output_tensor((dshape.shape[0],), "int64") diff --git a/python/tvm/relay/op/nn/nn.py b/python/tvm/relay/op/nn/nn.py index c76d5ed371354..abc9791edd750 100644 --- a/python/tvm/relay/op/nn/nn.py +++ b/python/tvm/relay/op/nn/nn.py @@ -204,7 +204,8 @@ def conv2d(data, # TODO enforce 4-way padding in topi/nn/conv2d after #4644 merged # convert 2-way padding to 4-way padding padding = get_pad_tuple2d(padding) - + if not out_layout: + out_layout = data_layout return _make.conv2d(data, weight, strides, padding, dilation, groups, channels, kernel_size, data_layout, kernel_layout, out_layout, out_dtype) @@ -298,7 +299,8 @@ def conv3d(data, dilation = (dilation, dilation, dilation) if isinstance(padding, int): padding = (padding, padding, padding) - + if not out_layout: + out_layout = data_layout return _make.conv3d(data, weight, strides, padding, dilation, groups, channels, kernel_size, data_layout, kernel_layout, out_layout, out_dtype) @@ -367,6 +369,8 @@ def conv2d_transpose(data, """ # convert 2-way padding to 4-way padding padding = get_pad_tuple2d(padding) + if not out_layout: + out_layout = data_layout return _make.conv2d_transpose(data, weight, strides, padding, dilation, groups, channels, kernel_size, data_layout, kernel_layout, out_layout, output_padding, out_dtype) @@ -433,6 +437,8 @@ def conv1d_transpose(data, result : tvm.relay.Expr The computed result. """ + if not out_layout: + out_layout = data_layout return _make.conv1d_transpose(data, weight, strides, padding, dilation, groups, channels, kernel_size, data_layout, kernel_layout, out_layout, output_padding, out_dtype) @@ -1770,74 +1776,6 @@ def contrib_conv2d_winograd_without_weight_transform(data, kernel_layout, out_layout, out_dtype) -def contrib_conv2d_winograd_nnpack_without_weight_transform(data, - weight, - strides=(1, 1), - padding=(0, 0), - dilation=(1, 1), - groups=1, - channels=None, - kernel_size=None, - data_layout="NCHW", - kernel_layout="OIHW", - out_layout="", - out_dtype=""): - r"""2D convolution with the NNPACK implementation of winograd algorithm. - - The basic parameters are the same as the ones in vanilla conv2d. - It assumes the weight is pre-transformed by nn.contrib_conv2d_winograd_nnpack_weight_transform - - Parameters - ---------- - data : tvm.relay.Expr - The input data to the operator. - - weight : tvm.relay.Expr - The weight expressions. - - strides : tuple of int, optional - The strides of convolution. - - padding : tuple of int, optional - The padding of convolution on both sides of inputs before convolution. - - dilation : tuple of int, optional - Specifies the dilation rate to be used for dilated convolution. - - groups : int, optional - Number of groups for grouped convolution. - - channels : int, optional - Number of output channels of this convolution. - - kernel_size : tuple of int, optional - The spatial of the convolution kernel. - - data_layout : str, optional - Layout of the input. - - kernel_layout : str, optional - Layout of the weight. - - out_layout : str, optional - Layout of the output, by default, out_layout is the same as data_layout - - out_dtype : str, optional - Specifies the output data type for mixed precision conv2d. - - Returns - ------- - result : tvm.relay.Expr - The computed result. - """ - # convert 2-way padding to 4-way padding - padding = get_pad_tuple2d(padding) - return _make.contrib_conv2d_winograd_nnpack_without_weight_transform( - data, weight, strides, padding, dilation, - groups, channels, kernel_size, data_layout, - kernel_layout, out_layout, out_dtype) - - def contrib_conv2d_nchwc(data, kernel, strides=(1, 1), @@ -1972,73 +1910,6 @@ def contrib_depthwise_conv2d_nchwc(data, groups, channels, kernel_size, data_layout, kernel_layout, out_layout, out_dtype) -def contrib_conv2d_nchwc_int8(data, - kernel, - strides=(1, 1), - padding=(0, 0), - dilation=(1, 1), - groups=1, - channels=None, - kernel_size=None, - data_layout="NCHW8c", - kernel_layout="OIHW", - out_layout="", - out_dtype=""): - r"""Variant of 2D convolution. It deals with only int8 inputs. - - This operator takes the weight as the convolution kernel - and convolves it with data to produce an output, following a specialized - NCHWc data layout. - - Parameters - ---------- - data : tvm.relay.Expr - The input data to the operator. - - kernel : tvm.relay.Expr - The kernel expressions. - - strides : tuple of int, optional - The strides of convolution. - - padding : tuple of int, optional - The padding of convolution on both sides of inputs before convolution. - - dilation : tuple of int, optional - Specifies the dilation rate to be used for dilated convolution. - - groups : int, optional - Number of groups for grouped convolution. - - channels : int, optional - Number of output channels of this convolution. - - kernel_size : tuple of int, optional - The spatial of the convolution kernel. - - data_layout : str, optional - Layout of the input. - - kernel_layout : str, optional - Layout of the weight. - - out_layout : str, optional - Layout of the output, by default, out_layout is the same as data_layout - - out_dtype : str, optional - Specifies the output data type for mixed precision conv2d. - - Returns - ------- - result : tvm.relay.Expr - The computed result. - """ - # convert 2-way padding to 4-way padding - padding = get_pad_tuple2d(padding) - return _make.contrib_conv2d_NCHWc_int8(data, kernel, strides, padding, dilation, - groups, channels, kernel_size, data_layout, - kernel_layout, out_layout, out_dtype) - def contrib_conv2d_winograd_weight_transform(weight, tile_size): diff --git a/python/tvm/relay/op/op.py b/python/tvm/relay/op/op.py index c6d301213e98d..0a1500203db20 100644 --- a/python/tvm/relay/op/op.py +++ b/python/tvm/relay/op/op.py @@ -14,15 +14,15 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -#pylint: disable=unused-argument +#pylint: disable=unused-argument,invalid-name """The base node types for the Relay language.""" -import topi import tvm._ffi from tvm.driver import lower, build from ..base import register_relay_node from ..expr import RelayExpr from ...api import register_func +from ...target import get_native_generic_func, GenericFunc from . import _make @register_relay_node @@ -143,21 +143,47 @@ class OpPattern(object): OPAQUE = 8 -def register_schedule(op_name, schedule=None, level=10): - """Register schedule function for an op +@register_relay_node +class OpImplement(Expr): + """Operator implementation""" + def compute(self, attrs, inputs, out_type): + return _OpImplementCompute(self, attrs, inputs, out_type) - Parameters - ---------- - op_name : str - The name of the op. + def schedule(self, attrs, outs, target): + return _OpImplementSchedule(self, attrs, outs, target) - schedule : function (attrs: Attrs, outs: List[Tensor], target: Target) -> sch: Schedule - The schedule function. - level : int - The priority level - """ - return register(op_name, "FTVMSchedule", schedule, level) +@register_relay_node +class OpSpecialization(Expr): + """Operator specialization""" + + +@register_relay_node +class OpStrategy(Expr): + def __init__(self): + self.__init_handle_by_constructor__(_make.OpStrategy) + + def add_implement(self, compute, schedule, plevel=10): + _OpStrategyAddImplement(self, compute, schedule, plevel) + + +def wrap_fstrategy(compute, schedule): + def fstrategy(attrs, inputs, out_type, target): + strategy = OpStrategy() + strategy.add_implement(compute, schedule) + return strategy + return fstrategy + + +def create_simple_fstrategy(op_name, schedule): + assert hasattr(schedule, "dispatch_dict") + compute = get(op_name).get_attr("FTVMCompute") + assert compute is not None, "FTVMCompute is not registered for op %s" % op_name + fstrategy = get_native_generic_func("{}_strategy".format(op_name)) + fstrategy.set_default(wrap_fstrategy(compute, schedule.fdefault)) + for key, sch in schedule.dispatch_dict.items(): + fstrategy.register(wrap_fstrategy(compute, sch), [key]) + return fstrategy def register_compute(op_name, compute=None, level=10): @@ -178,6 +204,30 @@ def register_compute(op_name, compute=None, level=10): return register(op_name, "FTVMCompute", compute, level) +def register_strategy(op_name, fstrategy=None, level=10): + if not isinstance(fstrategy, GenericFunc): + assert hasattr(fstrategy, "generic_func_node") + fstrategy = fstrategy.generic_func_node + return register(op_name, "FTVMStrategy", fstrategy, level) + + +def register_schedule(op_name, schedule, level=10): + fstrategy = create_simple_fstrategy(op_name, schedule) + return register_strategy(op_name, fstrategy, level) + + +def register_strategy_injective(op_name, level=10): + return register_schedule(op_name, _schedule_injective, level) + + +def register_strategy_broadcast(op_name, level=10): + return register_schedule(op_name, _schedule_injective, level) + + +def register_strategy_reduce(op_name, level=10): + return register_schedule(op_name, _schedule_reduce, level) + + def register_alter_op_layout(op_name, alter_layout=None, level=10): """Register alter op layout function for an op @@ -245,6 +295,7 @@ def register_pattern(op_name, pattern, level=10): """ return register(op_name, "TOpPattern", pattern, level) + def register_gradient(op_name, fgradient=None, level=10): """Register operator pattern for an op. @@ -261,6 +312,7 @@ def register_gradient(op_name, fgradient=None, level=10): """ return register(op_name, "FPrimalGradient", fgradient, level) + def register_shape_func(op_name, data_dependant, shape_func=None, level=10): """Register operator shape function for an op. @@ -290,18 +342,8 @@ def _lower(name, schedule, inputs, outputs): def _build(lowered_funcs): return build(lowered_funcs, target="llvm") - -def schedule_injective(attrs, outputs, target): - """Generic schedule for binary broadcast.""" - with target: - return topi.generic.schedule_injective(outputs) - - -def schedule_concatenate(attrs, outputs, target): - """Generic schedule for concatinate.""" - with target: - return topi.generic.schedule_concatenate(outputs) - +_schedule_injective = None +_schedule_reduce = None __DEBUG_COUNTER__ = 0 diff --git a/python/tvm/relay/op/strategy/__init__.py b/python/tvm/relay/op/strategy/__init__.py new file mode 100644 index 0000000000000..cbb9eb6470e7b --- /dev/null +++ b/python/tvm/relay/op/strategy/__init__.py @@ -0,0 +1,30 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# pylint: disable=wildcard-import +"""Relay op strategies.""" +from __future__ import absolute_import as _abs + +from .generic import * +from . import x86 +from . import arm_cpu +from . import cuda +from . import hls +from . import mali +from . import opengl +from . import rocm +from . import intel_graphics diff --git a/python/tvm/relay/op/strategy/arm_cpu.py b/python/tvm/relay/op/strategy/arm_cpu.py new file mode 100644 index 0000000000000..72b5b1aa5b793 --- /dev/null +++ b/python/tvm/relay/op/strategy/arm_cpu.py @@ -0,0 +1,203 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Definition of ARM CPU operator strategy.""" +# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import +from __future__ import absolute_import + +import re +import logging + +import topi +from .generic import * +from .. import op as _op + +logger = logging.getLogger('strategy') + +@schedule_injective.register("arm_cpu") +def schedule_injective_arm_cpu(_, outs, target): + """schedule injective ops for arm cpu""" + with target: + return topi.arm_cpu.schedule_injective(outs) + +@schedule_concatenate.register("arm_cpu") +def schedule_concatenate_arm_cpu(_, outs, target): + """schedule concatenate for arm cpu""" + with target: + return topi.arm_cpu.schedule_concatenate(outs) + +@conv2d_strategy.register("arm_cpu") +def conv2d_strategy_arm_cpu(attrs, inputs, out_type, target): + """conv2d arm cpu strategy""" + strategy = _op.OpStrategy() + data, kernel = inputs + dilation_h, dilation_w = attrs.get_int_tuple("dilation") + stride_h, stride_w = attrs.get_int_tuple("strides") + padding = attrs.get_int_tuple("padding") + groups = attrs.groups + layout = attrs.data_layout + kernel_layout = attrs.kernel_layout + if dilation_h < 1 or dilation_w < 1: + raise ValueError("dilation should be positive value") + + if groups == 1: + if layout == "NCHW": + assert kernel_layout == "OIHW" + strategy.add_implement( + wrap_compute_conv2d(topi.arm_cpu.conv2d_nchw_spatial_pack), + wrap_topi_schedule(topi.arm_cpu.schedule_conv2d_nchw_spatial_pack)) + + _, _, kh, kw = get_const_tuple(kernel.shape) + pt, pl, pb, pr = topi.nn.get_pad_tuple(padding, (kh, kw)) + if kh == 3 and kw == 3 and stride_h == 1 and stride_w == 1 and \ + dilation_h == 1 and dilation_w == 1: + strategy.add_implement( + wrap_compute_conv2d(topi.arm_cpu.conv2d_nchw_winograd), + wrap_topi_schedule(topi.arm_cpu.schedule_conv2d_nchw_winograd), + 15) + if pt == 1 and pb == 1 and pl == 1 and pr == 1: + strategy.add_implement( + wrap_compute_conv2d_winograd_nnpack( + topi.arm_cpu.conv2d_nchw_winograd_nnpack), + wrap_topi_schedule(topi.arm_cpu.schedule_conv2d_nchw_winograd_nnpack), + 13) + elif layout == "HWCN": + assert kernel_layout == "HWIO" + logger.warning("conv2d with layout HWCN is not optimized for arm cpu.") + strategy.add_implement( + wrap_compute_conv2d(topi.nn.conv2d_hwcn), + wrap_topi_schedule(topi.generic.schedule_conv2d_hwcn)) + elif layout == "NHWC": + assert kernel_layout == "HWIO" + strategy.add_implement( + wrap_compute_conv2d(topi.arm_cpu.conv2d_nhwc_spatial_pack), + wrap_topi_schedule(topi.arm_cpu.schedule_conv2d_nhwc_spatial_pack)) + else: + raise RuntimeError("Unsupported conv2d layout {} for arm cpu".format(layout)) + elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups): + if layout == "NCHW": + assert kernel_layout == "OIHW" or re.match(r"OIHW\d*o", kernel_layout) + if kernel_layout == "OIHW": + strategy.add_implement( + wrap_compute_conv2d(topi.arm_cpu.depthwise_conv2d_nchw), + wrap_topi_schedule(topi.arm_cpu.schedule_depthwise_conv2d_nchw)) + strategy.add_implement( + wrap_compute_conv2d(topi.arm_cpu.depthwise_conv2d_nchw_spatial_pack), + wrap_topi_schedule(topi.arm_cpu.schedule_depthwise_conv2d_nchw_spatial_pack), + 15) + elif layout == "NHWC": + assert kernel_layout == "HWOI" + logger.warning("depthwise_conv2d with layout NHWC is not optimized for arm cpu.") + strategy.add_implement( + wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc), + wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nhwc)) + else: + raise RuntimeError("Unsupported depthwise_conv2d layout {} for arm cpu". + format(layout)) + else: # group_conv2d + if layout == 'NCHW': + assert kernel_layout == "OIHW" + logger.warning("group_conv2d with layout NCHW is not optimized for arm cpu.") + strategy.add_implement( + wrap_compute_conv2d(topi.nn.group_conv2d_nchw, has_groups=True), + wrap_topi_schedule(topi.generic.schedule_group_conv2d_nchw)) + else: + raise RuntimeError("Unsupported group_conv2d layout {} for arm cpu". + format(layout)) + return strategy + +def wrap_compute_conv2d_winograd_nnpack(topi_compute): + """wrap topi compute for conv2d_winograd NNPack""" + def _compute_conv2d_nnpack(attrs, inputs, out_type): + padding = attrs.get_int_tuple("padding") + strides = attrs.get_int_tuple("strides") + dilation = attrs.get_int_tuple("dilation") + out_dtype = attrs.get_str("out_dtype") + out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype + return [topi_compute(inputs[0], inputs[1], None, strides, padding, + dilation, out_dtype)] + return _compute_conv2d_nnpack + +@conv2d_winograd_without_weight_transfrom_strategy.register("arm_cpu") +def conv2d_winograd_without_weight_transfrom_strategy_arm_cpu(attrs, inputs, out_type, target): + """conv2d_winograd_without_weight_transfrom arm cpu strategy""" + dilation = attrs.get_int_tuple("dilation") + padding = attrs.get_int_tuple("padding") + groups = attrs.get_int("groups") + layout = attrs.data_layout + stride_h, stride_w = attrs.get_int_tuple("strides") + assert dilation == (1, 1), "Do not support dilate now" + assert groups == 1, "Do not supoort arbitrary group number" + strategy = _op.OpStrategy() + if layout == "NCHW": + _, _, kh, kw = get_const_tuple(inputs[1].shape) + pt, pl, pb, pr = topi.nn.get_pad_tuple(padding, (kh, kw)) + assert kh == 3 and kw == 3 and stride_h == 1 and stride_w == 1 + strategy.add_implement( + wrap_compute_conv2d(topi.arm_cpu.conv2d_nchw_winograd), + wrap_topi_schedule(topi.arm_cpu.schedule_conv2d_nchw_winograd)) + if pt == 1 and pb == 1 and pl == 1 and pr == 1: + strategy.add_implement( + wrap_compute_conv2d_winograd_nnpack( + topi.arm_cpu.conv2d_nchw_winograd_nnpack_without_weight_transform), + wrap_topi_schedule( + topi.arm_cpu.schedule_conv2d_nchw_winograd_nnpack_without_weight_transform), + 5) + else: + raise RuntimeError("Unsupported conv2d_winograd_without_weight_transfrom layout {}". + format(layout)) + return strategy + +@conv2d_transpose_strategy.register("arm_cpu") +def conv2d_transpose_strategy_arm_cpu(attrs, inputs, out_type, target): + """conv2d_transpose arm cpu strategy""" + layout = attrs.data_layout + dilation = get_const_tuple(attrs.dilation) + groups = attrs.groups + assert layout == "NCHW", "only support nchw for now" + assert dilation == (1, 1), "not support dilate now" + assert groups == 1, "only support groups == 1 for now" + strategy = _op.OpStrategy() + strategy.add_implement( + wrap_comptue_conv2d_transpose(topi.arm_cpu.conv2d_transpose_nchw), + wrap_topi_schedule(topi.arm_cpu.schedule_conv2d_transpose_nchw)) + return strategy + +@bitserial_conv2d_strategy.register("arm_cpu") +def bitserial_conv2d_strategy_arm_cpu(attrs, inputs, out_type, target): + """bitserial_conv2d x86 strategy""" + strategy = _op.OpStrategy() + layout = attrs.data_layout + if layout == "NCHW": + strategy.add_implement( + wrap_compute_bitserial_conv2d(topi.x86.bitserial_conv2d_nchw), + wrap_topi_schedule(topi.x86.schedule_bitserial_conv2d_nchw)) + elif layout == "NHWC": + strategy.add_implement( + wrap_compute_bitserial_conv2d(topi.arm_cpu.bitserial_conv2d_nhwc), + wrap_topi_schedule(topi.arm_cpu.schedule_bitserial_conv2d_nhwc)) + else: + raise ValueError("Data layout {} not supported.".format(layout)) + return strategy + +@bitserial_dense_strategy.register("arm_cpu") +def schedule_bitserial_dense_arm_cpu(attrs, inputs, out_type, target): + """bitserial_dense arm cpu strategy""" + strategy = _op.OpStrategy() + strategy.add_implement( + wrap_compute_bitserial_dense(topi.arm_cpu.bitserial_dense), + wrap_topi_schedule(topi.arm_cpu.schedule_bitserial_dense)) + return strategy diff --git a/python/tvm/relay/op/strategy/bifrost.py b/python/tvm/relay/op/strategy/bifrost.py new file mode 100644 index 0000000000000..9407000faed99 --- /dev/null +++ b/python/tvm/relay/op/strategy/bifrost.py @@ -0,0 +1,97 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Definition of bifrost operator strategy.""" +# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import + +from __future__ import absolute_import + +import topi +from .generic import * +from .. import op as _op + + +@conv2d_strategy.register("bifrost") +def conv2d_strategy_bifrost(attrs, inputs, out_type, target): + """conv2d mali(bifrost) strategy""" + strategy = _op.OpStrategy() + data, kernel = inputs + dilation_h, dilation_w = attrs.get_int_tuple("dilation") + stride_h, stride_w = attrs.get_int_tuple("strides") + groups = attrs.groups + layout = attrs.data_layout + kernel_layout = attrs.kernel_layout + if dilation_h < 1 or dilation_w < 1: + raise ValueError("dilation should be positive value") + + if groups == 1: + if layout == "NCHW": + assert kernel_layout == "OIHW" + strategy.add_implement( + wrap_compute_conv2d(topi.bifrost.conv2d_nchw_spatial_pack), + wrap_topi_schedule(topi.bifrost.schedule_conv2d_nchw_spatial_pack)) + + _, _, kh, kw = get_const_tuple(kernel.shape) + if kh == 3 and kw == 3 and stride_h == 1 and stride_w == 1 and \ + dilation_h == 1 and dilation_w == 1: + strategy.add_implement( + wrap_compute_conv2d(topi.bifrost.conv2d_nchw_winograd), + wrap_topi_schedule(topi.bifrost.schedule_conv2d_nchw_winograd), + 15) + else: + raise RuntimeError("Unsupported conv2d layout {} for Mali(Bifrost)". + format(layout)) + elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups): + if layout == "NCHW": + assert kernel_layout == "OIHW" + strategy.add_implement( + wrap_compute_conv2d(topi.nn.depthwise_conv2d_nchw), + wrap_topi_schedule(topi.bifrost.schedule_depthwise_conv2d_nchw)) + else: + raise RuntimeError("Unsupported depthwise_conv2d layout {} for Mali(Bifrost)". + format(layout)) + else: # group_conv2d + raise RuntimeError("group_conv2d is not supported for Mali(Bifrost)") + return strategy + +@conv2d_winograd_without_weight_transfrom_strategy.register("bifrost") +def conv2d_winograd_without_weight_transfrom_strategy_bifrost(attrs, inputs, out_type, target): + """conv2d_winograd_without_weight_transfrom mali(bifrost) strategy""" + dilation = attrs.get_int_tuple("dilation") + groups = attrs.get_int("groups") + layout = attrs.data_layout + stride_h, stride_w = attrs.get_int_tuple("strides") + assert dilation == (1, 1), "Do not support dilate now" + assert groups == 1, "Do not supoort arbitrary group number" + strategy = _op.OpStrategy() + if layout == "NCHW": + _, _, kh, kw = get_const_tuple(inputs[1].shape) + assert kh == 3 and kw == 3 and stride_h == 1 and stride_w == 1 + strategy.add_implement( + wrap_compute_conv2d(topi.bifrost.conv2d_nchw_winograd), + wrap_topi_schedule(topi.bifrost.schedule_conv2d_nchw_winograd)) + else: + raise RuntimeError("Unsupported conv2d_winograd_without_weight_transfrom layout {}". + format(layout)) + return strategy + +@dense_strategy.register("bifrost") +def dense_strategy_bifrost(attrs, inputs, out_type, target): + """dense mali(bifrost) strategy""" + strategy = _op.OpStrategy() + strategy.add_implement(wrap_compute_dense(topi.bifrost.dense), + wrap_topi_schedule(topi.bifrost.schedule_dense)) + return strategy diff --git a/python/tvm/relay/op/strategy/cuda.py b/python/tvm/relay/op/strategy/cuda.py new file mode 100644 index 0000000000000..ca07604e6418d --- /dev/null +++ b/python/tvm/relay/op/strategy/cuda.py @@ -0,0 +1,352 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Definition of CUDA/GPU operator strategy.""" +# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import +from __future__ import absolute_import + +import topi +from .generic import * +from .. import op as _op +from ....schedule import SpecializedCondition + +@schedule_injective.register(["cuda", "gpu"]) +def schedule_injective_cuda(attrs, outs, target): + """schedule injective ops for cuda""" + with target: + return topi.cuda.schedule_injective(outs) + +@schedule_reduce.register(["cuda", "gpu"]) +def schedule_reduce_cuda(attrs, outs, target): + """schedule reduction ops for cuda""" + with target: + return topi.cuda.schedule_reduce(outs) + +@schedule_concatenate.register(["cuda", "gpu"]) +def schedule_concatenate_cuda(attrs, outs, target): + """schedule concatenate for cuda""" + with target: + return topi.cuda.schedule_injective(outs) + +@schedule_pool.register(["cuda", "gpu"]) +def schedule_pool_cuda(attrs, outs, target): + """schedule pooling ops for cuda""" + with target: + return topi.cuda.schedule_pool(outs, attrs.layout) + +@schedule_pool_grad.register(["cuda", "gpu"]) +def schedule_pool_grad_cuda(attrs, outs, target): + """schedule pooling gradient ops for cuda""" + with target: + return topi.cuda.schedule_pool_grad(outs) + +@schedule_adaptive_pool.register(["cuda", "gpu"]) +def schedule_adaptive_pool_cuda(attrs, outs, target): + """schedule adaptive pooling ops for cuda""" + with target: + return topi.cuda.schedule_adaptive_pool(outs) + +@schedule_softmax.register(["cuda", "gpu"]) +def schedule_softmax_cuda(attrs, outs, target): + """schedule softmax for cuda""" + with target: + return topi.cuda.schedule_softmax(outs) + +@schedule_lrn.register(["cuda", "gpu"]) +def schedule_lrn_cuda(attrs, outs, target): + """schedule LRN for cuda""" + with target: + return topi.cuda.schedule_lrn(outs) + +@schedule_l2_normalize.register(["cuda", "gpu"]) +def schedule_l2_normalize_cuda(attrs, outs, target): + """schedule L2 normalize for cuda""" + with target: + return topi.cuda.schedule_l2_normalize(outs) + +@conv2d_strategy.register(["cuda", "gpu"]) +def conv2d_strategy_cuda(attrs, inputs, out_type, target): + """conv2d cuda strategy""" + strategy = _op.OpStrategy() + data, kernel = inputs + stride_h, stride_w = attrs.get_int_tuple("strides") + dilation_h, dilation_w = attrs.get_int_tuple("dilation") + padding = attrs.get_int_tuple("padding") + groups = attrs.groups + layout = attrs.data_layout + kernel_layout = attrs.kernel_layout + if dilation_h < 1 or dilation_w < 1: + raise ValueError("dilation should be positive value") + + if groups == 1: + if layout == "NCHW": + # TODO(@vinx13, @icemelon9): Use conv2d_NCHWc_int8 when dtype is int8/uint8. + assert kernel_layout == "OIHW" + strategy.add_implement( + wrap_compute_conv2d(topi.cuda.conv2d_nchw), + wrap_topi_schedule(topi.cuda.schedule_conv2d_nchw)) + _, _, kh, kw = get_const_tuple(kernel.shape) + if kh <= 7 and kw <= 7 and kh == kw and stride_h == 1 and stride_w == 1 and \ + dilation_h == 1 and dilation_w == 1: + strategy.add_implement( + wrap_compute_conv2d(topi.cuda.conv2d_nchw_winograd), + wrap_topi_schedule(topi.cuda.schedule_conv2d_nchw_winograd), + 15) + elif layout == "HWCN": + assert kernel_layout == "HWIO" + strategy.add_implement( + wrap_compute_conv2d(topi.cuda.conv2d_hwcn), + wrap_topi_schedule(topi.cuda.schedule_conv2d_hwcn)) + # Re-enable this after @alexgl-github fix the conv2d_nhwc for cuda + # elif layout == "NHWC": + # assert kernel_layout == "HWIO" + # strategy.add_implement( + # wrap_compute_conv2d(topi.cuda.conv2d_nhwc), + # wrap_topi_schedule(topi.cuda.schedule_conv2d_nhwc)) + elif layout == "NCHW4c" and data.dtype in ["int8", "uint8"]: + assert kernel_layout == "OIHW4o4i" + strategy.add_implement( + wrap_compute_conv2d(topi.cuda.conv2d_NCHWc_int8, True), + wrap_topi_schedule(topi.cuda.schedule_conv2d_NCHWc_int8)) + else: + raise RuntimeError("Unsupported conv2d layout {} for CUDA".format(layout)) + # add cudnn implementation + if target.target_name == "cuda" and "cudnn" in target.libs: + if layout in ["NCHW", "NHWC"] and padding[0] == padding[2] and \ + padding[1] == padding[3]: + strategy.add_implement( + wrap_compute_conv2d(topi.cuda.conv2d_cudnn, True), + wrap_topi_schedule(topi.cuda.schedule_conv2d_cudnn), 5) + elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups): + if layout == "NCHW": + assert kernel_layout == "OIHW" + strategy.add_implement( + wrap_compute_conv2d(topi.cuda.depthwise_conv2d_nchw), + wrap_topi_schedule(topi.cuda.schedule_depthwise_conv2d_nchw)) + elif layout == "NHWC": + assert kernel_layout == "HWOI" + strategy.add_implement( + wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc), + wrap_topi_schedule(topi.cuda.schedule_depthwise_conv2d_nhwc)) + else: + raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout)) + else: # group_conv2d + if layout == 'NCHW': + # TODO(@vinx13, @icemelon9): Use group_conv2d_NCHWc_int8 when dtype is int8/uint8. + assert kernel_layout == "OIHW" + strategy.add_implement( + wrap_compute_conv2d(topi.cuda.group_conv2d_nchw, has_groups=True), + wrap_topi_schedule(topi.cuda.schedule_group_conv2d_nchw)) + elif layout == 'NCHW4c' and data.dtype in ["int8", "uint8"]: + assert kernel_layout == "OIHW4o4i" + strategy.add_implement( + wrap_compute_conv2d(topi.cuda.group_conv2d_NCHWc_int8, True), + wrap_topi_schedule(topi.cuda.schedule_group_conv2d_NCHWc_int8)) + else: + raise RuntimeError("Unsupported group_conv2d layout {}".format(layout)) + return strategy + +@conv2d_winograd_without_weight_transfrom_strategy.register(["cuda", "gpu"]) +def conv2d_winograd_without_weight_transfrom_strategy_cuda(attrs, inputs, out_type, target): + """conv2d_winograd_without_weight_transfrom cuda strategy""" + dilation = attrs.get_int_tuple("dilation") + groups = attrs.get_int("groups") + layout = attrs.data_layout + assert dilation == (1, 1), "Do not support dilate now" + assert groups == 1, "Do not supoort arbitrary group number" + strategy = _op.OpStrategy() + if layout == "NCHW": + strategy.add_implement( + wrap_compute_conv2d(topi.cuda.conv2d_nchw_winograd_without_weight_transform), + wrap_topi_schedule( + topi.cuda.schedule_conv2d_nchw_winograd_without_weight_transform_cuda)) + else: + raise RuntimeError("Unsupported conv2d_winograd_without_weight_transfrom layout {}". + format(layout)) + return strategy + +@deformable_conv2d_strategy.register(["cuda", "gpu"]) +def deformable_conv2d_strategy_cuda(attrs, inputs, out_type, target): + """deformable_conv2d cuda strategy""" + strategy = _op.OpStrategy() + strategy.add_implement(wrap_compute_deformable_conv2d(topi.cuda.deformable_conv2d_nchw), + wrap_topi_schedule(topi.cuda.schedule_deformable_conv2d_nchw)) + return strategy + +@conv2d_transpose_strategy.register(["cuda", "gpu"]) +def conv2d_transpose_strategy_cuda(attrs, inputs, out_type, target): + """conv2d_transpose cuda strategy""" + layout = attrs.data_layout + dilation = get_const_tuple(attrs.dilation) + groups = attrs.groups + assert layout == "NCHW", "only support nchw for now" + assert dilation == (1, 1), "not support dilate now" + assert groups == 1, "only support groups == 1 for now" + strategy = _op.OpStrategy() + strategy.add_implement( + wrap_comptue_conv2d_transpose(topi.cuda.conv2d_transpose_nchw), + wrap_topi_schedule(topi.cuda.schedule_conv2d_transpose_nchw)) + return strategy + +@conv3d_strategy.register(["cuda", "gpu"]) +def conv3d_strategy_cuda(attrs, inputs, out_type, target): + """conv3d cuda strategy""" + strategy = _op.OpStrategy() + layout = attrs.data_layout + assert layout in ["NCDHW", "NDHWC"], "Not support this layout {} yet".format(layout) + if layout == "NCDHW": + strategy.add_implement(wrap_compute_conv3d(topi.cuda.conv3d_ncdhw), + wrap_topi_schedule(topi.cuda.schedule_conv3d_ncdhw), + 10) + else: # layout == "NDHWC": + strategy.add_implement(wrap_compute_conv3d(topi.cuda.conv3d_ndhwc), + wrap_topi_schedule(topi.cuda.schedule_conv3d_ndhwc), + 10) + if target.target_name == "cuda" and "cudnn" in target.libs: + strategy.add_implement(wrap_compute_conv3d(topi.cuda.conv3d_cudnn), + wrap_topi_schedule(topi.cuda.schedule_conv3d_cudnn), + 15) + return strategy + +@conv1d_strategy.register(["cuda", "gpu"]) +def conv1d_strategy_cuda(attrs, inputs, out_type, target): + """conv1d cuda strategy""" + layout = attrs.data_layout + dilation = get_const_tuple(attrs.dilation) + if dilation[0] < 1: + raise ValueError("dilation should be a positive value") + strategy = _op.OpStrategy() + if layout == "NCW": + strategy.add_implement(wrap_compute_conv1d(topi.cuda.conv1d_ncw), + wrap_topi_schedule(topi.cuda.schedule_conv1d_ncw)) + elif layout == "NWC": + strategy.add_implement(wrap_compute_conv1d(topi.cuda.conv1d_nwc), + wrap_topi_schedule(topi.cuda.schedule_conv1d_nwc)) + else: + raise ValueError("Unsupported conv1d layout {}".format(layout)) + return strategy + +@conv1d_transpose_strategy.register(["cuda", "gpu"]) +def conv1d_transpose_strategy_cuda(attrs, inputs, out_type, target): + """conv1d_transpose cuda strategy""" + strategy = _op.OpStrategy() + layout = attrs.data_layout + dilation = get_const_tuple(attrs.dilation) + groups = attrs.groups + assert layout == "NCW", "conv1d_transpose ncw only supported" + assert dilation == (1,), "conv1d_transpose dilation is not supported" + assert groups == 1, "conv1d_transpose groups == 1 only supported" + strategy.add_implement(wrap_compute_conv1d_transpose(topi.cuda.conv1d_transpose_ncw), + wrap_topi_schedule(topi.cuda.schedule_conv1d_transpose_ncw)) + return strategy + +@dense_strategy.register(["cuda", "gpu"]) +def dense_strategy_cuda(attrs, inputs, out_type, target): + """dense cuda strategy""" + strategy = _op.OpStrategy() + if out_type.dtype == "int8": + strategy.add_implement(wrap_compute_dense(topi.cuda.dense_int8), + wrap_topi_schedule(topi.cuda.schedule_dense_int8)) + else: + strategy.add_implement(wrap_compute_dense(topi.cuda.dense_small_batch), + wrap_topi_schedule(topi.cuda.schedule_dense_small_batch)) + b = inputs[0].shape[0] + with SpecializedCondition(b >= 32): + strategy.add_implement(wrap_compute_dense(topi.cuda.dense_large_batch), + wrap_topi_schedule(topi.cuda.schedule_dense_large_batch)) + if target.target_name == "cuda" and "cublas" in target.libs: + strategy.add_implement(wrap_compute_dense(topi.cuda.dense_cublas), + wrap_topi_schedule(topi.cuda.schedule_dense_cublas), 5) + return strategy + +@batch_matmul_strategy.register(["cuda", "gpu"]) +def batch_matmul_strategy_cuda(attrs, inputs, out_type, target): + """batch_matmul cuda strategy""" + strategy = _op.OpStrategy() + strategy.add_implement(wrap_compute_batch_matmul(topi.nn.batch_matmul), + wrap_topi_schedule(topi.cuda.schedule_batch_matmul), + 10) + if target.target_name == "cuda" and "cublas" in target.libs: + strategy.add_implement(wrap_compute_batch_matmul(topi.cuda.batch_matmul_cublas), + wrap_topi_schedule(topi.generic.schedule_extern), + 15) + return strategy + +@argsort_strategy.register(["cuda", "gpu"]) +def argsort_strategy_cuda(attrs, inputs, out_type, target): + """argsort cuda strategy""" + strategy = _op.OpStrategy() + strategy.add_implement(wrap_compute_argsort(topi.cuda.argsort_gpu), + wrap_topi_schedule(topi.cuda.schedule_argsort)) + return strategy + +@topk_strategy.register(["cuda", "gpu"]) +def topk_strategy_cuda(attrs, inputs, out_type, target): + """topk cuda strategy""" + strategy = _op.OpStrategy() + strategy.add_implement(wrap_compute_topk(topi.cuda.topk_gpu), + wrap_topi_schedule(topi.cuda.schedule_topk)) + return strategy + +@schedule_multibox_prior.register(["cuda", "gpu"]) +def schedule_multibox_prior_cuda(attrs, outs, target): + """schedule multibox_prior for cuda""" + with target: + return topi.cuda.schedule_multibox_prior(outs) + +@schedule_multibox_transform_loc.register(["cuda", "gpu"]) +def schedule_multibox_transform_loc_cuda(attrs, outs, target): + """schedule multibox_transform_loc for cuda""" + with target: + return topi.cuda.schedule_multibox_transform_loc(outs) + +@get_valid_counts_strategy.register(["cuda", "gpu"]) +def get_valid_counts_strategy_cuda(attrs, inputs, out_type, target): + """get_valid_counts cuda strategy""" + strategy = _op.OpStrategy() + strategy.add_implement(wrap_compute_get_valid_counts(topi.cuda.get_valid_counts), + wrap_topi_schedule(topi.cuda.schedule_get_valid_counts)) + return strategy + +@nms_strategy.register(["cuda", "gpu"]) +def nms_strategy_cuda(attrs, inputs, out_type, target): + """nms cuda strategy""" + strategy = _op.OpStrategy() + strategy.add_implement(wrap_compute_nms(topi.cuda.non_max_suppression), + wrap_topi_schedule(topi.cuda.schedule_nms)) + return strategy + +@roi_align_strategy.register(["cuda", "gpu"]) +def roi_align_strategy_cuda(attrs, inputs, out_type, target): + """roi_align cuda strategy""" + strategy = _op.OpStrategy() + strategy.add_implement(wrap_compute_roi_align(topi.vision.rcnn.roi_align_nchw), + wrap_topi_schedule(topi.cuda.schedule_roi_align)) + return strategy + +@schedule_roi_pool.register(["cuda", "gpu"]) +def schedule_roi_pool_cuda(attrs, outs, target): + """schedule roi_pool for cuda""" + with target: + return topi.cuda.schedule_roi_pool(outs) + +@proposal_strategy.register(["cuda", "gpu"]) +def proposal_strategy_cuda(attrs, inputs, out_type, target): + """proposal cuda strategy""" + strategy = _op.OpStrategy() + strategy.add_implement(wrap_compute_proposal(topi.cuda.proposal), + wrap_topi_schedule(topi.cuda.schedule_proposal)) + return strategy diff --git a/python/tvm/relay/op/strategy/generic.py b/python/tvm/relay/op/strategy/generic.py new file mode 100644 index 0000000000000..73923e5545796 --- /dev/null +++ b/python/tvm/relay/op/strategy/generic.py @@ -0,0 +1,678 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Definition of generic operator strategy.""" +# pylint: disable=invalid-name,unused-argument +from __future__ import absolute_import + +import re +import topi +from topi.util import get_const_int, get_const_float, get_const_tuple, get_float_tuple +from .. import op as _op +from ....target import generic_func, override_native_generic_func + +def wrap_topi_schedule(topi_schedule): + """Wrap TOPI schedule which doesn't use attrs""" + def wrapper(attrs, outs, target): + with target: + return topi_schedule(outs) + return wrapper + +def get_conv2d_in_channels(data_shape, data_layout): + """Get conv2d input channels""" + data_shape = get_const_tuple(data_shape) + if len(data_shape) == 4: + idx = data_layout.find("C") + assert idx >= 0, "Invalid conv2d data layout {}".format(data_layout) + return data_shape[idx] + elif re.match(r"NCHW\d*c", data_layout): + # NCHW[8]c + return data_shape[1] * data_shape[4] + else: + raise ValueError("Unknown conv2d data layout {}".format(data_layout)) + +def get_conv2d_out_channels(kernel_shape, kernel_layout): + """Get conv2d output channels""" + kernel_shape = get_const_tuple(kernel_shape) + if len(kernel_shape) == 4: + idx = kernel_layout.find("O") + assert idx >= 0, "Invalid conv2d kernel layout {}".format(kernel_layout) + return kernel_shape[idx] + elif re.match(r"OIHW\d*i\d*o", kernel_layout): + return kernel_shape[0] * kernel_shape[5] + elif re.match(r"OIHW\d*o", kernel_layout): + return kernel_shape[0] * kernel_shape[4] + else: + raise ValueError("Unknown conv2d kernel layout {}".format(kernel_layout)) + +def is_depthwise_conv2d(data_shape, data_layout, kernel_shape, kernel_layout, groups): + ic = get_conv2d_in_channels(data_shape, data_layout) + oc = get_conv2d_out_channels(kernel_shape, kernel_layout) + return ic == oc == groups + +@generic_func +def schedule_injective(attrs, outs, target): + """Schedule injective ops""" + with target: + return topi.generic.schedule_injective(outs) + +@generic_func +def schedule_reduce(attrs, outs, target): + """Schedule reduction ops""" + with target: + return topi.generic.schedule_reduce(outs) + +_op._schedule_injective = schedule_injective +_op._schedule_reduce = schedule_reduce + +# concatenate +@generic_func +def schedule_concatenate(attrs, outs, target): + """Schedule concatenate op""" + with target: + return topi.generic.schedule_injective(outs) + +# pool +@generic_func +def schedule_pool(attrs, outs, target): + """Schedule pooling ops""" + with target: + return topi.generic.schedule_pool(outs, attrs.layout) + +# pool_grad +@generic_func +def schedule_pool_grad(attrs, outs, target): + """Schedule pooling gradient ops""" + with target: + return topi.generic.schedule_pool_grad(outs) + +# adaptive pool +@generic_func +def schedule_adaptive_pool(attrs, outs, target): + """Schedule adaptive pooling ops""" + with target: + return topi.generic.schedule_adaptive_pool(outs) + +# softmax +@generic_func +def schedule_softmax(attrs, outs, target): + """Schedule softmax""" + with target: + return topi.generic.schedule_softmax(outs) + +# lrn +@generic_func +def schedule_lrn(attrs, outs, target): + """Schedule LRN op""" + with target: + return topi.generic.schedule_lrn(outs) + +# l2_normalize +@generic_func +def schedule_l2_normalize(attrs, outs, target): + """Schedule L2 normalize op""" + with target: + return topi.generic.schedule_l2_normalize(outs) + +# bitpack +@generic_func +def schedule_bitpack(attrs, outs, target): + """Schedule bitpack""" + with target: + return topi.generic.schedule_bitpack(outs) + +# conv2d +def wrap_compute_conv2d(topi_compute, need_data_layout=False, need_out_layout=False, + has_groups=False): + """Wrap conv2d topi compute""" + def _compute_conv2d(attrs, inputs, out_type): + padding = get_const_tuple(attrs.padding) + strides = get_const_tuple(attrs.strides) + dilation = get_const_tuple(attrs.dilation) + data_layout = attrs.get_str("data_layout") + out_layout = attrs.get_str("out_layout") + out_dtype = attrs.out_dtype + out_dtype = (inputs[0].dtype if out_dtype in ("same", "") + else out_dtype) + args = [inputs[0], inputs[1], strides, padding, dilation] + if has_groups: + args.append(attrs.groups) + if need_data_layout: + args.append(data_layout) + if need_out_layout: + args.append(out_layout) + args.append(out_dtype) + return [topi_compute(*args)] + return _compute_conv2d + +@override_native_generic_func("conv2d_strategy") +def conv2d_strategy(attrs, inputs, out_type, target): + """conv2d generic strategy""" + strategy = _op.OpStrategy() + data, kernel = inputs + dilation = get_const_tuple(attrs.dilation) + groups = attrs.groups + layout = attrs.data_layout + kernel_layout = attrs.kernel_layout + (dilation_h, dilation_w) = dilation + if dilation_h < 1 or dilation_w < 1: + raise ValueError("dilation should be positive value") + + if groups == 1: + if layout == "NCHW": + assert kernel_layout == "OIHW" + strategy.add_implement( + wrap_compute_conv2d(topi.nn.conv2d_nchw), + wrap_topi_schedule(topi.generic.schedule_conv2d_nchw)) + elif layout == "NHWC": + assert kernel_layout == "HWIO" + strategy.add_implement( + wrap_compute_conv2d(topi.nn.conv2d_nhwc), + wrap_topi_schedule(topi.generic.schedule_conv2d_nhwc)) + elif layout == "HWCN": + assert kernel_layout == "HWIO" + strategy.add_implement( + wrap_compute_conv2d(topi.nn.conv2d_hwcn), + wrap_topi_schedule(topi.generic.schedule_conv2d_hwcn)) + else: + raise RuntimeError("Unsupported conv2d layout {}".format(layout)) + elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups): + if layout == "NCHW": + assert kernel_layout == "OIHW" + strategy.add_implement( + wrap_compute_conv2d(topi.nn.depthwise_conv2d_nchw), + wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nchw)) + elif layout == "NHWC": + assert kernel_layout == "HWOI" + strategy.add_implement( + wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc), + wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nhwc)) + else: + raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout)) + else: # group_conv2d + if layout == 'NCHW': + assert kernel_layout == "OIHW" + strategy.add_implement( + wrap_compute_conv2d(topi.nn.group_conv2d_nchw, has_groups=True), + wrap_topi_schedule(topi.generic.schedule_group_conv2d_nchw)) + else: + raise RuntimeError("Unsupported group_conv2d layout {}".format(layout)) + return strategy + +# conv2d_NCHWc +@override_native_generic_func("conv2d_NCHWc_strategy") +def conv2d_NCHWc_strategy(attrs, inputs, out_type, target): + """conv2d_NCHWc generic strategy""" + strategy = _op.OpStrategy() + if inputs[0].dtype == "int8" or inputs[0].dtype == "uint8": + strategy.add_implement( + wrap_compute_conv2d(topi.nn.conv2d_NCHWc_int8, True, True), + wrap_topi_schedule(topi.generic.schedule_conv2d_NCHWc_int8)) + else: + strategy.add_implement( + wrap_compute_conv2d(topi.nn.conv2d_NCHWc, True, True), + wrap_topi_schedule(topi.generic.schedule_conv2d_NCHWc)) + return strategy + +# depthwise_conv2d_NCHWc +@override_native_generic_func("depthwise_conv2d_NCHWc_strategy") +def depthwise_conv2d_NCHWc_strategy(attrs, inputs, out_type, target): + """depthwise_conv2d generic strategy""" + strategy = _op.OpStrategy() + strategy.add_implement( + wrap_compute_conv2d(topi.nn.depthwise_conv2d_NCHWc, True, True), + wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_NCHWc)) + return strategy + +# conv2d_winograd_without_weight_transform +@override_native_generic_func("conv2d_winograd_without_weight_transform_strategy") +def conv2d_winograd_without_weight_transfrom_strategy(attrs, inputs, out_type, target): + """conv2d_winograd_without_weight_transfrom generic strategy""" + raise ValueError("No generic implemenation for conv2d_winograd_without_weight_transform") + +# conv2d_winograd_weight_transform +@generic_func +def schedule_conv2d_winograd_weight_transform(attrs, outs, target): + """Schedule conv2d_winograd_weight_transform""" + with target: + return topi.generic.schedule_conv2d_winograd_weight_transform(outs) + +# conv2d_winograd_nnpack_weight_transform +@generic_func +def schedule_conv2d_winograd_nnpack_weight_transform(attrs, outs, target): + """Schedule conv2d_winograd_nnpack_weight_transform""" + with target: + return topi.generic.schedule_conv2d_winograd_nnpack_weight_transform(outs) + +# deformable_conv2d +def wrap_compute_deformable_conv2d(topi_compute): + """wrap deformable_conv2d topi compute""" + def _compute_deformable_conv2d(attrs, inputs, out_dtype): + assert attrs.data_layout == "NCHW" + padding = get_const_tuple(attrs.padding) + strides = get_const_tuple(attrs.strides) + dilation = get_const_tuple(attrs.dilation) + deformable_groups = attrs.deformable_groups + groups = attrs.groups + out_dtype = attrs.out_dtype + out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype + out = topi_compute(inputs[0], inputs[1], inputs[2], strides, padding, + dilation, deformable_groups, groups, out_dtype) + return [out] + return _compute_deformable_conv2d + +@override_native_generic_func("deformable_conv2d_strategy") +def deformable_conv2d_strategy(attrs, inputs, out_type, target): + """deformable_conv2d generic strategy""" + strategy = _op.OpStrategy() + strategy.add_implement(wrap_compute_deformable_conv2d(topi.nn.deformable_conv2d_nchw), + wrap_topi_schedule(topi.generic.schedule_deformable_conv2d_nchw)) + return strategy + +# conv2d_transpose +def wrap_comptue_conv2d_transpose(topi_compute): + """wrap conv2d_transpose topi compute""" + def compute_conv2d_transpose(attrs, inputs, out_dtype): + """Compute definition of conv2d_transpose""" + padding = get_const_tuple(attrs.padding) + strides = get_const_tuple(attrs.strides) + out_dtype = attrs.out_dtype + out_dtype = (inputs[0].dtype if out_dtype in ("same", "") + else out_dtype) + out = topi_compute( + inputs[0], inputs[1], strides, padding, out_dtype) + output_padding = get_const_tuple(attrs.output_padding) + out = topi.nn.pad(out, [0, 0, 0, 0], + [0, 0, output_padding[0], output_padding[1]]) + return [out] + return compute_conv2d_transpose + +@override_native_generic_func("conv2d_transpose_strategy") +def conv2d_transpose_strategy(attrs, inputs, out_type, target): + """conv2d_transpose generic strategy""" + layout = attrs.data_layout + dilation = get_const_tuple(attrs.dilation) + groups = attrs.groups + assert layout == "NCHW", "only support nchw for now" + assert dilation == (1, 1), "not support dilate now" + assert groups == 1, "only support groups == 1 for now" + strategy = _op.OpStrategy() + strategy.add_implement( + wrap_comptue_conv2d_transpose(topi.nn.conv2d_transpose_nchw), + wrap_topi_schedule(topi.generic.schedule_conv2d_transpose_nchw)) + return strategy + +# conv3d +def wrap_compute_conv3d(topi_compute): + """wrap conv3d topi compute""" + def _compute_conv3d(attrs, inputs, out_type): + padding = get_const_tuple(attrs.padding) + strides = get_const_tuple(attrs.strides) + dilation = get_const_tuple(attrs.dilation) + groups = attrs.groups + layout = attrs.data_layout + out_dtype = attrs.out_dtype + out_dtype = (inputs[0].dtype if out_dtype in ("same", "") + else out_dtype) + + (dilation_d, dilation_h, dilation_w) = dilation + if dilation_d < 1 or dilation_h < 1 or dilation_w < 1: + raise ValueError("Dilation should be positive value") + + if groups == 1: + out = topi_compute(inputs[0], inputs[1], strides, padding, dilation, + layout, out_dtype) + else: + raise ValueError("Not support arbitrary group number for now") + return [out] + return _compute_conv3d + +@override_native_generic_func("conv3d_strategy") +def conv3d_strategy(attrs, inputs, out_type, target): + """conv3d generic strategy""" + strategy = _op.OpStrategy() + layout = attrs.data_layout + if layout == "NCDHW": + strategy.add_implement(wrap_compute_conv3d(topi.nn.conv3d_ncdhw), + wrap_topi_schedule(topi.generic.schedule_conv3d_ncdhw)) + elif layout == "NDHWC": + strategy.add_implement(wrap_compute_conv3d(topi.nn.conv3d_ndhwc), + wrap_topi_schedule(topi.generic.schedule_conv3d_ndhwc)) + else: + raise ValueError("Not support this layout {} yet".format(layout)) + return strategy + +# conv1d +def wrap_compute_conv1d(topi_compute): + """wrap conv1d topi compute""" + def _compute_conv1d(attrs, inputs, out_type): + """Compute definition of conv1d""" + strides = get_const_tuple(attrs.strides) + padding = get_const_tuple(attrs.padding) + dilation = get_const_tuple(attrs.dilation) + out_dtype = attrs.out_dtype + out_dtype = (inputs[0].dtype if out_dtype in ("same", "") + else out_dtype) + return [topi_compute(inputs[0], inputs[1], strides, padding, dilation, + out_dtype)] + return _compute_conv1d + +@override_native_generic_func("conv1d_strategy") +def conv1d_strategy(attrs, inputs, out_type, target): + """conv1d generic strategy""" + layout = attrs.data_layout + dilation = get_const_tuple(attrs.dilation) + if dilation[0] < 1: + raise ValueError("dilation should be a positive value") + strategy = _op.OpStrategy() + if layout == "NCW": + strategy.add_implement(wrap_compute_conv1d(topi.nn.conv1d_ncw), + wrap_topi_schedule(topi.generic.schedule_conv1d_ncw)) + elif layout == "NWC": + strategy.add_implement(wrap_compute_conv1d(topi.nn.conv1d_nwc), + wrap_topi_schedule(topi.generic.schedule_conv1d_nwc)) + else: + raise ValueError("Unsupported conv1d layout {}".format(layout)) + return strategy + +# conv1d_transpose +def wrap_compute_conv1d_transpose(topi_compute): + """wrap conv1d_transpose topi compute""" + def _compute_conv1d_tranpsoe(attrs, inputs, out_type): + padding = get_const_tuple(attrs.padding) + strides = get_const_tuple(attrs.strides) + out_dtype = attrs.out_dtype + out_dtype = (inputs[0].dtype if out_dtype in ("same", "") else out_dtype) + out = topi_compute(inputs[0], inputs[1], strides, padding, out_dtype) + output_padding = get_const_tuple(attrs.output_padding) + out = topi.nn.pad(out, [0, 0, 0], [0, 0, output_padding[0]]) + return [out] + return _compute_conv1d_tranpsoe + +@override_native_generic_func("conv1d_transpose_strategy") +def conv1d_transpose_strategy(attrs, inputs, out_type, target): + """conv1d_transpose generic strategy""" + strategy = _op.OpStrategy() + layout = attrs.data_layout + dilation = get_const_tuple(attrs.dilation) + groups = attrs.groups + assert layout == "NCW", "conv1d_transpose ncw only supported" + assert dilation == (1,), "conv1d_transpose dilation is not supported" + assert groups == 1, "conv1d_transpose groups == 1 only supported" + strategy.add_implement(wrap_compute_conv1d_transpose(topi.nn.conv1d_transpose_ncw), + wrap_topi_schedule(topi.generic.schedule_conv1d_transpose_ncw)) + return strategy + +# dense +def wrap_compute_dense(topi_compute): + """wrap dense topi compute""" + def _compute_dense(attrs, inputs, out_type): + """Compute definition of dense""" + out_dtype = attrs.out_dtype + out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype + return [topi_compute(inputs[0], inputs[1], None, out_dtype)] + return _compute_dense + +@override_native_generic_func("dense_strategy") +def dense_strategy(attrs, inputs, out_type, target): + """dense generic strategy""" + strategy = _op.OpStrategy() + strategy.add_implement(wrap_compute_dense(topi.nn.dense), + wrap_topi_schedule(topi.generic.schedule_dense)) + return strategy + +# batch_matmul +def wrap_compute_batch_matmul(topi_func): + """wrap batch_matmul topi compute""" + def _compute_batch_matmul(attrs, inputs, out_type): + return [topi_func(inputs[0], inputs[1])] + return _compute_batch_matmul + +@override_native_generic_func("batch_matmul_strategy") +def batch_matmul_strategy(attrs, inputs, out_type, target): + """batch_matmul generic strategy""" + strategy = _op.OpStrategy() + strategy.add_implement(wrap_compute_batch_matmul(topi.nn.batch_matmul), + wrap_topi_schedule(topi.generic.schedule_batch_matmul)) + return strategy + +# sparse_dense +@generic_func +def schedule_sparse_dense(attrs, outs, target): + """schedule sparse_dense""" + with target: + return topi.generic.schedule_sparse_dense(outs) + +# sparse_transpose +@generic_func +def schedule_sparse_transpose(attrs, outs, target): + """schedule sparse_transpose""" + with target: + return topi.generic.schedule_sparse_transpose(outs) + +# argsort +def wrap_compute_argsort(topi_compute): + """Wrap argsort topi compute""" + def _compute_argsort(attrs, inputs, _): + axis = get_const_int(attrs.axis) + is_ascend = bool(get_const_int(attrs.is_ascend)) + dtype = attrs.dtype + return [topi_compute(inputs[0], axis=axis, is_ascend=is_ascend, dtype=dtype)] + return _compute_argsort + +@override_native_generic_func("argsort_strategy") +def argsort_strategy(attrs, inputs, out_type, target): + """argsort generic strategy""" + strategy = _op.OpStrategy() + strategy.add_implement(wrap_compute_argsort(topi.argsort), + wrap_topi_schedule(topi.generic.schedule_argsort)) + return strategy + +# topk +def wrap_compute_topk(topi_func): + """Wrap topk compute""" + def _compute_topk(attrs, inputs, out_type): + k = get_const_int(attrs.k) + axis = get_const_int(attrs.axis) + ret_type = attrs.ret_type + is_ascend = bool(get_const_int(attrs.is_ascend)) + dtype = attrs.dtype + out = topi_func(inputs[0], k, axis, ret_type, is_ascend, dtype) + out = out if isinstance(out, list) else [out] + return out + return _compute_topk + +@override_native_generic_func("topk_strategy") +def topk_strategy(attrs, inputs, out_type, target): + """topk generic strategy""" + strategy = _op.OpStrategy() + strategy.add_implement(wrap_compute_topk(topi.topk), + wrap_topi_schedule(topi.generic.schedule_topk)) + return strategy + +# multibox_prior +@generic_func +def schedule_multibox_prior(attrs, outs, target): + """schedule multibox_prior""" + with target: + return topi.generic.schedule_multibox_prior(outs) + +# multibox_transform_loc +@generic_func +def schedule_multibox_transform_loc(attrs, outs, target): + """schedule multibox_transform_loc""" + with target: + return topi.generic.schedule_multibox_transform_loc(outs) + +# get_valid_counts +def wrap_compute_get_valid_counts(topi_compute): + """wrap get_valid_counts topi compute""" + def _compute_get_valid_counts(attrs, inputs, out_type): + score_threshold = get_const_float(attrs.score_threshold) + id_index = get_const_int(attrs.id_index) + score_index = get_const_int(attrs.score_index) + return topi_compute(inputs[0], score_threshold, id_index, score_index) + return _compute_get_valid_counts + +@override_native_generic_func("get_valid_counts_strategy") +def get_valid_counts_strategy(attrs, inputs, out_type, target): + """get_valid_counts generic strategy""" + strategy = _op.OpStrategy() + strategy.add_implement(wrap_compute_get_valid_counts(topi.vision.get_valid_counts), + wrap_topi_schedule(topi.generic.schedule_get_valid_counts)) + return strategy + +# non-maximum suppression +def wrap_compute_nms(topi_compute): + """wrap nms topi compute""" + def _compute_nms(attrs, inputs, out_type): + return_indices = bool(get_const_int(attrs.return_indices)) + max_output_size = get_const_int(attrs.max_output_size) + iou_threshold = get_const_float(attrs.iou_threshold) + force_suppress = bool(get_const_int(attrs.force_suppress)) + top_k = get_const_int(attrs.top_k) + coord_start = get_const_int(attrs.coord_start) + score_index = get_const_int(attrs.score_index) + id_index = get_const_int(attrs.id_index) + invalid_to_bottom = bool(get_const_int(attrs.invalid_to_bottom)) + return [topi_compute(inputs[0], inputs[1], max_output_size, iou_threshold, + force_suppress, top_k, coord_start, score_index, + id_index, return_indices, invalid_to_bottom)] + return _compute_nms + +@override_native_generic_func("non_max_suppression_strategy") +def nms_strategy(attrs, inputs, out_type, target): + """nms generic strategy""" + strategy = _op.OpStrategy() + strategy.add_implement(wrap_compute_nms(topi.vision.non_max_suppression), + wrap_topi_schedule(topi.generic.schedule_nms)) + return strategy + +# roi_align +def wrap_compute_roi_align(topi_compute): + """wrap roi_align topi compute""" + def _compute_roi_align(attrs, inputs, out_type): + assert attrs.layout == "NCHW" + pooled_size = get_const_tuple(attrs.pooled_size) + return [topi_compute(inputs[0], inputs[1], + pooled_size=pooled_size, + spatial_scale=attrs.spatial_scale, + sample_ratio=attrs.sample_ratio)] + return _compute_roi_align + +@override_native_generic_func("roi_align_strategy") +def roi_align_strategy(attrs, inputs, out_type, target): + """roi_align generic strategy""" + strategy = _op.OpStrategy() + strategy.add_implement(wrap_compute_roi_align(topi.vision.rcnn.roi_align_nchw), + wrap_topi_schedule(topi.generic.schedule_roi_align)) + return strategy + +# roi_pool +@generic_func +def schedule_roi_pool(attrs, outs, target): + """schedule roi_pool""" + with target: + return topi.generic.schedule_roi_pool(outs) + +# proposal +def wrap_compute_proposal(topi_compute): + """wrap proposal topi compute""" + def _compute_proposal(attrs, inputs, out_type): + scales = get_float_tuple(attrs.scales) + ratios = get_float_tuple(attrs.ratios) + feature_stride = attrs.feature_stride + threshold = attrs.threshold + rpn_pre_nms_top_n = attrs.rpn_pre_nms_top_n + rpn_post_nms_top_n = attrs.rpn_post_nms_top_n + rpn_min_size = attrs.rpn_min_size + iou_loss = bool(get_const_int(attrs.iou_loss)) + return [topi_compute(inputs[0], inputs[1], inputs[2], scales, ratios, + feature_stride, threshold, rpn_pre_nms_top_n, + rpn_post_nms_top_n, rpn_min_size, iou_loss)] + return _compute_proposal + +@override_native_generic_func("proposal_strategy") +def proposal_strategy(attrs, inputs, out_type, target): + """proposal generic strategy""" + strategy = _op.OpStrategy() + strategy.add_implement(wrap_compute_proposal(topi.vision.rcnn.proposal), + wrap_topi_schedule(topi.generic.schedule_proposal)) + return strategy + +# argwhere +@generic_func +def schedule_argwhere(attrs, outs, target): + """schedule argwhere""" + with target: + return topi.generic.schedule_argwhere(outs) + +# bitserial_conv2d +def wrap_compute_bitserial_conv2d(topi_compute): + """wrap bitserial_conv2d topi compute""" + def compute_bitserial_conv2d(attrs, inputs, out_dtype): + """Compute definition for bitserial conv2d.""" + padding = get_const_tuple(attrs.padding) + strides = get_const_tuple(attrs.strides) + activation_bits = attrs.activation_bits + weight_bits = attrs.weight_bits + pack_dtype = attrs.pack_dtype + out_dtype = attrs.out_dtype + unipolar = attrs.unipolar + return [topi_compute(inputs[0], inputs[1], strides, padding, activation_bits, + weight_bits, pack_dtype, out_dtype, unipolar)] + return compute_bitserial_conv2d + +@override_native_generic_func("bitserial_conv2d_strategy") +def bitserial_conv2d_strategy(attrs, inputs, out_type, target): + """bitserial_conv2d generic strategy""" + strategy = _op.OpStrategy() + layout = attrs.data_layout + if layout == "NCHW": + strategy.add_implement( + wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nchw), + wrap_topi_schedule(topi.generic.schedule_bitserial_conv2d_nchw)) + elif layout == "NHWC": + strategy.add_implement( + wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nhwc), + wrap_topi_schedule(topi.generic.schedule_bitserial_conv2d_nhwc)) + else: + raise ValueError("Data layout {} not supported.".format(layout)) + return strategy + +# bitserial_dense +def wrap_compute_bitserial_dense(topi_compute): + """wrap bitserial_dense topi compute""" + def compute_bitserial_dense(attrs, inputs, out_type): + """Compute definition of bitserial dense""" + data_bits = attrs.data_bits + weight_bits = attrs.weight_bits + pack_dtype = attrs.pack_dtype + out_dtype = attrs.out_dtype + out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype + unipolar = attrs.unipolar + return [topi_compute(inputs[0], inputs[1], data_bits, weight_bits, + pack_dtype, out_dtype, unipolar)] + return compute_bitserial_dense + +@override_native_generic_func("bitserial_dense_strategy") +def bitserial_dense_strategy(attrs, inputs, out_type, target): + """bitserial_dense generic strategy""" + strategy = _op.OpStrategy() + strategy.add_implement( + wrap_compute_bitserial_dense(topi.nn.bitserial_dense), + wrap_topi_schedule(topi.generic.schedule_bitserial_dense)) + return strategy diff --git a/python/tvm/relay/op/strategy/hls.py b/python/tvm/relay/op/strategy/hls.py new file mode 100644 index 0000000000000..0600f875416a3 --- /dev/null +++ b/python/tvm/relay/op/strategy/hls.py @@ -0,0 +1,151 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Definition of HLS operator strategy.""" +# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import +from __future__ import absolute_import + +import topi +from .generic import * +from .. import op as _op + +@schedule_injective.register("hls") +def schedule_injective_hls(attrs, outs, target): + """schedule injective ops for hls""" + with target: + return topi.hls.schedule_injective(outs) + +@schedule_reduce.register("hls") +def schedule_reduce_hls(attrs, outs, target): + """schedule reduction ops for hls""" + with target: + return topi.hls.schedule_reduce(outs) + +@schedule_concatenate.register("hls") +def schedule_concatenate_hls(attrs, outs, target): + """schedule concatenate for hls""" + with target: + return topi.hls.schedule_injective(outs) + +@schedule_pool.register("hls") +def schedule_pool_hls(attrs, outs, target): + """schedule pooling ops for hls""" + with target: + return topi.hls.schedule_pool(outs, attrs.layout) + +@schedule_adaptive_pool.register("hls") +def schedule_adaptive_pool_hls(attrs, outs, target): + """schedule adaptive pooling ops for hls""" + with target: + return topi.hls.schedule_adaptive_pool(outs) + +@schedule_softmax.register("hls") +def schedule_softmax_hls(attrs, outs, target): + """schedule softmax for hls""" + with target: + return topi.hls.schedule_softmax(outs) + +@override_native_generic_func("conv2d_strategy") +def conv2d_strategy_hls(attrs, inputs, out_type, target): + """conv2d hls strategy""" + strategy = _op.OpStrategy() + data, kernel = inputs + dilation = get_const_tuple(attrs.dilation) + groups = attrs.groups + layout = attrs.data_layout + kernel_layout = attrs.kernel_layout + (dilation_h, dilation_w) = dilation + if dilation_h < 1 or dilation_w < 1: + raise ValueError("dilation should be positive value") + + if groups == 1: + if layout == "NCHW": + assert kernel_layout == "OIHW" + strategy.add_implement( + wrap_compute_conv2d(topi.nn.conv2d_nchw), + wrap_topi_schedule(topi.hls.schedule_conv2d_nchw)) + elif layout == "NHWC": + assert kernel_layout == "HWIO" + strategy.add_implement( + wrap_compute_conv2d(topi.nn.conv2d_nhwc), + wrap_topi_schedule(topi.hls.schedule_conv2d_nhwc)) + else: + raise RuntimeError("Unsupported conv2d layout {}".format(layout)) + elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups): + if layout == "NCHW": + assert kernel_layout == "OIHW" + strategy.add_implement( + wrap_compute_conv2d(topi.nn.depthwise_conv2d_nchw), + wrap_topi_schedule(topi.hls.schedule_depthwise_conv2d_nchw)) + elif layout == "NHWC": + assert kernel_layout == "HWOI" + strategy.add_implement( + wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc), + wrap_topi_schedule(topi.hls.schedule_depthwise_conv2d_nhwc)) + else: + raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout)) + else: # group_conv2d + raise RuntimeError("group_conv2d is not supported for hls") + return strategy + +@override_native_generic_func("conv2d_NCHWc_strategy") +def conv2d_NCHWc_strategy_hls(attrs, inputs, out_type, target): + """conv2d_NCHWc hls strategy""" + strategy = _op.OpStrategy() + strategy.add_implement( + wrap_compute_conv2d(topi.nn.conv2d_NCHWc, True, True), + wrap_topi_schedule(topi.hls.schedule_conv2d_NCHWc)) + return strategy + +@conv2d_transpose_strategy.register("hls") +def conv2d_transpose_strategy_hls(attrs, inputs, out_type, target): + """conv2d_transpose hls strategy""" + layout = attrs.data_layout + dilation = get_const_tuple(attrs.dilation) + groups = attrs.groups + assert layout == "NCHW", "only support nchw for now" + assert dilation == (1, 1), "not support dilate now" + assert groups == 1, "only support groups == 1 for now" + strategy = _op.OpStrategy() + strategy.add_implement( + wrap_comptue_conv2d_transpose(topi.nn.conv2d_transpose_nchw), + wrap_topi_schedule(topi.hls.schedule_conv2d_transpose_nchw)) + return strategy + +@dense_strategy.register("hls") +def dense_strategy_hls(attrs, inputs, out_type, target): + """dense hls strategy""" + strategy = _op.OpStrategy() + strategy.add_implement(wrap_compute_dense(topi.nn.dense), + wrap_topi_schedule(topi.hls.schedule_dense)) + return strategy + +@bitserial_conv2d_strategy.register("hls") +def bitserial_conv2d_strategy_hls(attrs, inputs, out_type, target): + """bitserial_conv2d hls strategy""" + strategy = _op.OpStrategy() + layout = attrs.data_layout + if layout == "NCHW": + strategy.add_implement( + wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nchw), + wrap_topi_schedule(topi.hls.schedule_bitserial_conv2d_nchw)) + elif layout == "NHWC": + strategy.add_implement( + wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nhwc), + wrap_topi_schedule(topi.hls.schedule_bitserial_conv2d_nhwc)) + else: + raise ValueError("Data layout {} not supported.".format(layout)) + return strategy diff --git a/python/tvm/relay/op/strategy/intel_graphics.py b/python/tvm/relay/op/strategy/intel_graphics.py new file mode 100644 index 0000000000000..c94d5cbc211d0 --- /dev/null +++ b/python/tvm/relay/op/strategy/intel_graphics.py @@ -0,0 +1,72 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Definition of x86 operator strategy.""" +# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import +from __future__ import absolute_import + +import topi +from .generic import * +from .. import op as _op + + +@conv2d_strategy.register("intel_graphics") +def conv2d_strategy_intel_graphics(attrs, inputs, out_type, target): + """conv2d intel graphics strategy""" + strategy = _op.OpStrategy() + data, kernel = inputs + dilation_h, dilation_w = get_const_tuple(attrs.dilation) + groups = attrs.groups + layout = attrs.data_layout + kernel_layout = attrs.kernel_layout + if dilation_h < 1 or dilation_w < 1: + raise ValueError("dilation should be positive value") + + if groups == 1: + if layout == "NCHW": + assert kernel_layout == "OIHW" + strategy.add_implement( + wrap_compute_conv2d(topi.intel_graphics.conv2d_nchw), + wrap_topi_schedule(topi.intel_graphics.schedule_conv2d_nchw)) + # conv2d_NCHWc won't work without alter op layout pass + # TODO(@Laurawly): fix this + strategy.add_implement( + wrap_compute_conv2d(topi.intel_graphics.conv2d_NCHWc, True, True), + wrap_topi_schedule(topi.intel_graphics.schedule_conv2d_NCHWc), + 5) + else: + raise RuntimeError("Unsupported conv2d layout {} for intel graphics". + format(layout)) + elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups): + if layout == "NCHW": + assert kernel_layout == "OIHW" + strategy.add_implement( + wrap_compute_conv2d(topi.intel_graphics.depthwise_conv2d_nchw), + wrap_topi_schedule(topi.intel_graphics.schedule_depthwise_conv2d_nchw)) + else: + raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout)) + else: # group_conv2d + raise RuntimeError("group_conv2d is not supported for intel graphics") + return strategy + +@conv2d_NCHWc_strategy.register("intel_graphics") +def conv2d_NCHWc_strategy_intel_graphics(attrs, inputs, out_type, target): + """conv2d_NCHWc intel_graphics strategy""" + strategy = _op.OpStrategy() + strategy.add_implement( + wrap_compute_conv2d(topi.intel_graphics.conv2d_NCHWc, True, True), + wrap_topi_schedule(topi.intel_graphics.schedule_conv2d_NCHWc)) + return strategy diff --git a/python/tvm/relay/op/strategy/mali.py b/python/tvm/relay/op/strategy/mali.py new file mode 100644 index 0000000000000..8641a959952fb --- /dev/null +++ b/python/tvm/relay/op/strategy/mali.py @@ -0,0 +1,94 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Definition of mali operator strategy.""" +# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import + +from __future__ import absolute_import + +import topi +from .generic import * +from .. import op as _op + +@conv2d_strategy.register("mali") +def conv2d_strategy_mali(attrs, inputs, out_type, target): + """conv2d mali strategy""" + strategy = _op.OpStrategy() + data, kernel = inputs + dilation_h, dilation_w = attrs.get_int_tuple("dilation") + stride_h, stride_w = attrs.get_int_tuple("strides") + groups = attrs.groups + layout = attrs.data_layout + kernel_layout = attrs.kernel_layout + if dilation_h < 1 or dilation_w < 1: + raise ValueError("dilation should be positive value") + + if groups == 1: + if layout == "NCHW": + assert kernel_layout == "OIHW" + strategy.add_implement( + wrap_compute_conv2d(topi.mali.conv2d_nchw_spatial_pack), + wrap_topi_schedule(topi.mali.schedule_conv2d_nchw_spatial_pack)) + + _, _, kh, kw = get_const_tuple(kernel.shape) + if kh == 3 and kw == 3 and stride_h == 1 and stride_w == 1 and \ + dilation_h == 1 and dilation_w == 1: + strategy.add_implement( + wrap_compute_conv2d(topi.mali.conv2d_nchw_winograd), + wrap_topi_schedule(topi.mali.schedule_conv2d_nchw_winograd), + 15) + else: + raise RuntimeError("Unsupported conv2d layout {} for mali".format(layout)) + elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups): + if layout == "NCHW": + assert kernel_layout == "OIHW" + strategy.add_implement( + wrap_compute_conv2d(topi.mali.depthwise_conv2d_nchw), + wrap_topi_schedule(topi.mali.schedule_depthwise_conv2d_nchw)) + else: + raise RuntimeError("Unsupported depthwise_conv2d layout {} for mali".format(layout)) + else: # group_conv2d + raise RuntimeError("group_conv2d is not supported for mali") + return strategy + +@conv2d_winograd_without_weight_transfrom_strategy.register("mali") +def conv2d_winograd_without_weight_transfrom_strategy_mali(attrs, inputs, out_type, target): + """conv2d_winograd_without_weight_transfrom mali strategy""" + dilation = attrs.get_int_tuple("dilation") + groups = attrs.get_int("groups") + layout = attrs.data_layout + stride_h, stride_w = attrs.get_int_tuple("strides") + assert dilation == (1, 1), "Do not support dilate now" + assert groups == 1, "Do not supoort arbitrary group number" + strategy = _op.OpStrategy() + if layout == "NCHW": + _, _, kh, kw = get_const_tuple(inputs[1].shape) + assert kh == 3 and kw == 3 and stride_h == 1 and stride_w == 1 + strategy.add_implement( + wrap_compute_conv2d(topi.mali.conv2d_nchw_winograd), + wrap_topi_schedule(topi.mali.schedule_conv2d_nchw_winograd)) + else: + raise RuntimeError("Unsupported conv2d_winograd_without_weight_transfrom layout {}". + format(layout)) + return strategy + +@dense_strategy.register(["mali"]) +def dense_strategy_mali(attrs, inputs, out_type, target): + """dense mali strategy""" + strategy = _op.OpStrategy() + strategy.add_implement(wrap_compute_dense(topi.mali.dense), + wrap_topi_schedule(topi.mali.schedule_dense)) + return strategy diff --git a/python/tvm/relay/op/strategy/opengl.py b/python/tvm/relay/op/strategy/opengl.py new file mode 100644 index 0000000000000..f5da48c150c23 --- /dev/null +++ b/python/tvm/relay/op/strategy/opengl.py @@ -0,0 +1,73 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Definition of OpenGL operator strategy.""" +# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import +from __future__ import absolute_import + +import topi +from .generic import * +from .. import op as _op + +@schedule_injective.register("opengl") +def schedule_injective_opengl(attrs, outs, target): + """schedule injective ops for opengl""" + with target: + return topi.opengl.schedule_injective(outs) + +@schedule_concatenate.register("opengl") +def schedule_concatenate_opengl(attrs, outs, target): + """schedule concatenate for opengl""" + with target: + return topi.opengl.schedule_injective(outs) + +@schedule_pool.register("opengl") +def schedule_pool_opengl(attrs, outs, target): + """schedule pooling ops for opengl""" + with target: + return topi.opengl.schedule_pool(outs, attrs.layout) + +@schedule_adaptive_pool.register("opengl") +def schedule_adaptive_pool_opengl(attrs, outs, target): + """schedule adative pooling ops for opengl""" + with target: + return topi.opengl.schedule_adaptive_pool(outs) + +@schedule_softmax.register("opengl") +def schedule_softmax_opengl(attrs, outs, target): + """schedule softmax for opengl""" + with target: + return topi.opengl.schedule_softmax(outs) + +@conv2d_strategy.register("opengl") +def conv2d_strategy_opengl(attrs, inputs, out_type, target): + """conv2d hls strategy""" + strategy = _op.OpStrategy() + groups = attrs.groups + layout = attrs.data_layout + assert groups == 1, "Don't support group conv2d on OpenGL" + assert layout == "NCHW", "Only support conv2d layout NCHW for OpenGL" + strategy.add_implement(wrap_compute_conv2d(topi.nn.conv2d), + wrap_topi_schedule(topi.opengl.schedule_conv2d_nchw)) + return strategy + +@dense_strategy.register("opengl") +def dense_strategy_opengl(attrs, inputs, out_type, target): + """dense hls strategy""" + strategy = _op.OpStrategy() + strategy.add_implement(wrap_compute_dense(topi.nn.dense), + wrap_topi_schedule(topi.opengl.schedule_dense)) + return strategy diff --git a/python/tvm/relay/op/strategy/rocm.py b/python/tvm/relay/op/strategy/rocm.py new file mode 100644 index 0000000000000..9e725f65c511e --- /dev/null +++ b/python/tvm/relay/op/strategy/rocm.py @@ -0,0 +1,128 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Definition of ROCm operator strategy.""" +# pylint: disable=invalid-name,unused-argument,unused-wildcard-import,wildcard-import +from __future__ import absolute_import + +import topi +from .generic import * +from .. import op as _op + +@schedule_lrn.register("rocm") +def schedule_lrn_rocm(attrs, outs, target): + """schedule LRN for rocm""" + with target: + return topi.rocm.schedule_lrn(outs) + +@schedule_l2_normalize.register("rocm") +def schedule_l2_normalize_rocm(attrs, outs, target): + """schedule L2 normalize for rocm""" + with target: + return topi.rocm.schedule_l2_normalize(outs) + +@conv2d_strategy.register("rocm") +def conv2d_strategy_cuda(attrs, inputs, out_type, target): + """conv2d cuda strategy""" + strategy = _op.OpStrategy() + data, kernel = inputs + dilation_h, dilation_w = attrs.get_int_tuple("dilation") + groups = attrs.groups + layout = attrs.data_layout + stride_h, stride_w = attrs.get_int_tuple("strides") + kernel_layout = attrs.kernel_layout + if dilation_h < 1 or dilation_w < 1: + raise ValueError("dilation should be positive value") + + if groups == 1: + if layout == "NCHW": + # TODO(@vinx13, @icemelon9): Use conv2d_NCHWc_int8 when dtype is int8/uint8. + assert kernel_layout == "OIHW" + strategy.add_implement( + wrap_compute_conv2d(topi.cuda.conv2d_nchw), + wrap_topi_schedule(topi.cuda.schedule_conv2d_nchw)) + _, _, kh, kw = get_const_tuple(kernel.shape) + if kh <= 7 and kw <= 7 and kh == kw and stride_h == 1 and stride_w == 1: + strategy.add_implement( + wrap_compute_conv2d(topi.cuda.conv2d_nchw_winograd), + wrap_topi_schedule(topi.cuda.schedule_conv2d_nchw_winograd), + 15) + elif layout == "HWCN": + assert kernel_layout == "HWIO" + strategy.add_implement( + wrap_compute_conv2d(topi.cuda.conv2d_hwcn), + wrap_topi_schedule(topi.cuda.schedule_conv2d_hwcn)) + elif layout == "NHWC": + assert kernel_layout == "HWIO" + strategy.add_implement( + wrap_compute_conv2d(topi.cuda.conv2d_nhwc), + wrap_topi_schedule(topi.cuda.schedule_conv2d_nhwc)) + elif layout == "NCHW4c" and data.dtype in ["int8", "uint8"]: + assert kernel_layout == "OIHW4o4i" + strategy.add_implement( + wrap_compute_conv2d(topi.cuda.conv2d_NCHWc_int8, True), + wrap_topi_schedule(topi.cuda.schedule_conv2d_NCHWc_int8)) + else: + raise RuntimeError("Unsupported conv2d layout {} for CUDA".format(layout)) + # add miopen implementation + if "miopen" in target.libs: + if layout == "NCHW": + strategy.add_implement( + wrap_compute_conv2d(topi.rocm.conv2d_nchw_miopen, True), + wrap_topi_schedule(topi.rocm.schedule_conv2d_nchw_miopen), 5) + elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups): + if layout == "NCHW": + assert kernel_layout == "OIHW" + strategy.add_implement( + wrap_compute_conv2d(topi.cuda.depthwise_conv2d_nchw), + wrap_topi_schedule(topi.cuda.schedule_depthwise_conv2d_nchw)) + elif layout == "NHWC": + assert kernel_layout == "HWOI" + strategy.add_implement( + wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc), + wrap_topi_schedule(topi.cuda.schedule_depthwise_conv2d_nhwc)) + else: + raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout)) + else: # group_conv2d + if layout == 'NCHW': + # TODO(@vinx13, @icemelon9): Use group_conv2d_NCHWc_int8 when dtype is int8/uint8. + assert kernel_layout == "OIHW" + strategy.add_implement( + wrap_compute_conv2d(topi.cuda.group_conv2d_nchw, has_groups=True), + wrap_topi_schedule(topi.cuda.schedule_group_conv2d_nchw)) + elif layout == 'NCHW4c' and data.dtype in ["int8", "uint8"]: + assert kernel_layout == "OIHW4o4i" + strategy.add_implement( + wrap_compute_conv2d(topi.cuda.group_conv2d_NCHWc_int8, True), + wrap_topi_schedule(topi.cuda.schedule_group_conv2d_NCHWc_int8)) + else: + raise RuntimeError("Unsupported group_conv2d layout {}".format(layout)) + return strategy + +@dense_strategy.register(["rocm"]) +def dense_strategy_rocm(attrs, inputs, out_type, target): + """Dense strategy for ROCM""" + strategy = _op.OpStrategy() + assert len(inputs[0].shape) == 2 and len(inputs[1].shape) == 2, "Only support 2-dim dense" + + strategy.add_implement(wrap_compute_dense(topi.rocm.dense), + wrap_topi_schedule(topi.rocm.schedule_dense)) + if target.target_name == "rocm" and "rocblas" in target.libs: + assert out_type.dtype == inputs[0].dtype, "Mixed precision not supported." + strategy.add_implement( + wrap_compute_dense(topi.rocm.dense_rocblas), + wrap_topi_schedule(topi.rocm.dense_rocblas), 5) + return strategy diff --git a/python/tvm/relay/op/strategy/x86.py b/python/tvm/relay/op/strategy/x86.py new file mode 100644 index 0000000000000..bb6833d203c89 --- /dev/null +++ b/python/tvm/relay/op/strategy/x86.py @@ -0,0 +1,277 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Definition of x86 operator strategy.""" +# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import +from __future__ import absolute_import + +import logging + +import topi +from .generic import * +from .. import op as _op +from ....schedule import SpecializedCondition + +logger = logging.getLogger('strategy') + +@schedule_injective.register("cpu") +def schedule_injective_cpu(attrs, outs, target): + """schedule injective ops for x86""" + with target: + return topi.x86.schedule_injective(outs) + +@schedule_reduce.register("cpu") +def schedule_reduce_cpu(attrs, outs, target): + """schedule reduction ops for x86""" + with target: + return topi.x86.schedule_reduce(outs) + +@schedule_concatenate.register("cpu") +def schedule_concatenate_cpu(attrs, outs, target): + """schedule concatenate op for x86""" + with target: + return topi.x86.schedule_concatenate(outs) + +@schedule_pool.register("cpu") +def schedule_pool_cpu(attrs, outs, target): + """schedule pooling ops for x86""" + with target: + return topi.x86.schedule_pool(outs, attrs.layout) + +@schedule_adaptive_pool.register("cpu") +def schedule_adaptive_pool_cpu(attrs, outs, target): + """schedule adaptive pooling ops for x86""" + with target: + return topi.x86.schedule_adaptive_pool(outs) + +@schedule_softmax.register("cpu") +def schedule_softmax_cpu(attrs, outs, target): + """schedule softmax for x86""" + with target: + return topi.x86.schedule_softmax(outs) + +@conv2d_strategy.register("cpu") +def conv2d_strategy_cpu(attrs, inputs, out_type, target): + """conv2d x86 strategy""" + strategy = _op.OpStrategy() + data, kernel = inputs + dilation_h, dilation_w = get_const_tuple(attrs.dilation) + groups = attrs.groups + layout = attrs.data_layout + kernel_layout = attrs.kernel_layout + if dilation_h < 1 or dilation_w < 1: + raise ValueError("dilation should be positive value") + + if groups == 1: + if layout == "NCHW": + assert kernel_layout == "OIHW" + if topi.x86.is_int8_hw_support(data.dtype, kernel.dtype): + strategy.add_implement( + wrap_compute_conv2d(topi.x86.conv2d_nchw_int8), + wrap_topi_schedule(topi.x86.schedule_conv2d_nchw_int8)) + else: + strategy.add_implement( + wrap_compute_conv2d(topi.x86.conv2d_nchw), + wrap_topi_schedule(topi.x86.schedule_conv2d_nchw)) + elif layout == "NHWC": + assert kernel_layout == "HWIO" + logger.warning("For x86 target, NCHW layout is recommended for conv2d.") + strategy.add_implement( + wrap_compute_conv2d(topi.nn.conv2d_nhwc), + wrap_topi_schedule(topi.x86.schedule_conv2d_nhwc)) + elif layout == "HWCN": + assert kernel_layout == "HWIO" + logger.warning("For x86 target, NCHW layout is recommended for conv2d.") + strategy.add_implement( + wrap_compute_conv2d(topi.nn.conv2d_hwcn), + wrap_topi_schedule(topi.generic.schedule_conv2d_hwcn)) + else: + raise RuntimeError("Unsupported conv2d layout {} for cpu".format(layout)) + elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups): + if layout == "NCHW": + assert kernel_layout == "OIHW" + channel_multiplier = get_const_tuple(inputs[1].shape)[1] + if channel_multiplier == 1: + strategy.add_implement( + wrap_compute_conv2d(topi.x86.depthwise_conv2d_nchw), + wrap_topi_schedule(topi.x86.schedule_depthwise_conv2d_nchw)) + else: + logger.warning("For x86 target, depthwise_conv2d with channel " + "multiplier greater than 1 is not optimized") + strategy.add_implement( + wrap_compute_conv2d(topi.nn.depthwise_conv2d_nchw), + wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nchw)) + elif layout == "NHWC": + assert kernel_layout == "HWOI" + logger.warning("For x86 target, NCHW layout is recommended for depthwise_conv2d.") + strategy.add_implement( + wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc), + wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nhwc)) + else: + raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout)) + else: # group_conv2d + if layout == 'NCHW': + assert kernel_layout == "OIHW" + logger.warning("group_conv2d is not optimized for cpu.") + strategy.add_implement( + wrap_compute_conv2d(topi.nn.group_conv2d_nchw, has_groups=True), + wrap_topi_schedule(topi.generic.schedule_group_conv2d_nchw)) + else: + raise RuntimeError("Unsupported group_conv2d layout {}".format(layout)) + return strategy + +@conv2d_NCHWc_strategy.register("cpu") +def conv2d_NCHWc_strategy_cpu(attrs, inputs, out_type, target): + """conv2d_NCHWc x86 strategy""" + strategy = _op.OpStrategy() + data, kernel = inputs + if topi.x86.is_int8_hw_support(data.dtype, kernel.dtype): + strategy.add_implement( + wrap_compute_conv2d(topi.x86.conv2d_NCHWc_int8, True, True), + wrap_topi_schedule(topi.x86.schedule_conv2d_NCHWc_int8)) + else: + strategy.add_implement( + wrap_compute_conv2d(topi.x86.conv2d_NCHWc, True, True), + wrap_topi_schedule(topi.x86.schedule_conv2d_NCHWc)) + return strategy + +@depthwise_conv2d_NCHWc_strategy.register("cpu") +def depthwise_conv2d_NCHWc_strategy_cpu(attrs, inputs, out_type, target): + """depthwise_conv2d x86 strategy""" + strategy = _op.OpStrategy() + strategy.add_implement( + wrap_compute_conv2d(topi.x86.depthwise_conv2d_NCHWc, True, True), + wrap_topi_schedule(topi.x86.schedule_depthwise_conv2d_NCHWc)) + return strategy + +@conv2d_transpose_strategy.register("cpu") +def conv2d_transpose_strategy_cpu(attrs, inputs, out_type, target): + """conv2d_transpose x86 strategy""" + layout = attrs.data_layout + dilation = get_const_tuple(attrs.dilation) + groups = attrs.groups + assert layout == "NCHW", "only support nchw for now" + assert dilation == (1, 1), "not support dilate now" + assert groups == 1, "only support groups == 1 for now" + strategy = _op.OpStrategy() + strategy.add_implement( + wrap_comptue_conv2d_transpose(topi.x86.conv2d_transpose_nchw), + wrap_topi_schedule(topi.x86.schedule_conv2d_transpose_nchw)) + return strategy + +@conv3d_strategy.register("cpu") +def conv3d_strategy_cpu(attrs, inputs, out_type, target): + """conv3d generic strategy""" + strategy = _op.OpStrategy() + layout = attrs.data_layout + if layout == "NCDHW": + logger.warning("conv3d with layout NCDHW is not optimized for cpu.") + strategy.add_implement(wrap_compute_conv3d(topi.nn.conv3d_ncdhw), + wrap_topi_schedule(topi.generic.schedule_conv3d_ncdhw)) + elif layout == "NDHWC": + strategy.add_implement(wrap_compute_conv3d(topi.x86.conv3d_ndhwc), + wrap_topi_schedule(topi.x86.schedule_conv3d_ndhwc)) + else: + raise ValueError("Not support this layout {} yet".format(layout)) + return strategy + +@conv1d_strategy.register("cpu") +def conv1d_strategy_cpu(attrs, inputs, out_type, target): + """conv1d x86 strategy""" + layout = attrs.data_layout + dilation = get_const_tuple(attrs.dilation) + if dilation[0] < 1: + raise ValueError("dilation should be a positive value") + strategy = _op.OpStrategy() + if layout == "NCW": + strategy.add_implement(wrap_compute_conv1d(topi.nn.conv1d_ncw), + wrap_topi_schedule(topi.x86.schedule_conv1d_ncw)) + elif layout == "NWC": + strategy.add_implement(wrap_compute_conv1d(topi.nn.conv1d_nwc), + wrap_topi_schedule(topi.x86.schedule_conv1d_nwc)) + else: + raise ValueError("Unsupported conv1d layout {}".format(layout)) + return strategy + +@dense_strategy.register("cpu") +def dense_strategy_cpu(attrs, inputs, out_type, target): + """dense x86 strategy""" + strategy = _op.OpStrategy() + _, k = inputs[0].shape + strategy.add_implement(wrap_compute_dense(topi.x86.dense_nopack), + wrap_topi_schedule(topi.x86.schedule_dense_nopack), + 10) + if "cblas" in target.libs: + strategy.add_implement(wrap_compute_dense(topi.x86.dense_cblas), + wrap_topi_schedule(topi.x86.schedule_dense_cblas), + 5) + with SpecializedCondition(k > 16): + strategy.add_implement(wrap_compute_dense(topi.x86.dense_pack), + wrap_topi_schedule(topi.x86.schedule_dense_pack)) + return strategy + +@batch_matmul_strategy.register("cpu") +def batch_matmul_strategy_cpu(attrs, inputs, out_type, target): + """batch_matmul x86 strategy""" + strategy = _op.OpStrategy() + strategy.add_implement(wrap_compute_batch_matmul(topi.x86.batch_matmul), + wrap_topi_schedule(topi.x86.schedule_batch_matmul), + 10) + if "cblas" in target.libs: + strategy.add_implement(wrap_compute_batch_matmul(topi.x86.batch_matmul_cblas), + wrap_topi_schedule(topi.x86.schedule_batch_matmul_cblas), + 5) + return strategy + +@schedule_sparse_dense.register("cpu") +def schedule_sparse_dense_cpu(attrs, outs, target): + """schedule sparse_dense for x86""" + with target: + return topi.x86.schedule_sparse_dense(outs) + +@roi_align_strategy.register("cpu") +def roi_align_strategy_cpu(attrs, inputs, out_type, target): + """roi_align x86 strategy""" + strategy = _op.OpStrategy() + strategy.add_implement(wrap_compute_roi_align(topi.x86.roi_align_nchw), + wrap_topi_schedule(topi.generic.schedule_roi_align)) + return strategy + +@bitserial_conv2d_strategy.register("cpu") +def bitserial_conv2d_strategy_cpu(attrs, inputs, out_type, target): + """bitserial_conv2d x86 strategy""" + strategy = _op.OpStrategy() + layout = attrs.data_layout + if layout == "NCHW": + strategy.add_implement( + wrap_compute_bitserial_conv2d(topi.x86.bitserial_conv2d_nchw), + wrap_topi_schedule(topi.x86.schedule_bitserial_conv2d_nchw)) + elif layout == "NHWC": + strategy.add_implement( + wrap_compute_bitserial_conv2d(topi.x86.bitserial_conv2d_nhwc), + wrap_topi_schedule(topi.x86.schedule_bitserial_conv2d_nhwc)) + else: + raise ValueError("Data layout {} not supported.".format(layout)) + return strategy + +@bitserial_dense_strategy.register("cpu") +def bitserial_dense_strategy_cpu(attrs, inputs, out_type, target): + """bitserial_dense x86 strategy""" + strategy = _op.OpStrategy() + strategy.add_implement( + wrap_compute_bitserial_dense(topi.x86.bitserial_dense), + wrap_topi_schedule(topi.x86.schedule_bitserial_dense)) + return strategy diff --git a/python/tvm/relay/op/vision/_rcnn.py b/python/tvm/relay/op/vision/_rcnn.py index f35283961b277..16468e5eabc76 100644 --- a/python/tvm/relay/op/vision/_rcnn.py +++ b/python/tvm/relay/op/vision/_rcnn.py @@ -17,65 +17,27 @@ # pylint: disable=invalid-name, unused-argument """Faster R-CNN and Mask R-CNN operations.""" import topi -from topi.util import get_const_tuple, get_float_tuple, get_const_int +from topi.util import get_const_tuple from .. import op as reg +from .. import strategy from ..op import OpPattern - -@reg.register_compute("vision.roi_align") -def compute_roi_align(attrs, inputs, _, target): - """Compute definition of roi_align""" - assert attrs.layout == "NCHW" - return [topi.vision.rcnn.roi_align_nchw( - inputs[0], inputs[1], pooled_size=get_const_tuple(attrs.pooled_size), - spatial_scale=attrs.spatial_scale, sample_ratio=attrs.sample_ratio)] - -@reg.register_schedule("vision.roi_align") -def schedule_roi_align(_, outs, target): - """Schedule definition of roi_align""" - with target: - return topi.generic.vision.schedule_roi_align(outs) - +# roi_align +reg.register_strategy("vision.roi_align", strategy.roi_align_strategy) reg.register_pattern("vision.roi_align", OpPattern.OUT_ELEMWISE_FUSABLE) +# roi_pool @reg.register_compute("vision.roi_pool") -def compute_roi_pool(attrs, inputs, _, target): +def compute_roi_pool(attrs, inputs, _): """Compute definition of roi_pool""" assert attrs.layout == "NCHW" return [topi.vision.rcnn.roi_pool_nchw( inputs[0], inputs[1], pooled_size=get_const_tuple(attrs.pooled_size), spatial_scale=attrs.spatial_scale)] -@reg.register_schedule("vision.roi_pool") -def schedule_roi_pool(_, outs, target): - """Schedule definition of roi_pool""" - with target: - return topi.generic.vision.schedule_roi_pool(outs) - +reg.register_schedule("vision.roi_pool", strategy.schedule_roi_pool) reg.register_pattern("vision.roi_pool", OpPattern.OUT_ELEMWISE_FUSABLE) -@reg.register_compute("vision.proposal") -def compute_proposal(attrs, inputs, _, target): - """Compute definition of proposal""" - scales = get_float_tuple(attrs.scales) - ratios = get_float_tuple(attrs.ratios) - feature_stride = attrs.feature_stride - threshold = attrs.threshold - rpn_pre_nms_top_n = attrs.rpn_pre_nms_top_n - rpn_post_nms_top_n = attrs.rpn_post_nms_top_n - rpn_min_size = attrs.rpn_min_size - iou_loss = bool(get_const_int(attrs.iou_loss)) - with target: - return [ - topi.vision.rcnn.proposal(inputs[0], inputs[1], inputs[2], scales, ratios, - feature_stride, threshold, rpn_pre_nms_top_n, - rpn_post_nms_top_n, rpn_min_size, iou_loss) - ] - -@reg.register_schedule("vision.proposal") -def schedule_proposal(_, outs, target): - """Schedule definition of proposal""" - with target: - return topi.generic.schedule_proposal(outs) - +# proposal +reg.register_strategy("vision.proposal", strategy.proposal_strategy) reg.register_pattern("vision.proposal", OpPattern.OPAQUE) diff --git a/python/tvm/relay/op/vision/_vision.py b/python/tvm/relay/op/vision/_vision.py index 7de118071aa43..737954da82ba9 100644 --- a/python/tvm/relay/op/vision/_vision.py +++ b/python/tvm/relay/op/vision/_vision.py @@ -21,43 +21,28 @@ import topi from topi.util import get_const_int, get_const_float, get_float_tuple from .. import op as reg +from .. import strategy from ..op import OpPattern - -@reg.register_schedule("vision.multibox_prior") -def schedule_multibox_prior(_, outs, target): - """Schedule definition of multibox_prior""" - with target: - return topi.generic.schedule_multibox_prior(outs) - - +# multibox_prior @reg.register_compute("vision.multibox_prior") -def compute_multibox_prior(attrs, inputs, _, target): +def compute_multibox_prior(attrs, inputs, _): """Compute definition of multibox_prior""" sizes = get_float_tuple(attrs.sizes) ratios = get_float_tuple(attrs.ratios) steps = get_float_tuple(attrs.steps) offsets = get_float_tuple(attrs.offsets) clip = bool(get_const_int(attrs.clip)) - return [ - topi.vision.ssd.multibox_prior(inputs[0], sizes, ratios, steps, - offsets, clip) - ] - + return [topi.vision.ssd.multibox_prior(inputs[0], sizes, ratios, steps, + offsets, clip)] +reg.register_schedule("vision.multibox_prior", strategy.schedule_multibox_prior) reg.register_pattern("vision.multibox_prior", OpPattern.OPAQUE) # multibox_transform_loc -@reg.register_schedule("vision.multibox_transform_loc") -def schedule_multibox_transform_loc(_, outs, target): - """Schedule definition of multibox_detection""" - with target: - return topi.generic.schedule_multibox_transform_loc(outs) - - @reg.register_compute("vision.multibox_transform_loc") -def compute_multibox_transform_loc(attrs, inputs, _, target): +def compute_multibox_transform_loc(attrs, inputs, _): """Compute definition of multibox_detection""" clip = bool(get_const_int(attrs.clip)) threshold = get_const_float(attrs.threshold) @@ -65,57 +50,15 @@ def compute_multibox_transform_loc(attrs, inputs, _, target): return topi.vision.ssd.multibox_transform_loc( inputs[0], inputs[1], inputs[2], clip, threshold, variances) - +reg.register_schedule("vision.multibox_transform_loc", strategy.schedule_multibox_transform_loc) reg.register_pattern("vision.multibox_transform_loc", OpPattern.OPAQUE) -reg.register_pattern("vision.multibox_detection", OpPattern.OPAQUE) # Get counts of valid boxes -@reg.register_schedule("vision.get_valid_counts") -def schedule_get_valid_counts(_, outs, target): - """Schedule definition of get_valid_counts""" - with target: - return topi.generic.schedule_get_valid_counts(outs) - - -@reg.register_compute("vision.get_valid_counts") -def compute_get_valid_counts(attrs, inputs, _, target): - """Compute definition of get_valid_counts""" - score_threshold = get_const_float(attrs.score_threshold) - id_index = get_const_int(attrs.id_index) - score_index = get_const_int(attrs.score_index) - return topi.vision.get_valid_counts(inputs[0], score_threshold, - id_index, score_index) - +reg.register_strategy("vision.get_valid_counts", strategy.get_valid_counts_strategy) reg.register_pattern("vision.get_valid_counts", OpPattern.OPAQUE) # non-maximum suppression -@reg.register_schedule("vision.non_max_suppression") -def schedule_nms(_, outs, target): - """Schedule definition of nms""" - with target: - return topi.generic.schedule_nms(outs) - - -@reg.register_compute("vision.non_max_suppression") -def compute_nms(attrs, inputs, _, target): - """Compute definition of nms""" - return_indices = bool(get_const_int(attrs.return_indices)) - max_output_size = get_const_int(attrs.max_output_size) - iou_threshold = get_const_float(attrs.iou_threshold) - force_suppress = bool(get_const_int(attrs.force_suppress)) - top_k = get_const_int(attrs.top_k) - coord_start = get_const_int(attrs.coord_start) - score_index = get_const_int(attrs.score_index) - id_index = get_const_int(attrs.id_index) - invalid_to_bottom = bool(get_const_int(attrs.invalid_to_bottom)) - return [ - topi.vision.non_max_suppression(inputs[0], inputs[1], max_output_size, - iou_threshold, force_suppress, top_k, - coord_start, score_index, id_index, - return_indices, invalid_to_bottom) - ] - - +reg.register_strategy("vision.non_max_suppression", strategy.nms_strategy) reg.register_pattern("vision.non_max_suppression", OpPattern.OPAQUE) diff --git a/python/tvm/relay/op/vision/_yolo.py b/python/tvm/relay/op/vision/_yolo.py index 32fc62d5c23a2..d6ac0d4bfbcf5 100644 --- a/python/tvm/relay/op/vision/_yolo.py +++ b/python/tvm/relay/op/vision/_yolo.py @@ -17,9 +17,9 @@ #pylint: disable=invalid-name, unused-argument """Backend compiler related feature registration""" from __future__ import absolute_import -from ..op import register_schedule, register_pattern -from ..op import schedule_injective, OpPattern +from ..op import register_pattern, OpPattern +from ..op import register_strategy_injective # reorg register_pattern("vision.yolo_reorg", OpPattern.INJECTIVE) -register_schedule("vision.yolo_reorg", schedule_injective) +register_strategy_injective("vision.yolo_reorg") diff --git a/python/tvm/relay/quantize/_annotate.py b/python/tvm/relay/quantize/_annotate.py index ba100d8d03e40..82b243a9fc14a 100644 --- a/python/tvm/relay/quantize/_annotate.py +++ b/python/tvm/relay/quantize/_annotate.py @@ -52,11 +52,10 @@ def simulated_quantize_compute(attrs, inputs, out_type, target): return [rdata] -_reg.register_schedule("relay.op.annotation.simulated_quantize", - _reg.schedule_injective) +_reg.register_strategy_injective("relay.op.annotation.simulated_quantize") _reg.register_pattern("relay.op.annotation.simulated_quantize", _reg.OpPattern.ELEMWISE) -_reg.register_schedule("annotation.cast_hint", _reg.schedule_injective) +_reg.register_strategy_injective("annotation.cast_hint") @register_relay_node diff --git a/python/tvm/te/schedule.py b/python/tvm/te/schedule.py index d160f78d7c898..affb284da4681 100644 --- a/python/tvm/te/schedule.py +++ b/python/tvm/te/schedule.py @@ -517,4 +517,38 @@ def opengl(self): _ffi_api.StageOpenGL(self) +@tvm._ffi.register_object +class SpecializedCondition(Object): + """Specialized condition to enable op specialization.""" + def __init__(self, conditions): + """Create a specialized condition. + + .. note:: + Conditions are represented in conjunctive joint form (CNF). + Each condition should be a simple expression, e.g., n > 16, + m % 8 == 0, etc., where n, m are tvm.Var that represents a + dimension in the tensor shape. + + Parameters + ---------- + conditions : List of tvm.Expr + List of conditions in conjunctive joint form (CNF). + """ + if not isinstance(conditions, (list, _container.Array)): + conditions = [conditions] + self.__init_handle_by_constructor__( + _ffi_api._CreateSpecializedCondition, conditions) + + def __enter__(self): + _ffi_api._EnterSpecializationScope(self) + return self + + def __exit__(self, ptype, value, trace): + _ffi_api._ExitSpecializationScope(self) + + +def current_specialization(): + return _ffi_api._GetCurrentSpecialization() + + tvm._ffi._init_api("schedule", __name__) diff --git a/python/tvm/tir/expr.py b/python/tvm/tir/expr.py index aeda603e19aa2..b0eab49ef9e00 100644 --- a/python/tvm/tir/expr.py +++ b/python/tvm/tir/expr.py @@ -964,3 +964,11 @@ class Let(PrimExprWithOp): def __init__(self, var, value, body): self.__init_handle_by_constructor__( _ffi_api.Let, var, value, body) + + +@register_object +class Any(PrimExpr): + """Any node. + """ + def __init__(self): + self.__init_handle_by_constructor__(_ffi_api.Any) diff --git a/src/api/api_ir.cc b/src/api/api_ir.cc index 1e71baf305d4f..b7ad423005f97 100644 --- a/src/api/api_ir.cc +++ b/src/api/api_ir.cc @@ -169,6 +169,7 @@ REGISTER_MAKE(Prefetch); REGISTER_MAKE(Free); REGISTER_MAKE(IfThenElse); REGISTER_MAKE(Evaluate); +REGISTER_MAKE(Any); // overloaded, needs special handling // has default args diff --git a/src/relay/backend/compile_engine.cc b/src/relay/backend/compile_engine.cc index e5629e8f35050..bd51fdf1d59e4 100644 --- a/src/relay/backend/compile_engine.cc +++ b/src/relay/backend/compile_engine.cc @@ -52,6 +52,24 @@ TVM_REGISTER_NODE_TYPE(CCacheKeyNode); TVM_REGISTER_NODE_TYPE(CCacheValueNode); TVM_REGISTER_OBJECT_TYPE(CompileEngineNode); +CachedFunc CachedFuncNode::make(tvm::Target target, + std::string func_name, + tvm::Array inputs, + tvm::Array outputs, + te::Schedule schedule, + tvm::Array funcs, + tvm::Array shape_func_param_states) { + auto n = make_object(); + n->target = std::move(target); + n->func_name = func_name; + n->inputs = std::move(inputs); + n->outputs = std::move(outputs); + n->schedule = std::move(schedule); + n->funcs = std::move(funcs); + n->shape_func_param_states = std::move(shape_func_param_states); + return CachedFunc(n); +} + CCacheKey CCacheKeyNode::make(Function source_func, Target target) { auto n = make_object(); n->source_func = std::move(source_func); @@ -100,6 +118,7 @@ Array GetShape(const Array& shape) { return res; } +/* // The getter to get schedule from compile engine. // Get schedule from functor. class ScheduleGetter : @@ -208,7 +227,7 @@ class ScheduleGetter : LOG(FATAL) << "not handled"; return tvm::PrimExpr(); } - }, "compile_engine_const", topi::kBroadcast); + }, "compile_engine_const", topi::kBroadcast); scalars_.push_back(value->op); return {value}; } @@ -231,7 +250,7 @@ class ScheduleGetter : } if (count_tuple) { CHECK_EQ(call_node->args.size(), 1U) - << "Only allow function with a single tuple input"; + << "Only allow function with a single tuple input"; } // Prepare the call_node->checked_type(). For the call node inputs, we ensure that the shape is @@ -253,7 +272,7 @@ class ScheduleGetter : } CHECK(call_node->op.as()) - << "Primitive function only allows call into primitive ops"; + << "Primitive function only allows call into primitive ops"; Op op = Downcast(call_node->op); Array outputs; // Skip fcompute for device copy operators as it is not registered. @@ -269,8 +288,8 @@ class ScheduleGetter : int op_pattern = fpattern[op]; if (op_pattern >= kCommReduce) { CHECK(!master_op_.defined() || master_op_pattern_ < kCommReduce) - << "Two complicated op in a primitive function " - << " master=" << master_op_ << " current=" << op; + << "Two complicated op in a primitive function " + << " master=" << master_op_ << " current=" << op; } if (op_pattern >= master_op_pattern_) { master_op_ = op; @@ -339,6 +358,7 @@ class ScheduleGetter : // overhead for each invocation of call node when retrieving schedules. const Op& device_copy_op_; }; +*/ // Creates shape function from functor. class MakeShapeFunc : public ExprFunctor(const Expr&)> { @@ -677,9 +697,14 @@ class CompileEngineImpl : public CompileEngineNode { * \return Pair of schedule and cache. * The funcs field in cache is not yet populated. */ - std::pair CreateSchedule( - const Function& source_func, const Target& target) { - return ScheduleGetter(target).Create(source_func); + CachedFunc CreateSchedule(const Function& source_func, const Target& target) { + CachedFunc cfunc; + if (const auto* f = runtime::Registry::Get("relay.backend.create_schedule")) { + cfunc = (*f)(source_func, target); + } else { + LOG(FATAL) << "relay.backend.create_schedule is not registered"; + } + return cfunc; } private: @@ -713,9 +738,9 @@ class CompileEngineImpl : public CompileEngineNode { With target_scope(key->target); CHECK(!value->cached_func.defined()); - auto spair = CreateSchedule(key->source_func, key->target); + auto cfunc = CreateSchedule(key->source_func, key->target); auto cache_node = make_object( - *(spair.second.operator->())); + *(cfunc.operator->())); // Skip lowering for device copy node. const Expr body = (key->source_func)->body; @@ -735,11 +760,12 @@ class CompileEngineImpl : public CompileEngineNode { // lower the function if (const auto* f = runtime::Registry::Get("relay.backend.lower")) { cache_node->funcs = (*f)( - spair.first, all_args, cache_node->func_name, key->source_func); + cfunc->schedule, all_args, cache_node->func_name, key->source_func); } else { tvm::BuildConfig bcfg = BuildConfig::Create(); std::unordered_map binds; - cache_node->funcs = tvm::lower(spair.first, all_args, cache_node->func_name, binds, bcfg); + cache_node->funcs = tvm::lower(cfunc->schedule, all_args, cache_node->func_name, + binds, bcfg); } value->cached_func = CachedFunc(cache_node); return value; @@ -820,6 +846,9 @@ const CompileEngine& CompileEngine::Global() { return *inst; } +TVM_REGISTER_GLOBAL("relay.backend._make_CachedFunc") +.set_body_typed(CachedFuncNode::make); + TVM_REGISTER_GLOBAL("relay.backend._make_CCacheKey") .set_body_typed(CCacheKeyNode::make); diff --git a/src/relay/backend/compile_engine.h b/src/relay/backend/compile_engine.h index 15ec2d6bd0f18..a405b208ddcb6 100644 --- a/src/relay/backend/compile_engine.h +++ b/src/relay/backend/compile_engine.h @@ -44,6 +44,7 @@ enum ShapeFuncParamState { kNeedBoth = 3, }; +class CachedFunc; /*! \brief Node container to represent a cached function. */ struct CachedFuncNode : public Object { /* \brief compiled target */ @@ -54,6 +55,8 @@ struct CachedFuncNode : public Object { tvm::Array inputs; /* \brief The outputs to the function */ tvm::Array outputs; + /* \brief The schedule to the function */ + te::Schedule schedule; /*! \brief The lowered functions to support the function. */ tvm::Array funcs; /*! \brief Parameter usage states in the shape function. */ @@ -64,10 +67,19 @@ struct CachedFuncNode : public Object { v->Visit("func_name", &func_name); v->Visit("inputs", &inputs); v->Visit("outputs", &outputs); + v->Visit("schedule", &schedule); v->Visit("funcs", &funcs); v->Visit("shape_func_param_states", &shape_func_param_states); } + TVM_DLL static CachedFunc make(tvm::Target target, + std::string func_name, + tvm::Array inputs, + tvm::Array outputs, + te::Schedule schedule, + tvm::Array funcs, + tvm::Array shape_func_param_states); + static constexpr const char* _type_key = "relay.CachedFunc"; TVM_DECLARE_FINAL_OBJECT_INFO(CachedFuncNode, Object); }; diff --git a/src/relay/ir/expr.cc b/src/relay/ir/expr.cc index 0292a6c2bb057..f63fc7a26c20d 100644 --- a/src/relay/ir/expr.cc +++ b/src/relay/ir/expr.cc @@ -354,6 +354,12 @@ TVM_REGISTER_GLOBAL("relay._expr.TempExprRealize") return temp->Realize(); }); +TVM_REGISTER_GLOBAL("relay._expr.FunctionGetAttr") +.set_body_typed( + [](Function func, std::string name) { + return FunctionGetAttr(func, name); +}); + TVM_REGISTER_GLOBAL("relay._expr.FunctionSetAttr") .set_body_typed( [](Function func, std::string name, ObjectRef ref) { diff --git a/src/relay/ir/op_attr_types.cc b/src/relay/ir/op_attr_types.cc new file mode 100644 index 0000000000000..38f890ba75d4f --- /dev/null +++ b/src/relay/ir/op_attr_types.cc @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +#include +#include + +namespace tvm { +namespace relay { + +TVM_REGISTER_NODE_TYPE(OpImplementNode); +TVM_REGISTER_NODE_TYPE(OpSpecializationNode); +TVM_REGISTER_NODE_TYPE(OpStrategyNode); + +Array OpImplement::Compute(const Attrs& attrs, + const Array& inputs, + const Type& out_type) { + return (*this)->fcompute(attrs, inputs, out_type); +} + +te::Schedule OpImplement::Schedule(const Attrs& attrs, + const Array &outs, + const Target& target) { + return (*this)->fschedule(attrs, outs, target); +} + +void OpSpecialization::AddImplement(tvm::relay::FTVMCompute fcompute, + tvm::relay::FTVMSchedule fschedule, + int plevel) { + auto n = make_object(); + n->fcompute = fcompute; + n->fschedule = fschedule; + n->plevel = IntImm(DataType::Int(32), plevel); + (*this)->implements.push_back(OpImplement(n)); +} + +void OpStrategy::AddImplement(FTVMCompute fcompute, + FTVMSchedule fschedule, + int plevel) { + auto curr_cond = te::SpecializedCondition::Current(); + auto specializations = (*this)->specializations; + OpSpecialization op_spec; + for (auto e : specializations) { + if (e->condition == curr_cond) { + op_spec = e; + break; + } + } + if (op_spec.defined()) { + op_spec.AddImplement(fcompute, fschedule, plevel); + } else { + ObjectPtr n = make_object(); + n->condition = curr_cond; + op_spec = OpSpecialization(n); + op_spec.AddImplement(fcompute, fschedule, plevel); + (*this)->specializations.push_back(op_spec); + } +} + +TVM_REGISTER_GLOBAL("relay.op._OpImplementCompute") +.set_body([](TVMArgs args, TVMRetValue* rv) { + OpImplement imp = args[0]; + Attrs attrs = args[1]; + Array inputs = args[2]; + Type out_type = args[3]; + *rv = imp.Compute(attrs, inputs, out_type); +}); + +TVM_REGISTER_GLOBAL("relay.op._OpImplementSchedule") +.set_body([](TVMArgs args, TVMRetValue* rv) { + OpImplement imp = args[0]; + Attrs attrs = args[1]; + Array outs = args[2]; + Target target = args[3]; + *rv = imp.Schedule(attrs, outs, target); +}); + +TVM_REGISTER_GLOBAL("relay.op._make.OpStrategy") +.set_body([](TVMArgs args, TVMRetValue* rv) { + ObjectPtr n = make_object(); + *rv = OpStrategy(n); +}); + +TVM_REGISTER_GLOBAL("relay.op._OpStrategyAddImplement") +.set_body([](TVMArgs args, TVMRetValue* rv) { + OpStrategy strategy = args[0]; + FTVMCompute compute = args[1]; + FTVMSchedule schedule = args[2]; + int plevel = args[3]; + strategy.AddImplement(compute, schedule, plevel); +}); + + +} // namespace relay +} // namespace tvm diff --git a/src/relay/op/annotation/annotation.cc b/src/relay/op/annotation/annotation.cc index 6106b07f543b2..36f592355a2c4 100644 --- a/src/relay/op/annotation/annotation.cc +++ b/src/relay/op/annotation/annotation.cc @@ -79,7 +79,7 @@ TVM_ADD_FILELINE) .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) .set_attr("FTVMCompute", [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { + const Type& out_dtype) -> Array { return {topi::identity(inputs[0])}; }); @@ -105,7 +105,7 @@ TVM_ADD_FILELINE) .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) .set_attr("FTVMCompute", [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { + const Type& out_dtype) -> Array { return {topi::identity(inputs[0])}; }); @@ -123,7 +123,7 @@ Mark the start of bitpacking. ElemwiseArbitraryLayout) .set_attr("FTVMCompute", [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { + const Type& out_dtype) -> Array { return {topi::identity(inputs[0])}; }); @@ -140,7 +140,7 @@ Mark the end of bitpacking. ElemwiseArbitraryLayout) .set_attr("FTVMCompute", [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { + const Type& out_dtype) -> Array { return {topi::identity(inputs[0])}; }); @@ -163,7 +163,7 @@ Mark a checkpoint for checkpointing memory optimization. ElemwiseArbitraryLayout) .set_attr("FTVMCompute", [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { + const Type& out_dtype) -> Array { Array outputs; for (size_t i = 0; i < inputs.size(); ++i) { outputs.push_back(topi::identity(inputs[i])); @@ -184,7 +184,7 @@ Beginning of a region that is handled by a given compiler. ElemwiseArbitraryLayout) .set_attr("FTVMCompute", [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { + const Type& out_dtype) -> Array { return {topi::identity(inputs[0])}; }); @@ -209,7 +209,7 @@ End of a region that is handled by a given compiler. ElemwiseArbitraryLayout) .set_attr("FTVMCompute", [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { + const Type& out_dtype) -> Array { return {topi::identity(inputs[0])}; }); diff --git a/src/relay/op/debug.cc b/src/relay/op/debug.cc index 14c0a01576d57..a0f7fbf4cfeb7 100644 --- a/src/relay/op/debug.cc +++ b/src/relay/op/debug.cc @@ -36,9 +36,8 @@ namespace relay { TVM_REGISTER_NODE_TYPE(DebugAttrs); Array DebugCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { return Array{ topi::identity(inputs[0]) }; } diff --git a/src/relay/op/memory/memory.cc b/src/relay/op/memory/memory.cc index 076e3fcb0dbb1..d15099b6b451b 100644 --- a/src/relay/op/memory/memory.cc +++ b/src/relay/op/memory/memory.cc @@ -83,7 +83,7 @@ RELAY_REGISTER_OP("memory.alloc_storage") .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) .set_attr("FTVMCompute", [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { + const Type& out_dtype) -> Array { return {topi::identity(inputs[0])}; }); @@ -179,7 +179,7 @@ RELAY_REGISTER_OP("memory.alloc_tensor") .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) .set_attr("FTVMCompute", [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { + const Type& out_dtype) -> Array { return {topi::identity(inputs[0])}; }); @@ -228,7 +228,7 @@ RELAY_REGISTER_OP("memory.invoke_tvm_op") .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) .set_attr("FTVMCompute", [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { + const Type& out_dtype) -> Array { return {topi::identity(inputs[0])}; }); @@ -252,7 +252,7 @@ RELAY_REGISTER_OP("memory.kill") .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) .set_attr("FTVMCompute", [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { + const Type& out_dtype) -> Array { return {topi::identity(inputs[0])}; }); @@ -340,7 +340,7 @@ RELAY_REGISTER_OP("memory.shape_func") .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) .set_attr("FTVMCompute", [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { + const Type& out_dtype) -> Array { return {topi::identity(inputs[0])}; }); diff --git a/src/relay/op/nn/convolution.cc b/src/relay/op/nn/convolution.cc index 6977ac9b8575a..cd9b5ddc7fbfd 100644 --- a/src/relay/op/nn/convolution.cc +++ b/src/relay/op/nn/convolution.cc @@ -735,58 +735,6 @@ weight transformation in advance. .add_type_rel("Conv2DWinogradWeightTransform", Conv2DWinogradWeightTransformRel); -// Positional relay function to create conv2d winograd nnpack operator -// used by frontend FFI. -Expr MakeConv2DWinogradNNPACK(Expr data, - Expr weight, - Array strides, - Array padding, - Array dilation, - int groups, - IndexExpr channels, - Array kernel_size, - std::string data_layout, - std::string kernel_layout, - std::string out_layout, - DataType out_dtype) { - auto attrs = make_object(); - attrs->strides = std::move(strides); - attrs->padding = std::move(padding); - attrs->dilation = std::move(dilation); - attrs->groups = groups; - attrs->channels = channels; - attrs->kernel_size = std::move(kernel_size); - attrs->data_layout = std::move(data_layout); - attrs->kernel_layout = std::move(kernel_layout); - attrs->out_layout = std::move(out_layout); - attrs->out_dtype = std::move(out_dtype); - static const Op& op = Op::Get("nn.contrib_conv2d_winograd_nnpack_without_weight_transform"); - return CallNode::make(op, {data, weight}, Attrs(attrs), {}); -} - -TVM_REGISTER_GLOBAL("relay.op.nn._make.contrib_conv2d_winograd_nnpack_without_weight_transform") -.set_body_typed(MakeConv2DWinogradNNPACK); - -RELAY_REGISTER_OP("nn.contrib_conv2d_winograd_nnpack_without_weight_transform") -.describe(R"code(Compute conv2d with winograd nnpack. Only supports NCHW layout. - This operator assumes the weight tensor is already pre-transformed by - nn.contrib_conv2d_winograd_nnpack_weight_transform. - -- **data**: Input is 4D array of shape (batch_size, in_channels, height, width) -- **weight**: Any shape - We do not check the shape for this input tensor. Since different backend - has different layout strategy. - -- **out**: Output is 4D array of shape (batch_size, channels, out_height, out_width) -)code" TVM_ADD_FILELINE) -.set_attrs_type() -.set_num_inputs(2) -.add_argument("data", "Tensor", "The input tensor.") -.add_argument("weight", "Tensor", "The weight tensor.") -.set_support_level(10) -.add_type_rel("Conv2DWinogradNNPACKRel", Conv2DWinogradRel) -.set_attr("FInferCorrectLayout", ConvInferCorrectLayout); - // relay.nn.contrib_conv2d_winograd_nnpack_weight_transform TVM_REGISTER_NODE_TYPE(Conv2DWinogradNNPACKWeightTransformAttrs); @@ -848,55 +796,6 @@ weight transformation in advance. .set_support_level(10) .add_type_rel("Conv2DWinogradNNPACKWeightTransform", Conv2DWinogradNNPACKWeightTransformRel); -// Positional relay function to create conv2d NCHWc operator -// used by frontend FFI. -Expr MakeConv2DNCHWcInt8(Expr data, - Expr kernel, - Array strides, - Array padding, - Array dilation, - int groups, - IndexExpr channels, - Array kernel_size, - std::string data_layout, - std::string kernel_layout, - std::string out_layout, - DataType out_dtype) { - auto attrs = make_object(); - attrs->strides = std::move(strides); - attrs->padding = std::move(padding); - attrs->dilation = std::move(dilation); - attrs->groups = groups; - attrs->channels = channels; - attrs->kernel_size = std::move(kernel_size); - attrs->data_layout = std::move(data_layout); - attrs->kernel_layout = std::move(kernel_layout); - attrs->out_layout = std::move(out_layout); - attrs->out_dtype = std::move(out_dtype); - static const Op& op = Op::Get("nn.contrib_conv2d_NCHWc_int8"); - return CallNode::make(op, {data, kernel}, Attrs(attrs), {}); -} - -TVM_REGISTER_GLOBAL("relay.op.nn._make.contrib_conv2d_NCHWc_int8") -.set_body_typed(MakeConv2DNCHWcInt8); - - -RELAY_REGISTER_OP("nn.contrib_conv2d_NCHWc_int8") -.describe(R"code(Compute conv2d with NCHWc data layout with int8 inputs. -- **data**: Input is 5D packed tensor. -- **weight**: 7D packed tensor. - -- **out**: Output is 5D packed tensor -)code" TVM_ADD_FILELINE) -.set_attrs_type() -.set_num_inputs(2) -.add_argument("data", "Tensor", "The input tensor.") -.add_argument("weight", "Tensor", "The weight tensor.") -.set_support_level(10) -.add_type_rel("Conv2DNCHWcInt8", Conv2DWinogradRel) -.set_attr("FInferCorrectLayout", - ConvInferCorrectLayout); - // Positional relay function to create conv2d NCHWc operator // used by frontend FFI. Expr MakeConv2DNCHWc(Expr data, diff --git a/src/relay/op/nn/nn.cc b/src/relay/op/nn/nn.cc index ee4471a85c17a..10fd4d975ce4b 100644 --- a/src/relay/op/nn/nn.cc +++ b/src/relay/op/nn/nn.cc @@ -93,8 +93,9 @@ RELAY_REGISTER_OP("nn.bias_add") .add_argument("bias", "1D Tensor", "Bias.") .set_support_level(1) .add_type_rel("BiasAdd", BiasAddRel) -.set_attr("FTVMCompute", [](const Attrs& attrs, const Array& inputs, - const Type& out_type, const Target& target) { +.set_attr("FTVMCompute", [](const Attrs& attrs, + const Array& inputs, + const Type& out_type) { const auto* param = attrs.as(); return tvm::Array{topi::nn::bias_add(inputs[0], inputs[1], param->axis)}; }); @@ -234,8 +235,7 @@ RELAY_REGISTER_OP("nn.leaky_relu") .set_attr( "FTVMCompute", [](const Attrs& attrs, const Array& inputs, - const Type& out_type, - const Target& target) { + const Type& out_type) { const auto* param = attrs.as(); return Array{ topi::leaky_relu(inputs[0], param->alpha) }; }); @@ -315,8 +315,7 @@ where :math:`*` is an channelwise multiplication for each sample in the batch. .set_attr( "FTVMCompute", [](const Attrs& attrs, const Array& inputs, - const Type& out_type, - const Target& target) { + const Type& out_type) { const auto* param = attrs.as(); return Array{ topi::prelu(inputs[0], inputs[1], param->axis)}; }); @@ -351,8 +350,7 @@ RELAY_REGISTER_OP("nn.softmax") .add_type_rel("Identity", IdentityRel) .set_attr("FTVMCompute", [](const Attrs& attrs, const Array& inputs, - const Type& out_type, - const Target& target) { + const Type& out_type) { const auto* param = attrs.as(); CHECK(param != nullptr); return Array{ topi::nn::softmax(inputs[0], param->axis) }; @@ -385,8 +383,7 @@ RELAY_REGISTER_OP("nn.log_softmax") .add_type_rel("Identity", IdentityRel) .set_attr("FTVMCompute", [](const Attrs& attrs, const Array& inputs, - const Type& out_type, - const Target& target) { + const Type& out_type) { const auto* param = attrs.as(); CHECK(param != nullptr); CHECK(param->axis == -1 || param->axis == static_cast(inputs[0].ndim()) - 1) @@ -462,8 +459,7 @@ Example:: .set_attr( "FTVMCompute", [](const Attrs& attrs, const Array& inputs, - const Type& out_type, - const Target& target) { + const Type& out_type) { return Array{ topi::nn::flatten(inputs[0]) }; }); @@ -489,8 +485,7 @@ RELAY_REGISTER_OP("nn.relu") .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) .set_attr("FTVMCompute", [](const Attrs& attrs, const Array& inputs, - const Type& out_type, - const Target& target) { + const Type& out_type) { return Array{ topi::relu(inputs[0], 0.0f) }; }); diff --git a/src/relay/op/nn/pad.cc b/src/relay/op/nn/pad.cc index 94602ec9a61aa..84a49403e8375 100644 --- a/src/relay/op/nn/pad.cc +++ b/src/relay/op/nn/pad.cc @@ -161,9 +161,8 @@ bool PadRel(const Array& types, } Array PadCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { const auto* param = attrs.as(); CHECK(param != nullptr); diff --git a/src/relay/op/nn/pooling.cc b/src/relay/op/nn/pooling.cc index 6775b09e8aa94..e9057b7ac0869 100644 --- a/src/relay/op/nn/pooling.cc +++ b/src/relay/op/nn/pooling.cc @@ -164,9 +164,8 @@ bool Pool2DRel(const Array& types, template Array Pool2DCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { static const Layout kNCHW("NCHW"); const auto* param = attrs.as(); CHECK(param != nullptr); @@ -331,9 +330,8 @@ bool GlobalPool2DRel(const Array& types, template Array GlobalPool2DCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { static const Layout kNCHW("NCHW"); const auto* param = attrs.as(); CHECK(param != nullptr); @@ -465,9 +463,8 @@ bool AdaptivePool2DRel(const Array& types, template Array AdaptivePool2DCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { static const Layout kNCHW("NCHW"); const auto* param = attrs.as(); CHECK(param != nullptr); @@ -593,8 +590,9 @@ bool Pool2DGradRel(const Array& types, int num_inputs, const Attrs& attrs, } template -Array Pool2DGradCompute(const Attrs& attrs, const Array& inputs, - const Type& out_type, const Target& target) { +Array Pool2DGradCompute(const Attrs& attrs, + const Array& inputs, + const Type& out_type) { static const Layout kNCHW("NCHW"); const auto* param = attrs.as(); CHECK(param != nullptr); @@ -793,9 +791,8 @@ bool Pool1DRel(const Array& types, template Array Pool1DCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { static const Layout kNCW("NCW"); const auto* param = attrs.as(); CHECK(param != nullptr); @@ -985,9 +982,8 @@ bool Pool3DRel(const Array& types, template Array Pool3DCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { static const Layout kNCDHW("NCDHW"); const auto* param = attrs.as(); CHECK(param != nullptr); diff --git a/src/relay/op/tensor/binary.cc b/src/relay/op/tensor/binary.cc index d1b915cfa1429..58221ae66f6e3 100644 --- a/src/relay/op/tensor/binary.cc +++ b/src/relay/op/tensor/binary.cc @@ -32,9 +32,8 @@ namespace relay { #define RELAY_BINARY_COMPUTE(FTOPI) \ [] (const Attrs& attrs, \ - const Array& inputs, \ - const Type& out_type, \ - const Target& target) -> Array { \ + const Array& inputs, \ + const Type& out_type) -> Array { \ CHECK_EQ(inputs.size(), 2U); \ return {FTOPI(inputs[0], inputs[1])}; \ } \ diff --git a/src/relay/op/tensor/reduce.cc b/src/relay/op/tensor/reduce.cc index acbde0d6e28b5..5e0795eaa60bd 100644 --- a/src/relay/op/tensor/reduce.cc +++ b/src/relay/op/tensor/reduce.cc @@ -176,7 +176,6 @@ template Array ReduceCompute(const Attrs& attrs, const Array& inputs, const Type& out_type, - const Target& target, F f) { const ReduceAttrs* param = attrs.as(); CHECK(param != nullptr); @@ -321,10 +320,9 @@ bool ReduceRel(const Array& types, Array ArgMaxCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { - return ReduceCompute(attrs, inputs, out_type, target, topi::argmax); + const Array& inputs, + const Type& out_type) { + return ReduceCompute(attrs, inputs, out_type, topi::argmax); } @@ -341,10 +339,9 @@ values over a given axis. Array ArgMinCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { - return ReduceCompute(attrs, inputs, out_type, target, topi::argmin); + const Array& inputs, + const Type& out_type) { + return ReduceCompute(attrs, inputs, out_type, topi::argmin); } RELAY_REGISTER_REDUCE_OP("argmin") @@ -359,10 +356,9 @@ values over a given axis. .set_attr("TOpPattern", kCommReduce); Array SumCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { - return ReduceCompute(attrs, inputs, out_type, target, topi::sum); + const Array& inputs, + const Type& out_type) { + return ReduceCompute(attrs, inputs, out_type, topi::sum); } @@ -393,10 +389,9 @@ Example:: Array AllCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { - return ReduceCompute(attrs, inputs, out_type, target, topi::all); + const Array& inputs, + const Type& out_type) { + return ReduceCompute(attrs, inputs, out_type, topi::all); } @@ -430,10 +425,9 @@ Example:: Array AnyCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { - return ReduceCompute(attrs, inputs, out_type, target, topi::any); + const Array& inputs, + const Type& out_type) { + return ReduceCompute(attrs, inputs, out_type, topi::any); } @@ -467,10 +461,9 @@ Example:: Array MaxCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { - return ReduceCompute(attrs, inputs, out_type, target, topi::max); + const Array& inputs, + const Type& out_type) { + return ReduceCompute(attrs, inputs, out_type, topi::max); } RELAY_REGISTER_REDUCE_OP("max") @@ -485,10 +478,9 @@ RELAY_REGISTER_REDUCE_OP("max") Array MinCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { - return ReduceCompute(attrs, inputs, out_type, target, topi::min); + const Array& inputs, + const Type& out_type) { + return ReduceCompute(attrs, inputs, out_type, topi::min); } @@ -504,10 +496,9 @@ RELAY_REGISTER_REDUCE_OP("min") Array ProdCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { - return ReduceCompute(attrs, inputs, out_type, target, topi::prod); + const Array& inputs, + const Type& out_type) { + return ReduceCompute(attrs, inputs, out_type, topi::prod); } RELAY_REGISTER_REDUCE_OP("prod") @@ -534,9 +525,8 @@ Example:: Array MeanCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { IndexExpr count = tir::make_const(inputs[0]->dtype, 1); const ReduceAttrs* param = attrs.as(); CHECK(param != nullptr); @@ -546,7 +536,7 @@ Array MeanCompute(const Attrs& attrs, param->exclude)) { count *= inputs[0]->shape[i]; } - auto res = ReduceCompute(attrs, inputs, out_type, target, topi::sum); + auto res = ReduceCompute(attrs, inputs, out_type, topi::sum); return {topi::divide(res[0], count)}; } @@ -599,9 +589,8 @@ bool VarianceRel(const Array& types, } Array VarianceCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { IndexExpr count = tir::make_const(inputs[0]->dtype, 1); const ReduceAttrs* param = attrs.as(); CHECK(param != nullptr); @@ -615,7 +604,7 @@ Array VarianceCompute(const Attrs& attrs, } std::vector expand_shape; auto sq_diff = topi::power(topi::subtract(data, mean), 2); - auto var = topi::divide(ReduceCompute(attrs, {sq_diff}, out_type, target, topi::sum)[0], count); + auto var = topi::divide(ReduceCompute(attrs, {sq_diff}, out_type, topi::sum)[0], count); return {var}; } diff --git a/src/relay/op/tensor/transform.cc b/src/relay/op/tensor/transform.cc index 969912f4de8be..53bcba7f1356a 100644 --- a/src/relay/op/tensor/transform.cc +++ b/src/relay/op/tensor/transform.cc @@ -66,9 +66,8 @@ bool CastRel(const Array& types, } Array CastCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { const CastAttrs *param = attrs.as(); CHECK(param != nullptr); DataType dtype = param->dtype; @@ -126,9 +125,8 @@ bool CastLikeRel(const Array& types, Array CastLikeCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { return { topi::cast(inputs[0], inputs[1]->dtype) }; } @@ -156,8 +154,9 @@ RELAY_REGISTER_OP("cast_like") .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout); -Array ReinterpretCompute(const Attrs& attrs, const Array& inputs, - const Type& out_type, const Target& target) { +Array ReinterpretCompute(const Attrs& attrs, + const Array& inputs, + const Type& out_type) { const CastAttrs* param = attrs.as(); CHECK(param != nullptr); DataType dtype = param->dtype; @@ -231,9 +230,8 @@ bool ExpandDimsRel(const Array& types, } Array ExpandDimsCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { const ExpandDimsAttrs *param = attrs.as(); CHECK(param != nullptr); return { topi::expand_dims(inputs[0], param->axis, param->num_newaxis) }; @@ -270,9 +268,8 @@ RELAY_REGISTER_OP("expand_dims") TVM_REGISTER_NODE_TYPE(ConcatenateAttrs); Array ConcatenateCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { const ConcatenateAttrs *param = attrs.as(); CHECK(param != nullptr); return { topi::concatenate(inputs, param->axis) }; @@ -413,9 +410,8 @@ bool StackRel(const Array& types, } Array StackCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { const StackAttrs *param = attrs.as(); CHECK(param != nullptr); return { topi::stack(inputs, param->axis) }; @@ -505,9 +501,8 @@ bool TransposeRel(const Array& types, } Array TransposeCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { const auto* param = attrs.as(); CHECK(param != nullptr); return Array{ topi::transpose(inputs[0], param->axes) }; @@ -688,9 +683,8 @@ bool ReshapeRel(const Array& types, } Array ReshapeCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { const auto* out_ttype = out_type.as(); CHECK(out_ttype != nullptr); Array newshape; @@ -923,9 +917,8 @@ bool TakeRel(const Array& types, } Array TakeCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { const auto* param = attrs.as(); CHECK(param != nullptr); if (!param->axis.defined()) { @@ -1010,9 +1003,8 @@ bool FullRel(const Array& types, } Array FullCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { const auto* out_ttype = out_type.as(); return { topi::full(out_ttype->shape, out_ttype->dtype, inputs[0]()) }; } @@ -1118,9 +1110,8 @@ bool FullLikeRel(const Array& types, } Array FullLikeCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { return { topi::full_like(inputs[0], inputs[1]()) }; } @@ -1230,9 +1221,8 @@ inline te::Tensor DynamicArange(const te::Tensor& start, } Array ArangeCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { const ArangeAttrs* param = attrs.as(); te::Tensor start = inputs[0]; te::Tensor stop = inputs[1]; @@ -1325,9 +1315,8 @@ bool RepeatRel(const Array& types, } Array RepeatCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { const RepeatAttrs *param = attrs.as(); CHECK(param != nullptr); return { topi::repeat(inputs[0], param->repeats, param->axis) }; @@ -1436,9 +1425,8 @@ bool TileRel(const Array& types, } Array TileCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { const TileAttrs *param = attrs.as(); CHECK(param != nullptr); return { topi::tile(inputs[0], param->reps) }; @@ -1497,9 +1485,8 @@ bool ReverseRel(const Array& types, } Array ReverseCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { const ReverseAttrs *param = attrs.as(); CHECK(param != nullptr); return { topi::flip(inputs[0], param->axis) }; @@ -1571,9 +1558,8 @@ Expr MakeWhere(const Expr& condition, const Expr& x, const Expr& y) { } Array WhereCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { return { topi::where(inputs[0], inputs[1], inputs[2]) }; } @@ -1688,9 +1674,8 @@ bool SqueezeRel(const Array& types, } Array SqueezeCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { const SqueezeAttrs *param = attrs.as(); CHECK(param != nullptr); return { topi::squeeze(inputs[0], param->axis) }; @@ -1729,9 +1714,8 @@ Expr MakeCollapseSumLike(Expr data, } Array CollapseSumLikeCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { const auto* out_ttype = out_type.as(); CHECK(out_ttype != nullptr); return { topi::collapse_sum(inputs[0], out_ttype->shape) }; @@ -1774,9 +1758,8 @@ Expr MakeBroadCastTo(Expr data, Array shape) { } Array BroadCastToCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { auto ioattrs = attrs.as(); CHECK(ioattrs != nullptr); return { topi::broadcast_to(inputs[0], ioattrs->shape) }; @@ -1812,9 +1795,8 @@ Expr MakeBroadCastToLike(Expr data, } Array BroadCastToLikeCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { const auto* out_ttype = out_type.as(); CHECK(out_ttype != nullptr); return { topi::broadcast_to(inputs[0], out_ttype->shape) }; @@ -2019,9 +2001,8 @@ Expr MakeStridedSlice(Expr data, } Array StridedSliceCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { const StridedSliceAttrs *param = attrs.as(); CHECK(param != nullptr); return Array{ @@ -2176,9 +2157,8 @@ bool SplitRel(const Array& types, } Array SplitCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { const auto param = attrs.as(); CHECK(param != nullptr); @@ -2305,9 +2285,8 @@ Expr MakeSliceLike(Expr data, } Array SliceLikeCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { const auto* param = attrs.as(); CHECK(param != nullptr); Array src_shape = inputs[0]->shape; @@ -2371,9 +2350,8 @@ RELAY_REGISTER_OP("slice_like") TVM_REGISTER_NODE_TYPE(LayoutTransformAttrs); Array LayoutTransformCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { const auto* param = attrs.as(); CHECK(param != nullptr); return Array{ @@ -2504,9 +2482,8 @@ bool GatherNDRel(const Array& types, } Array GatherNDCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { return { topi::gather_nd(inputs[0], inputs[1]) }; } @@ -2558,9 +2535,8 @@ bool SequenceMaskRel(const Array& types, } Array SequenceMaskCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { const auto* param = attrs.as(); CHECK(param != nullptr); return Array{ @@ -2671,9 +2647,8 @@ bool OneHotRel(const Array& types, } Array OneHotCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { const auto* param = attrs.as(); CHECK(param != nullptr); return Array { diff --git a/src/relay/op/tensor/unary.cc b/src/relay/op/tensor/unary.cc index 7f6db50bf7027..caa6451542c98 100644 --- a/src/relay/op/tensor/unary.cc +++ b/src/relay/op/tensor/unary.cc @@ -34,9 +34,8 @@ namespace relay { #define RELAY_UNARY_COMPUTE(FTOPI) \ [] (const Attrs& attrs, \ - const Array& inputs, \ - const Type& out_type, \ - const Target& target) -> Array { \ + const Array& inputs, \ + const Type& out_type) -> Array { \ return {FTOPI(inputs[0])}; \ } \ @@ -302,9 +301,8 @@ bool ShapeOfRel(const Array& types, } Array ShapeOfCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { CHECK_EQ(inputs.size(), 1); const auto* param = attrs.as(); CHECK(param != nullptr); @@ -353,9 +351,8 @@ bool NdarraySizeRel(const Array& types, } Array NdarraySizeCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { + const Array& inputs, + const Type& out_type) { CHECK_EQ(inputs.size(), 1); const auto* param = attrs.as(); CHECK(param != nullptr); diff --git a/src/relay/op/vision/yolo.cc b/src/relay/op/vision/yolo.cc index 9c4a2850903bf..7d152718f3a0f 100644 --- a/src/relay/op/vision/yolo.cc +++ b/src/relay/op/vision/yolo.cc @@ -83,8 +83,7 @@ Its function is mostly shape transform.")doc" TVM_ADD_FILELINE) .add_type_rel("YoloReorg", YoloReorgRel) .set_attr("FTVMCompute", [](const Attrs& attrs, const Array& inputs, - const Type& out_type, - const Target& target) { + const Type& out_type) { const auto* params = attrs.as(); CHECK(params != nullptr); return Array{ topi::vision::reorg(inputs[0], params->stride) }; diff --git a/src/relay/pass/alter_op_layout.cc b/src/relay/pass/alter_op_layout.cc index 0cc3ff090dd8d..fe8862523dda7 100644 --- a/src/relay/pass/alter_op_layout.cc +++ b/src/relay/pass/alter_op_layout.cc @@ -83,7 +83,10 @@ class AlterTransformMemorizer : public TransformMemorizer { auto ttype = expr->type_as(); tinfos.push_back(tvm::te::placeholder(ttype->shape, ttype->dtype)); } - Expr altered_value = falter_layout[op](ref_call->attrs, new_args, tinfos); + // TODO(@kevinthesun, @icemelon9): This won't work if inputs/outputs are dynamic shapes. + // Probably we need to disable the AlterOpLayout when compiling dynamic models. + Expr altered_value = falter_layout[op](ref_call->attrs, new_args, tinfos, + ref_call->checked_type()); if (altered_value.defined()) { new_e = altered_value; modified = true; diff --git a/src/te/schedule/schedule_lang.cc b/src/te/schedule/schedule_lang.cc index 1763bd64c15ff..9120ba7cf5297 100644 --- a/src/te/schedule/schedule_lang.cc +++ b/src/te/schedule/schedule_lang.cc @@ -20,8 +20,11 @@ /*! * \file schedule_lang.cc */ +#include #include #include +#include +#include #include #include "graph.h" @@ -786,6 +789,67 @@ IterVarRelation SingletonNode::make(IterVar iter) { return IterVarRelation(n); } +SpecializedCondition SpecializedConditionNode::make(Array conditions) { + auto n = make_object(); + n->clauses = conditions; + return SpecializedCondition(n); +} + +/*! \brief Entry to hold the SpecializedCondition context stack. */ +struct TVMSpecializationThreadLocalEntry { + /*! \brief The current specialized condition */ + std::stack condition_stack; +}; + +/*! \brief Thread local store to hold the Target context stack. */ +typedef dmlc::ThreadLocalStore TVMSpecializationThreadLocalStore; + +void SpecializedCondition::EnterWithScope() { + TVMSpecializationThreadLocalEntry *entry = TVMSpecializationThreadLocalStore::Get(); + entry->condition_stack.push(*this); +} + +void SpecializedCondition::ExitWithScope() { + TVMSpecializationThreadLocalEntry *entry = TVMSpecializationThreadLocalStore::Get(); + CHECK(!entry->condition_stack.empty()); + CHECK(entry->condition_stack.top().same_as(*this)); + entry->condition_stack.pop(); +} + +SpecializedCondition SpecializedCondition::Current() { + TVMSpecializationThreadLocalEntry *entry = TVMSpecializationThreadLocalStore::Get(); + SpecializedCondition cond; + if (entry->condition_stack.size() > 0) { + cond = entry->condition_stack.top(); + } + return cond; +} + +TVM_REGISTER_GLOBAL("_CreateSpecializedCondition") +.set_body_typed(SpecializedConditionNode::make); + +TVM_REGISTER_GLOBAL("_GetCurrentSpecialization") +.set_body([](TVMArgs args, TVMRetValue* ret) { + *ret = SpecializedCondition::Current(); +}); + +class SpecializedCondition::Internal { + public: + static void EnterScope(SpecializedCondition cond) { + cond.EnterWithScope(); + } + + static void ExitScope(SpecializedCondition cond) { + cond.ExitWithScope(); + } +}; + +TVM_REGISTER_GLOBAL("_EnterSpecializationScope") +.set_body_typed(SpecializedCondition::Internal::EnterScope); + +TVM_REGISTER_GLOBAL("_ExitSpecializationScope") +.set_body_typed(SpecializedCondition::Internal::ExitScope); + TVM_REGISTER_NODE_TYPE(StageNode); TVM_REGISTER_NODE_TYPE(IterVarAttrNode); TVM_REGISTER_NODE_TYPE(SplitNode); @@ -793,6 +857,7 @@ TVM_REGISTER_NODE_TYPE(FuseNode); TVM_REGISTER_NODE_TYPE(RebaseNode); TVM_REGISTER_NODE_TYPE(SingletonNode); TVM_REGISTER_NODE_TYPE(ScheduleNode); +TVM_REGISTER_NODE_TYPE(SpecializedConditionNode); // Printer TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) @@ -847,6 +912,13 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) .set_dispatch([](const ObjectRef& node, ReprPrinter* p) { auto* op = static_cast(node.get()); p->stream << "schedule(" << op << ")"; - }); +}) +.set_dispatch([](const ObjectRef& node, ReprPrinter* p) { + auto* op = static_cast(node.get()); + p->stream << "specialization("; + p->Print(op->clauses); + p->stream << ')'; +}); + } // namespace te } // namespace tvm diff --git a/tests/python/relay/test_autotvm_task_extraction.py b/tests/python/relay/test_autotvm_task_extraction.py index 8f550d82c4f6f..73dbf106b541b 100644 --- a/tests/python/relay/test_autotvm_task_extraction.py +++ b/tests/python/relay/test_autotvm_task_extraction.py @@ -39,25 +39,28 @@ def test_task_extraction(): target = 'llvm' mod_list = [] params_list = [] + conv2d = relay.op.get("nn.conv2d") + conv2d_transpose = relay.op.get("nn.conv2d_transpose") + dense = relay.op.get("nn.dense") mod, params, _ = get_network('resnet-18', batch_size=1) tasks = autotvm.task.extract_from_program(mod["main"], target=target, params=params, - ops=(relay.op.nn.conv2d,)) + ops=(conv2d,)) assert len(tasks) == 12 tasks = autotvm.task.extract_from_program(mod, target=target, params=params, - ops=(relay.op.nn.conv2d,)) + ops=(conv2d,)) assert len(tasks) == 12 mod, params, _ = get_network('resnet-18', batch_size=1) tasks = autotvm.task.extract_from_program(mod["main"], target=target, params=params, - ops=(relay.op.nn.dense,)) + ops=(dense,)) assert len(tasks) == 1 tasks = autotvm.task.extract_from_program(mod, target=target, params=params, - ops=(relay.op.nn.dense,)) + ops=(dense,)) assert len(tasks) == 1 mod, params, _ = get_network('resnet-18', batch_size=1) @@ -65,11 +68,14 @@ def test_task_extraction(): params_list.append(params) tasks = autotvm.task.extract_from_program(mod["main"], target=target, params=params, - ops=(relay.op.nn.conv2d, relay.op.nn.dense)) + ops=(conv2d, dense)) assert len(tasks) == 13 tasks = autotvm.task.extract_from_program(mod, target=target, params=params, - ops=(relay.op.nn.conv2d, relay.op.nn.dense)) + ops=(conv2d, dense)) + assert len(tasks) == 13 + tasks = autotvm.task.extract_from_program(mod, target=target, + params=params) assert len(tasks) == 13 mod, params, _ = get_network('mobilenet', batch_size=1) @@ -77,18 +83,18 @@ def test_task_extraction(): params_list.append(params) tasks = autotvm.task.extract_from_program(mod, target=target, params=params, - ops=(relay.op.nn.conv2d, relay.op.nn.dense)) + ops=(conv2d, dense)) assert len(tasks) == 20 mod, params, _ = get_network('dcgan', batch_size=1) tasks = autotvm.task.extract_from_program(mod, target=target, params=params, - ops=(relay.op.nn.conv2d_transpose,)) + ops=(conv2d_transpose,)) assert len(tasks) == 4 tasks = autotvm.task.extract_from_multiple_program(mod_list, params_list, target=target, - ops=(relay.op.nn.conv2d,)) + ops=(conv2d,)) assert len(tasks) == 31 def test_template_key_provided(): @@ -136,6 +142,7 @@ def test_template_key_default(): if __name__ == '__main__': test_task_extraction() - test_template_key_provided() - test_template_key_empty() - test_template_key_default() + # TODO(@icemelon9): template key will no long exist, remove these tasks. + # test_template_key_provided() + # test_template_key_empty() + # test_template_key_default() diff --git a/tests/python/relay/test_op_level2.py b/tests/python/relay/test_op_level2.py index 0d3fd4b3f8298..e9acd96f39350 100644 --- a/tests/python/relay/test_op_level2.py +++ b/tests/python/relay/test_op_level2.py @@ -222,7 +222,7 @@ def run_test_conv2d(dtype, out_dtype, scale, dshape, kshape, continue intrp1 = relay.create_executor("graph", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data, kernel) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-4, atol=1e-4) def compile_test_conv2d_arm_cpu(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1), @@ -240,13 +240,13 @@ def compile_test_conv2d_arm_cpu(dtype, out_dtype, scale, dshape, kshape, mod = tvm.IRModule() mod["main"] = func - test_schedule='{"i": ["llvm -device=arm_cpu", "topi_nn_depthwise_conv2d_nchw", \ + test_schedule='{"i": ["llvm -device=arm_cpu", "depthwise_conv2d_nchw_spatial_pack.arm_cpu", \ [["TENSOR", [1, 512, 32, 32], "float32"], \ ["TENSOR", [512, 1, 3, 3], "float32"], \ [1, 1], [1, 1], [1, 1], "float32"], {}, \ - ["depthwise_conv2d_nchw", [1, 512, 32, 32, "float32"], \ + ["depthwise_conv2d_nchw_spatial_pack.arm_cpu", [1, 512, 32, 32, "float32"], \ [512, 1, 3, 3, "float32"], [1, 1], [1, 1], [1, 1], "float32"], \ - {"i": 743640, "t": "contrib_spatial_pack", "c": null, \ + {"i": 743640, "t": "", "c": null, \ "e": [["tile_co", "sp", [32, 16]], ["tile_oh", "sp", [8, 1]], \ ["tile_ow", "sp", [1, 8]], \ ["reorder_0", "re", [0, 1, 2, 3, 4, 5, 8, 6, 7]], \ @@ -319,7 +319,6 @@ def _query_inside(self, target, workload): if key in self.memory: return self.memory[key] cfg = autotvm.task.space.FallbackConfigEntity() - cfg.template_key = 'winograd' cfg.is_fallback = False cfg['tile_b'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1]) cfg['tile_y'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1]) @@ -1113,6 +1112,9 @@ def _has_fast_int8_instructions(asm, target): else: assert False, "Target should be Skylake or Cascadelake" + # TODO(@anijain2305, @icemelon9): disable conv2d_int8 for NHWC data layout. + # Re-enable this after adding conv2d_NCHWc_int8 support for NHWC. + # compile conv2d for x86 (skylake, cascadelake) and test assembly contains *pmadd* instructions targets = ["llvm -mcpu=skylake-avx512", "llvm -mcpu=cascadelake"] llvm_version = tvm.target.codegen.llvm_version_major() @@ -1127,11 +1129,11 @@ def _has_fast_int8_instructions(asm, target): dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) - for ic in [1, 4, 6]: - asm = _compile(ic=ic, oc=16, target=target, data_layout="NHWC", - kernel_layout='HWIO', - dtypes=dtypes) - assert _has_fast_int8_instructions(asm, target) + # for ic in [1, 4, 6]: + # asm = _compile(ic=ic, oc=16, target=target, data_layout="NHWC", + # kernel_layout='HWIO', + # dtypes=dtypes) + # assert _has_fast_int8_instructions(asm, target) # Sweep the output channels to check int8 robustness # Output channels should be a multiple of 16 internally. @@ -1141,20 +1143,20 @@ def _has_fast_int8_instructions(asm, target): dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) - for oc in [4, 16, 20]: - asm = _compile(ic=8, oc=oc, target=target, data_layout="NHWC", - kernel_layout='HWIO', - dtypes=dtypes) - assert _has_fast_int8_instructions(asm, target) + # for oc in [4, 16, 20]: + # asm = _compile(ic=8, oc=oc, target=target, data_layout="NHWC", + # kernel_layout='HWIO', + # dtypes=dtypes) + # assert _has_fast_int8_instructions(asm, target) # Check that both non-divisible oc and ic work asm = _compile(ic=17, oc=29, target=target, data_layout="NCHW", kernel_layout='OIHW', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) - asm = _compile(ic=17, oc=29, target=target, data_layout="NHWC", kernel_layout='HWIO', - dtypes=dtypes) - assert _has_fast_int8_instructions(asm, target) + # asm = _compile(ic=17, oc=29, target=target, data_layout="NHWC", kernel_layout='HWIO', + # dtypes=dtypes) + # assert _has_fast_int8_instructions(asm, target) # Check that int8 x int8 goes through legalization so that fast instructions can be picked up. for target in targets: @@ -1165,16 +1167,16 @@ def _has_fast_int8_instructions(asm, target): dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) - asm = _compile(ic=17, oc=29, target=target, data_layout="NHWC", kernel_layout='HWIO', - dtypes=dtypes) - assert _has_fast_int8_instructions(asm, target) + # asm = _compile(ic=17, oc=29, target=target, data_layout="NHWC", kernel_layout='HWIO', + # dtypes=dtypes) + # assert _has_fast_int8_instructions(asm, target) # Ensure that code is generated when datatypes are not HW supported. - dtypes = ('uint8', 'uint8', 'int32') - asm = _compile(ic=16, oc=32, target=target, data_layout="NHWC", kernel_layout='HWIO', - dtypes=dtypes) - # Check that intrinisic is not present in the assembly. - assert not _has_fast_int8_instructions(asm, target) + # dtypes = ('uint8', 'uint8', 'int32') + # asm = _compile(ic=16, oc=32, target=target, data_layout="NHWC", kernel_layout='HWIO', + # dtypes=dtypes) + # # Check that intrinisic is not present in the assembly. + # assert not _has_fast_int8_instructions(asm, target) # Check that a vectorized instruction is generated for older Intel # generations, because we default to NCHWc layout. @@ -1223,7 +1225,7 @@ def test_bitserial_conv2d_infer_type(): y = relay.nn.bitserial_conv2d( x, w, kernel_size=(3, 3), padding=(0, 0), channels=32) yy = run_infer_type(y) - assert yy.checked_type == relay.TensorType( + assert yy.checked_type == relay.TensorType( (n, 32, 222, 222), "int16") @@ -1233,9 +1235,11 @@ def test_bitpack_infer_type(): x = relay.var("x", relay.ty.TensorType((o, i, h, w), "int16")) y = relay.nn.bitpack(x, bit_axis=4, pack_axis=1, pack_type='uint16', bits=1) yy = run_infer_type(y) - assert yy.checked_type == relay.TensorType( + assert yy.checked_type == relay.TensorType( (32, 2, 128, 128, 1), "uint16") +# TODO(@jwfromm): Need to add bitserial_conv2d & bitpack run test cases + if __name__ == "__main__": test_pool1d() diff --git a/tests/python/unittest/test_graph_tuner_core.py b/tests/python/unittest/test_graph_tuner_core.py index a8b22fd787ee9..173a237bf8d92 100644 --- a/tests/python/unittest/test_graph_tuner_core.py +++ b/tests/python/unittest/test_graph_tuner_core.py @@ -48,7 +48,7 @@ def _create_data(target, dshape, dtype, layout): tasks = autotvm.task.extract_from_program(mod["main"], target=target, params=params, - ops=(relay.op.nn.conv2d,)) + ops=(relay.op.get("nn.conv2d"),)) wkl_list = [ create_workload((1, 3, 8, 8), (16, 3, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype), create_workload((1, 16, 8, 8), (32, 16, 1, 1), (1, 1), (0, 0, 0, 0), (1, 1), layout, layout, dtype, dtype), @@ -121,7 +121,8 @@ def test_graph_tuner_layout_transform(): dshape = (1, 3, 8, 8) dtype = "float32" layout = "NCHW" - target_ops = [relay.nn.conv2d] + conv2d = relay.op.get("nn.conv2d") + target_ops = [conv2d] g, records, ltf_records, ltf_keys, _ = _create_data(target, dshape, dtype, layout) executor = DPTuner(g, {"data": dshape}, records, target_ops, target=target, log_file=log_file) @@ -156,7 +157,8 @@ def test_DPTuner_run(): dtype = "float32" layout = "NCHW" dshape = (1, 3, 8, 8) - target_ops = [relay.nn.conv2d] + conv2d = relay.op.get("nn.conv2d") + target_ops = [conv2d] g, records, ltf_records, ltf_keys, tasks = _create_data(target, dshape, dtype, layout) mod = tvm.IRModule() @@ -207,7 +209,8 @@ def test_PBQPTuner_run(): dtype = "float32" layout = "NCHW" dshape = (1, 3, 8, 8) - target_ops = [relay.nn.conv2d] + conv2d = relay.op.get("nn.conv2d") + target_ops = [conv2d] g, records, ltf_records, ltf_keys, tasks = _create_data(target, dshape, dtype, layout) costs = [0.02, 0.02, 0.045] @@ -255,7 +258,8 @@ def test_many_sub_graphs(): dtype = "float32" dshape = (1, 8, 8, 3) layout = "NCHW" - target_ops = [relay.nn.conv2d] + conv2d = relay.op.get("nn.conv2d") + target_ops = [conv2d] data = relay.var("data", shape=dshape, dtype=dtype) t0 = relay.transpose(data, (0, 3, 1, 2)) @@ -277,7 +281,7 @@ def test_many_sub_graphs(): tasks = autotvm.task.extract_from_program(net["main"], target=target, params=params, - ops=(relay.op.nn.conv2d,)) + ops=(conv2d,)) wkl_list = [ create_workload((1, 3, 8, 8), (16, 3, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype), create_workload((1, 16, 8, 8), (32, 16, 1, 1), (1, 1), (0, 0, 0, 0), (1, 1), layout, layout, dtype, dtype), @@ -376,7 +380,8 @@ def test_tuple(): dtype = "float32" dshape = (1, 5, 32, 32) layout = "NCHW" - target_ops = [relay.nn.conv2d] + conv2d = relay.op.get("nn.conv2d") + target_ops = [conv2d] data = relay.var("data", shape=dshape, dtype=dtype) w0 = relay.var("w0_weight") @@ -390,7 +395,7 @@ def test_tuple(): tasks = autotvm.task.extract_from_program(net["main"], target=target, params=params, - ops=(relay.op.nn.conv2d,)) + ops=(conv2d,)) wkl_list = [ create_workload((1, 5, 32, 32), (2, 5, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype), create_workload((1, 5, 32, 32), (3, 5, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype), @@ -472,7 +477,8 @@ def test_triangle_block(): dtype = "float32" dshape = (1, 3, 8, 8) layout = "NCHW" - target_ops = [relay.nn.conv2d] + conv2d = relay.op.get("nn.conv2d") + target_ops = [conv2d] data = relay.var("data", shape=dshape, dtype=dtype) w0 = relay.var("w0_weight") @@ -488,7 +494,7 @@ def test_triangle_block(): tasks = autotvm.task.extract_from_program(net["main"], target=target, params=params, - ops=(relay.op.nn.conv2d,)) + ops=(conv2d,)) wkl_list = [ create_workload((1, 3, 8, 8), (16, 3, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype), create_workload((1, 16, 8, 8), (32, 16, 1, 1), (1, 1), (0, 0, 0, 0), (1, 1), layout, layout, dtype, dtype), diff --git a/tests/python/unittest/test_graph_tuner_utils.py b/tests/python/unittest/test_graph_tuner_utils.py index 397ea235ecbf0..885065fee8d04 100644 --- a/tests/python/unittest/test_graph_tuner_utils.py +++ b/tests/python/unittest/test_graph_tuner_utils.py @@ -36,7 +36,7 @@ def create_workload(dshape, kshape, strides, data = tvm.placeholder(dshape, dtype=dtype) kernel = tvm.placeholder(kshape, dtype=dtype) return autotvm.task.args_to_workload([data, kernel, strides, padding, dilation, layout, - out_dtype], conv2d) + out_layout, out_dtype], "conv2d_NCHWc.x86") def verify_has_multiple_inputs(node_list, node_idx, input_names, expected_result): @@ -119,7 +119,7 @@ def test_get_in_nodes(): out = relay.nn.conv2d(out3, w1) net = relay.Function(relay.analysis.free_vars(out), out) net = bind_inputs(net, {"data": (1, 16, 224, 224), "w0": (16, 16, 1, 1), "w1": (16, 16, 1, 1)}) - target_ops = ["conv2d"] + target_ops = [relay.op.get("nn.conv2d")] input_names = ["data"] node_list = [] node_dict = {} diff --git a/topi/include/topi/cuda/normalization.h b/topi/include/topi/cuda/normalization.h index 1b42308d0ac29..bfc209db213be 100644 --- a/topi/include/topi/cuda/normalization.h +++ b/topi/include/topi/cuda/normalization.h @@ -35,13 +35,10 @@ using namespace tvm::te; namespace cuda { /*! * \brief Create a CUDA schedule for LRN -* -* \param target The target to generate a schedule for. * \param outs The output tensors. -* * \return A schedule for the given ops. */ -inline Schedule schedule_lrn(const Target &target, const Array& outs) { +inline Schedule schedule_lrn(const Array& outs) { Array out_ops; for (auto t : outs) { out_ops.push_back(t->op); diff --git a/topi/include/topi/rocm/normalization.h b/topi/include/topi/rocm/normalization.h index 692370d65bb78..303f4a8302c71 100644 --- a/topi/include/topi/rocm/normalization.h +++ b/topi/include/topi/rocm/normalization.h @@ -34,14 +34,11 @@ using namespace tvm::te; namespace rocm { /*! * \brief Create a rocm schedule for LRN -* -* \param target The target to generate a schedule for. * \param outs The output tensors. -* * \return A schedule for the given ops. */ -inline Schedule schedule_lrn(const Target &target, const Array& outs) { - return topi::cuda::schedule_lrn(target, outs); +inline Schedule schedule_lrn(const Array& outs) { + return topi::cuda::schedule_lrn(outs); } } // namespace rocm diff --git a/topi/python/topi/__init__.py b/topi/python/topi/__init__.py index a0c6ab0c6d2db..f1019e667e811 100644 --- a/topi/python/topi/__init__.py +++ b/topi/python/topi/__init__.py @@ -40,6 +40,7 @@ from .broadcast import * from .sort import * from .argwhere import * +from . import generic from . import nn from . import x86 from . import cuda diff --git a/topi/python/topi/argwhere.py b/topi/python/topi/argwhere.py index 32f4e8718c46f..c2a9adea0c2ad 100644 --- a/topi/python/topi/argwhere.py +++ b/topi/python/topi/argwhere.py @@ -16,7 +16,6 @@ # under the License. # pylint: disable=invalid-name, too-many-arguments, too-many-nested-blocks """Argwhere operator""" -import tvm from tvm import hybrid @hybrid.script @@ -164,7 +163,6 @@ def hybrid_argwhere_5d(output_shape, condition): valid_index += 1 return a -@tvm.target.generic_func def argwhere(output_shape, condition): """Find the indices of elements of a tensor that are non-zero. diff --git a/topi/python/topi/arm_cpu/__init__.py b/topi/python/topi/arm_cpu/__init__.py index 517941c1905f0..63f17422bcf1a 100644 --- a/topi/python/topi/arm_cpu/__init__.py +++ b/topi/python/topi/arm_cpu/__init__.py @@ -17,10 +17,11 @@ """Schedule for ARM CPU""" -from . import conv2d -from . import depthwise_conv2d -from . import conv2d_transpose -from . import conv2d_int8 -from . import bitserial_conv2d -from . import bitserial_dense -from . import injective +from .conv2d import * +from .depthwise_conv2d import * +from .conv2d_transpose import * +from .conv2d_int8 import * +from . import conv2d_alter_op +from .bitserial_conv2d import * +from .bitserial_dense import * +from .injective import * diff --git a/topi/python/topi/arm_cpu/bitserial_conv2d.py b/topi/python/topi/arm_cpu/bitserial_conv2d.py index 4de2b1438a92e..4b80b6b3b7af8 100644 --- a/topi/python/topi/arm_cpu/bitserial_conv2d.py +++ b/topi/python/topi/arm_cpu/bitserial_conv2d.py @@ -26,7 +26,6 @@ from ..nn.bitserial_util import bitpack, binary_op_multiplier from ..nn.util import get_pad_tuple from ..util import get_const_int, get_const_tuple -from .. import generic def _kernel_vec_spatial_pack_nhwc(kernel, kernel_bits, VC, use_bitpack=True): if use_bitpack: @@ -38,9 +37,9 @@ def _kernel_vec_spatial_pack_nhwc(kernel, kernel_bits, VC, use_bitpack=True): return tvm.compute(kvshape, lambda co, dh, dw, b, vc, ci: \ kernel_q[dh][dw][b][ci][co*VC+vc], name='kernel_vec') -@autotvm.register_topi_compute(bitserial_conv2d_nhwc, 'arm_cpu', 'direct') -def spatial_pack_nhwc(cfg, data, kernel, stride, padding, activation_bits, weight_bits, - pack_dtype, out_dtype, unipolar): +@autotvm.register_topi_compute("bitserial_conv2d_nhwc.arm_cpu") +def bitserial_conv2d_nhwc(cfg, data, kernel, stride, padding, activation_bits, weight_bits, + pack_dtype, out_dtype, unipolar): """ Compute convolution with pack on spatial axes. """ assert data.shape[0].value == 1, "spatial pack convolution only support batch size=1" assert pack_dtype == 'uint8', "only support packing into uint8 bits" @@ -323,7 +322,7 @@ def _schedule_spatial_conv2d_nhwc(cfg, s, data_pad, data_vec, kernel_vec, s[last].parallel(oh) return s -@autotvm.register_topi_schedule(generic.nn.schedule_bitserial_conv2d_nhwc, 'arm_cpu', 'direct') +@autotvm.register_topi_schedule("bitserial_conv2d_nhwc.arm_cpu") def schedule_bitserial_conv2d_nhwc(cfg, outs): """Arm cpu schedule for bitserial conv2d""" s = tvm.create_schedule([x.op for x in outs]) diff --git a/topi/python/topi/arm_cpu/bitserial_dense.py b/topi/python/topi/arm_cpu/bitserial_dense.py index 8bd6c5d15f8c4..3f1889c8d7ff9 100644 --- a/topi/python/topi/arm_cpu/bitserial_dense.py +++ b/topi/python/topi/arm_cpu/bitserial_dense.py @@ -21,15 +21,13 @@ from tvm import autotvm from topi.util import get_const_tuple from .. import tag -from .. import generic from .bitserial_conv2d import _intrin_popcount from ..nn.pad import pad -from ..nn.bitserial_dense import bitserial_dense from ..nn.bitserial_util import bitpack, binary_op_multiplier -@autotvm.register_topi_compute(bitserial_dense, ['arm_cpu'], 'direct') -def bitserial_dense_generic(cfg, data, weight, data_bits, weight_bits, pack_dtype, out_dtype, - unipolar): +@autotvm.register_topi_compute('bitserial_dense.arm_cpu') +def bitserial_dense(cfg, data, weight, data_bits, weight_bits, pack_dtype, out_dtype, + unipolar): """The default implementation of bitserial dense in topi. Parameters @@ -111,7 +109,7 @@ def bitserial_dense_generic(cfg, data, weight, data_bits, weight_bits, pack_dtyp return matmul -@autotvm.register_topi_schedule(generic.nn.schedule_bitserial_dense, ['arm_cpu'], 'direct') +@autotvm.register_topi_schedule('bitserial_dense.arm_cpu') def schedule_bitserial_dense(cfg, outs): """Schedule for binary_dense. diff --git a/topi/python/topi/arm_cpu/conv2d.py b/topi/python/topi/arm_cpu/conv2d.py index f0d650adeac1a..54672810a19fa 100644 --- a/topi/python/topi/arm_cpu/conv2d.py +++ b/topi/python/topi/arm_cpu/conv2d.py @@ -18,20 +18,12 @@ """Conv2D schedule for ARM CPU""" from __future__ import absolute_import as _abs -import logging - import tvm from tvm import autotvm import tvm.contrib.nnpack -from ..generic import schedule_conv2d_nchw, schedule_conv2d_nhwc, \ - schedule_conv2d_winograd_without_weight_transform, \ - schedule_conv2d_winograd_nnpack_without_weight_transform from ..util import traverse_inline, get_const_tuple -from ..nn import dilate, pad, conv2d, conv2d_alter_layout, \ - conv2d_winograd_without_weight_transform, \ - conv2d_winograd_nnpack_without_weight_transform, \ - depthwise_conv2d_nchw +from .. import nn from ..nn.util import get_const_int, get_pad_tuple from ..nn.winograd_util import winograd_transform_matrices from .conv2d_spatial_pack import conv2d_spatial_pack_nchw, \ @@ -39,75 +31,15 @@ schedule_conv2d_spatial_pack_nchw, \ schedule_conv2d_spatial_pack_nhwc -logger = logging.getLogger('topi') - -@autotvm.register_topi_compute(conv2d, 'arm_cpu', ['direct']) -def conv2d_arm_cpu(cfg, data, kernel, strides, padding, dilation, layout, out_dtype): - """TOPI compute callback for conv2d - - Parameters - ---------- - cfg: ConfigEntity - The config for this template - - data : tvm.Tensor - 4-D with shape [batch, in_channel, in_height, in_width] - - kernel : tvm.Tensor - 4-D with shape [num_filter, in_channel, filter_height, filter_width] or - pre-packed 5-D with shape [num_filter_chunk, in_channel, filter_height, - filter_width, num_filter_block] - strides : list of two ints - [stride_height, stride_width] +@autotvm.register_topi_compute("conv2d_nchw_spatial_pack.arm_cpu") +def conv2d_nchw_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype): + return conv2d_spatial_pack_nchw(cfg, data, kernel, strides, padding, + dilation, out_dtype, num_tile=2) - padding : list of two ints - [pad_height, pad_width] - - dilation : list of two ints - [dilation_height, dilation_width] - - layout : str - layout of data - - out_dtype: str - The output type. This is used for mixed precision. - - Returns - ------- - output : tvm.Tensor - 4-D with shape [batch, out_channel, out_height, out_width] - """ - if layout == 'NCHW': - return conv2d_spatial_pack_nchw(cfg, data, kernel, strides, padding, - dilation, out_dtype, num_tile=2) - elif layout == 'NHWC': - return conv2d_spatial_pack_nhwc(cfg, data, kernel, strides, padding, - dilation, out_dtype) - else: - raise ValueError("Unsupported layout {}".format(layout)) - -@autotvm.register_topi_schedule( - schedule_conv2d_nchw, 'arm_cpu', - ['direct', 'winograd', 'winograd_nnpack_fp16', 'winograd_nnpack_fp32']) -def schedule_conv2d_nchw_arm_cpu(cfg, outs): - """TOPI schedule callback for conv2d - - Parameters - ---------- - cfg: ConfigEntity - The config for this template - - outs: Array of Tensor - The computation graph description of conv2d - in the format of an array of tensors. - - Returns - ------- - s: Schedule - The computation schedule for conv2d. - """ +@autotvm.register_topi_schedule("conv2d_nchw_spatial_pack.arm_cpu") +def schedule_conv2d_nchw_spatial_pack(cfg, outs): s = tvm.create_schedule([x.op for x in outs]) def _callback(op): @@ -131,35 +63,18 @@ def _callback(op): schedule_conv2d_spatial_pack_nchw(cfg, s, data_vec, kernel_vec, conv, output, outs[0]) - if 'winograd_conv2d_output' in op.tag: - output = op.output(0) - _schedule_winograd(cfg, s, output, outs[0]) - - if 'winograd_nnpack_conv2d_output' in op.tag: - output = op.output(0) - _schedule_winograd_nnpack(cfg, s, output, outs[0]) - traverse_inline(s, outs[0].op, _callback) return s -@autotvm.register_topi_schedule(schedule_conv2d_nhwc, 'arm_cpu', ['direct']) -def schedule_conv2d_nhwc_arm_cpu(cfg, outs): - """TOPI schedule callback for conv2d - Parameters - ---------- - cfg: ConfigEntity - The config for this template +@autotvm.register_topi_compute("conv2d_nhwc_spatial_pack.arm_cpu") +def conv2d_nhwc_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype): + return conv2d_spatial_pack_nhwc(cfg, data, kernel, strides, padding, + dilation, out_dtype) - outs: Array of Tensor - The computation graph description of conv2d - in the format of an array of tensors. - Returns - ------- - s: Schedule - The computation schedule for conv2d. - """ +@autotvm.register_topi_schedule("conv2d_nhwc_spatial_pack.arm_cpu") +def schedule_conv2d_nhwc_spatial_pack(cfg, outs): s = tvm.create_schedule([x.op for x in outs]) def _callback(op): @@ -170,14 +85,27 @@ def _callback(op): return s -@autotvm.register_topi_compute(conv2d, 'arm_cpu', ['winograd']) -def conv2d_arm_cpu_winograd(cfg, data, kernel, strides, padding, dilation, layout, out_dtype): - """ TOPI compute callback. Use winograd template """ +@autotvm.register_topi_compute("conv2d_nchw_winograd.arm_cpu") +def conv2d_nchw_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype): tile_size = 4 - return _decl_winograd(cfg, data, kernel, strides, padding, dilation, layout, + return _decl_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype, tile_size) -def _decl_winograd(cfg, data, kernel, strides, padding, dilation, layout, out_dtype, tile_size): + +@autotvm.register_topi_schedule("conv2d_nchw_winograd.arm_cpu") +def schedule_conv2d_nchw_winograd(cfg, outs): + s = tvm.create_schedule([x.op for x in outs]) + + def _callback(op): + if 'winograd_conv2d_output' in op.tag: + output = op.output(0) + _schedule_winograd(cfg, s, output, outs[0]) + + traverse_inline(s, outs[0].op, _callback) + return s + + +def _decl_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype, tile_size): N, CI, IH, IW = get_const_tuple(data.shape) if isinstance(dilation, int): @@ -187,7 +115,7 @@ def _decl_winograd(cfg, data, kernel, strides, padding, dilation, layout, out_dt if len(kernel.shape) == 4: if dilation_h != 1 or dilation_w != 1: - kernel = dilate(kernel, (1, 1, dilation_h, dilation_w)) + kernel = nn.dilate(kernel, (1, 1, dilation_h, dilation_w)) pre_computed = False CO, _, KH, KW = get_const_tuple(kernel.shape) else: @@ -199,9 +127,8 @@ def _decl_winograd(cfg, data, kernel, strides, padding, dilation, layout, out_dt HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides) pt, pl, pb, pr = get_pad_tuple(padding, (KH, KW)) - assert layout == 'NCHW' assert KH == 3 and KW == 3 and HSTR == 1 and WSTR == 1 - data_pad = pad(data, (0, 0, pt, pl), (0, 0, pb, pr), name="data_pad") + data_pad = nn.pad(data, (0, 0, pt, pl), (0, 0, pb, pr), name="data_pad") idxd = tvm.indexdiv idxm = tvm.indexmod @@ -272,6 +199,7 @@ def _decl_winograd(cfg, data, kernel, strides, padding, dilation, layout, out_dt cfg.add_flop(2 * N * K * H * W * KH * KW * C) return output + def _schedule_winograd(cfg, s, output, last): Y = output.op.input_tensors[0] M, A = Y.op.input_tensors @@ -356,26 +284,37 @@ def _schedule_winograd(cfg, s, output, last): s[output].compute_inline() -@autotvm.register_topi_compute(conv2d, 'arm_cpu', ['winograd_nnpack_fp16']) -def conv2d_arm_cpu_winograd_nnpack_fp16( - cfg, data, kernel, strides, padding, dilation, layout, out_dtype): - """ TOPI compute callback. Use winograd_nnpack_fp16 template """ - return conv2d_arm_cpu_winograd_nnpack( - cfg, data, kernel, strides, padding, dilation, layout, out_dtype, - tvm.contrib.nnpack.ConvolutionAlgorithm.WT_8x8_FP16) +@autotvm.register_topi_compute("conv2d_nchw_winograd_nnpack.arm_cpu") +def conv2d_nchw_winograd_nnpack(cfg, data, kernel, strides, padding, dilation, out_dtype): + dtype = data.dtype + if dtype == "float32": + return _conv2d_arm_cpu_winograd_nnpack( + cfg, data, kernel, strides, padding, dilation, out_dtype, + tvm.contrib.nnpack.ConvolutionAlgorithm.WT_8x8) + elif dtype == "float16": + return _conv2d_arm_cpu_winograd_nnpack( + cfg, data, kernel, strides, padding, dilation, out_dtype, + tvm.contrib.nnpack.ConvolutionAlgorithm.WT_8x8_FP16) + else: + raise ValueError("Unsupported data type {} for conv2d winograd nnpack". + format(dtype)) + + +@autotvm.register_topi_schedule("conv2d_nchw_winograd_nnpack.arm_cpu") +def schedule_conv2d_nchw_winograd_nnpack(cfg, outs): + s = tvm.create_schedule([x.op for x in outs]) + def _callback(op): + if 'winograd_nnpack_conv2d_output' in op.tag: + output = op.output(0) + _schedule_winograd_nnpack(cfg, s, output, outs[0]) -@autotvm.register_topi_compute(conv2d, 'arm_cpu', ['winograd_nnpack_fp32']) -def conv2d_arm_cpu_winograd_nnpack_fp32( - cfg, data, kernel, strides, padding, dilation, layout, out_dtype): - """ TOPI compute callback. Use winograd_nnpack_fp32 template """ - return conv2d_arm_cpu_winograd_nnpack( - cfg, data, kernel, strides, padding, dilation, layout, out_dtype, - tvm.contrib.nnpack.ConvolutionAlgorithm.WT_8x8) + traverse_inline(s, outs[0].op, _callback) + return s -def conv2d_arm_cpu_winograd_nnpack( - cfg, data, kernel, strides, padding, dilation, layout, out_dtype, convolution_algorithm): +def _conv2d_arm_cpu_winograd_nnpack( + cfg, data, kernel, strides, padding, dilation, out_dtype, convolution_algorithm): """ TOPI compute callback. Use winograd NNPACK template """ N, CI, IH, IW = get_const_tuple(data.shape) @@ -389,7 +328,6 @@ def conv2d_arm_cpu_winograd_nnpack( HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides) pt, pl, pb, pr = get_pad_tuple(padding, (KH, KW)) - assert layout == 'NCHW' assert KH == 3 and KW == 3 and pt == 1 and pb == 1 and pl == 1 and pr == 1 and HSTR == 1\ and WSTR == 1 H = (IH + pt + pb - 3) // HSTR + 1 @@ -416,6 +354,7 @@ def conv2d_arm_cpu_winograd_nnpack( cfg.add_flop(2 * N * CI * H * W * KH * KW * CO) return output + def _schedule_winograd_nnpack(cfg, s, output, last): # Could have bias. @@ -429,36 +368,9 @@ def _schedule_winograd_nnpack(cfg, s, output, last): s[TK].pragma(s[TK].op.axis[0], 'debug_skip_region') -##### REGISTER TOPI COMPUTE / SCHEDULE FOR WINOGRAD WITH WEIGHT TRANSFORM ##### -@autotvm.register_topi_compute(conv2d_winograd_without_weight_transform, 'arm_cpu', ['winograd']) -def conv2d_winograd_ww(cfg, data, kernel, strides, padding, dilation, layout, out_dtype, tile_size): - """TOPI compute callback""" - return _decl_winograd(cfg, data, kernel, strides, padding, dilation, layout, out_dtype,\ - tile_size) - - -@autotvm.register_topi_schedule(schedule_conv2d_winograd_without_weight_transform, - 'arm_cpu', ['winograd']) -def schedule_conv2d_winograd_without_weight_transform_(cfg, outs): - """TOPI schedule callback""" - s = tvm.create_schedule([x.op for x in outs]) - - def _callback(op): - if 'winograd_conv2d_output' in op.tag: - output = op.output(0) - _schedule_winograd(cfg, s, output, outs[0]) - - traverse_inline(s, outs[0].op, _callback) - return s - - -##### REGISTER TOPI COMPUTE / SCHEDULE FOR WINOGRAD NNPACK WITHOUT WEIGHT TRANSFORM ##### -@autotvm.register_topi_compute(conv2d_winograd_nnpack_without_weight_transform, - 'arm_cpu', - ['winograd_nnpack_fp16', 'winograd_nnpack_fp32']) -def conv2d_winograd_nnpack_ww(cfg, data, transformed_kernel, bias, strides, - padding, dilation, layout, out_dtype): - """ TOPI compute callback. Use winograd NNPACK template """ +@autotvm.register_topi_compute("conv2d_nchw_winograd_nnpack_without_weight_transform.arm_cpu") +def conv2d_nchw_winograd_nnpack_without_weight_transform( + cfg, data, transformed_kernel, bias, strides, padding, dilation, out_dtype): N, CI, IH, IW = get_const_tuple(data.shape) if isinstance(dilation, int): dilation_h = dilation_w = dilation @@ -471,7 +383,6 @@ def conv2d_winograd_nnpack_ww(cfg, data, transformed_kernel, bias, strides, KH, KW = 3, 3 pt, pl, pb, pr = get_pad_tuple(padding, (KH, KW)) - assert layout == 'NCHW' assert KH == 3 and KW == 3 and pt == 1 and pb == 1 and pl == 1 and pr == 1 and HSTR == 1\ and WSTR == 1 H = (IH + pt + pb - 3) // HSTR + 1 @@ -492,9 +403,8 @@ def conv2d_winograd_nnpack_ww(cfg, data, transformed_kernel, bias, strides, return output -@autotvm.register_topi_schedule(schedule_conv2d_winograd_nnpack_without_weight_transform, - 'arm_cpu', ['winograd_nnpack_fp16', 'winograd_nnpack_fp32']) -def schedule_conv2d_winograd_nnpack_without_weight_transform_(cfg, outs): +@autotvm.register_topi_schedule("conv2d_nchw_winograd_nnpack_without_weight_transform.arm_cpu") +def schedule_conv2d_nchw_winograd_nnpack_without_weight_transform(cfg, outs): """TOPI schedule callback""" s = tvm.create_schedule([x.op for x in outs]) @@ -505,226 +415,3 @@ def _callback(op): traverse_inline(s, outs[0].op, _callback) return s - - -##### REGISTER ALTER OP LAYOUT ##### -@conv2d_alter_layout.register(["arm_cpu"]) -def _alter_conv2d_layout_arm(attrs, inputs, tinfos, F): - """Alter op layout for pre-computing kernel transformation - - Parameters - ---------- - attrs : tvm.ir.Attrs - Attributes of current convolution - inputs : tvm.relay.Expr - Grouped input symbols - tinfos : list - Input shape and dtype - F: symbol - The context, can be either relay.op - - Note - ---- - Unlike other TOPI functions, this function operates on both graph level and operator level, - so we have to pass 'F' to make it support our two versions of graph IR, Relay. - """ - copy_inputs = list(inputs) - new_attrs = {k: attrs[k] for k in attrs.keys()} - - if F.__name__ == 'tvm.relay.op': - # Derive channels for frontends (e.g ONNX) that miss "channel" field. - new_attrs["channels"] = inputs[1].checked_type.shape[attrs['kernel_layout'].index('O')] - - dilation = attrs.get_int_tuple("dilation") - strides = attrs.get_int_tuple("strides") - padding = attrs.get_int_tuple("padding") - groups = attrs.get_int('groups') - data_layout_key = "data_layout" if "data_layout" in new_attrs else "layout" - layout = attrs[data_layout_key] - kernel_layout = attrs['kernel_layout'] - out_dtype = attrs["out_dtype"] - if out_dtype in ("same", ""): - out_dtype = tinfos[0].dtype - - if dilation != (1, 1): - logger.warning("Does not support weight pre-transform for dilated convolution.") - return None - - # query config of this workload - data, kernel = tinfos[0:2] - if groups == 1: - workload = autotvm.task.args_to_workload( - [data, kernel, strides, padding, dilation, layout, out_dtype], conv2d) - else: - workload = autotvm.task.args_to_workload( - [data, kernel, strides, padding, dilation, out_dtype], depthwise_conv2d_nchw) - - if layout == 'NCHW' and kernel_layout == 'OIHW': - N, CI, H, W = get_const_tuple(data.shape) - CO, _, KH, KW = get_const_tuple(kernel.shape) - elif layout == 'NHWC' and kernel_layout == 'HWIO': - N, H, W, CI = get_const_tuple(data.shape) - KH, KW, _, CO = get_const_tuple(kernel.shape) - # Also modify the workload to pick up because later we convert to NCHW - # layout. - new_data = tvm.placeholder((N, CI, H, W), dtype=data.dtype) - new_kernel = tvm.placeholder((CO, CI, KH, KW), dtype=kernel.dtype) - new_layout = 'NCHW' - workload = autotvm.task.args_to_workload( - [new_data, new_kernel, strides, padding, dilation, new_layout, out_dtype], conv2d) - elif layout == 'NHWC' and kernel_layout == 'HWOI': - # This is the case for depthwise convolution. - N, H, W, CI = get_const_tuple(data.shape) - KH, KW, CO, M = get_const_tuple(kernel.shape) - # Also modify the workload to pick up because later we convert to NCHW - # layout. - new_data = tvm.placeholder((N, CI, H, W), dtype=data.dtype) - new_kernel = tvm.placeholder((CO, M, KH, KW), dtype=kernel.dtype) - workload = autotvm.task.args_to_workload( - [new_data, new_kernel, strides, padding, dilation, out_dtype], depthwise_conv2d_nchw) - else: - return None - - idxd = tvm.indexdiv - - if groups == 1: - target = tvm.target.Target.current() - dispatch_ctx = autotvm.DispatchContext.current - cfg = dispatch_ctx.query(target, workload) - - if cfg.is_fallback: # if is fallback, clear query cache and return None - autotvm.task.clear_fallback_cache(target, workload) - if layout == 'NHWC' and kernel_layout == 'HWIO': - new_attrs['data_layout'] = 'NCHW' - new_attrs['kernel_layout'] = 'OIHW' - return F.nn.conv2d(*copy_inputs, **new_attrs) - return None - - if cfg.template_key == 'direct': # pack weight tensor - VC = cfg['tile_co'].size[-1] - new_attrs['kernel_layout'] = 'OIHW%do' % VC - - # Store the same config for the altered operator (workload) - new_data = tvm.placeholder((N, CI, H, W), dtype=data.dtype) - new_attrs[data_layout_key] = 'NCHW' - new_kernel = tvm.placeholder((idxd(CO, VC), CI, KH, KW, VC), dtype=kernel.dtype) - new_workload = autotvm.task.args_to_workload( - [new_data, new_kernel, strides, padding, dilation, 'NCHW', out_dtype], conv2d) - dispatch_ctx.update(target, new_workload, cfg) - - return F.nn.conv2d(*copy_inputs, **new_attrs) - elif cfg.template_key == "winograd": # pre-compute weight transformation in winograd - if "-device=arm_cpu" in target.options: - tile_size = 4 - VC = cfg['tile_k'].size[-1] - elif "-device=bifrost" in target.options: - tile_size = 2 - VC = 0 - else: - from ..mali.conv2d import _pick_tile_size - tile_size = _pick_tile_size(tinfos[0], tinfos[1]) - VC = cfg['tile_bna'].val - - weight = copy_inputs[1] - if kernel_layout != 'OIHW': - weight = F.transpose(weight, axes=(2, 3, 0, 1)) - weight = F.nn.contrib_conv2d_winograd_weight_transform(weight, - tile_size=tile_size) - if VC > 0: - weight = F.reshape(weight, - newshape=(KH + tile_size - 1, - KW + tile_size - 1, - idxd(CO, VC), VC, CI)) - weight = F.transpose(weight, axes=[0, 1, 2, 4, 3]) - new_weight = tvm.placeholder((KH + tile_size - 1, - KW + tile_size -1, - idxd(CO, VC), CI, VC), - kernel.dtype) - else: - weight = F.reshape(weight, - newshape=(KH + tile_size - 1, KW + tile_size - 1, CO, CI)) - new_weight = tvm.placeholder( - (KH + tile_size - 1, KW + tile_size -1, CO, CI), kernel.dtype - ) - - copy_inputs[1] = weight - new_attrs['tile_size'] = tile_size - new_attrs[data_layout_key] = 'NCHW' - - # Store the same config for the altered operator (workload) - new_data = tvm.placeholder((N, CI, H, W), dtype=data.dtype) - new_workload = autotvm.task.args_to_workload( - [new_data, new_weight, strides, padding, dilation, - new_attrs[data_layout_key], out_dtype, tile_size], - conv2d_winograd_without_weight_transform) - dispatch_ctx.update(target, new_workload, cfg) - - return F.nn.contrib_conv2d_winograd_without_weight_transform(*copy_inputs, **new_attrs) - elif cfg.template_key in ["winograd_nnpack_fp16", "winograd_nnpack_fp32"]: - # pre-compute winograd_nnpack transform - # for winograd_nnpack_fp16, the the precomputeprune pass must run on device, - # where float16 is supported - weight_dtype = 'float32' - weight = copy_inputs[1] - if kernel_layout != 'OIHW': - weight = F.transpose(weight, axes=(2, 3, 0, 1)) - weight = F.nn.contrib_conv2d_winograd_weight_transform(weight, - tile_size=tile_size) - transformed_kernel = F.nn.contrib_conv2d_winograd_nnpack_weight_transform( - weight, - convolution_algorithm=cfg['winograd_nnpack_algorithm'].val, - out_dtype=weight_dtype) - copy_inputs[1] = transformed_kernel - - new_data = tvm.placeholder((N, CI, H, W), dtype=data.dtype) - new_kernel = tvm.placeholder((CO, CI, 8, 8), "float32") - bias = tvm.placeholder((CO, ), "float32") - new_attrs[data_layout_key] = 'NCHW' - new_workload = autotvm.task.args_to_workload( - [new_data, new_kernel, bias, strides, - padding, dilation, new_attrs[data_layout_key], out_dtype] - if len(copy_inputs) == 3 else - [new_data, new_kernel, strides, - padding, dilation, new_attrs[data_layout_key], out_dtype], - conv2d_winograd_nnpack_without_weight_transform) - dispatch_ctx.update(target, new_workload, cfg) - return F.nn.contrib_conv2d_winograd_nnpack_without_weight_transform( - *copy_inputs, **new_attrs) - else: - raise RuntimeError("Unsupported template_key '%s'" % cfg.template_key) - else: - target = tvm.target.Target.current() - dispatch_ctx = autotvm.DispatchContext.current - cfg = dispatch_ctx.query(target, workload) - - if cfg.is_fallback: # if is fallback, clear query cache and return None - autotvm.task.clear_fallback_cache(tvm.target.Target.current(), workload) - if layout == 'NHWC' and kernel_layout == 'HWOI': - new_attrs['data_layout'] = 'NCHW' - new_attrs['kernel_layout'] = 'OIHW' - return F.nn.conv2d(*copy_inputs, **new_attrs) - return None - if cfg.template_key == 'contrib_spatial_pack': - VC = cfg['tile_co'].size[-1] - new_attrs['kernel_layout'] = 'OIHW%do' % (cfg['tile_co'].size[-1]) - - # Store the same config for the altered operator (workload) - new_data = tvm.placeholder((N, CI, H, W), dtype=data.dtype) - new_attrs[data_layout_key] = 'NCHW' - if attrs['kernel_layout'] == 'OIHW': - CO, M, KH, KW = get_const_tuple(kernel.shape) - elif attrs['kernel_layout'] == 'HWOI': - KH, KW, CO, M = get_const_tuple(kernel.shape) - else: - raise RuntimeError("Depthwise conv should either have OIHW/HWIO kernel layout") - new_kernel = tvm.placeholder((idxd(CO, VC), M, KH, KW, VC), dtype=kernel.dtype) - new_workload = autotvm.task.args_to_workload( - [new_data, new_kernel, strides, padding, dilation, out_dtype], - depthwise_conv2d_nchw) - dispatch_ctx.update(target, new_workload, cfg) - - return F.nn.conv2d(*copy_inputs, **new_attrs) - else: - # currently we only have contrib_spatial_pack and direct template - # add more schedule templates. - return None diff --git a/topi/python/topi/arm_cpu/conv2d_alter_op.py b/topi/python/topi/arm_cpu/conv2d_alter_op.py new file mode 100644 index 0000000000000..869b1d44ed643 --- /dev/null +++ b/topi/python/topi/arm_cpu/conv2d_alter_op.py @@ -0,0 +1,167 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name,unused-variable,unused-argument,no-member +"""Conv2D alter op and legalize functions for arm cpu""" + +import logging + +import tvm +from tvm import relay +from tvm import autotvm + +from ..nn import conv2d_alter_layout +from ..util import get_const_tuple + + +logger = logging.getLogger('topi') + + +@conv2d_alter_layout.register(["arm_cpu"]) +def _alter_conv2d_layout(attrs, inputs, tinfos, out_type): + target = tvm.target.current_target(allow_none=False) + dispatch_ctx = autotvm.task.DispatchContext.current + + _, outs = relay.backend.compile_engine.select_implement( + relay.op.get("nn.conv2d"), attrs, tinfos, out_type, target) + workload = autotvm.task.get_workload(outs) + if workload is None: + # The best implementation is not an AutoTVM template, + # we then assume it's not necessary to alter this op. + return None + cfg = dispatch_ctx.query(target, workload) + if cfg.is_fallback: # if is fallback, clear query cache and return None + autotvm.task.clear_fallback_cache(target, workload) + return None + + topi_tmpl = workload[0] + new_attrs = {k: attrs[k] for k in attrs.keys()} + + strides = attrs.get_int_tuple("strides") + padding = attrs.get_int_tuple("padding") + dilation = attrs.get_int_tuple("dilation") + data_layout = attrs["data_layout"] + kernel_layout = attrs["kernel_layout"] + data, kernel = tinfos + out_dtype = out_type.dtype + + idxd = tvm.indexdiv + + if topi_tmpl == "conv2d_nchw_spatial_pack.arm_cpu": + assert data_layout == "NCHW" and kernel_layout == "OIHW" + N, CI, H, W = get_const_tuple(data.shape) + CO, _, KH, KW = get_const_tuple(kernel.shape) + VC = cfg['tile_co'].size[-1] + + new_attrs['kernel_layout'] = 'OIHW%do' % VC + + new_data = data + new_kernel = tvm.placeholder((idxd(CO, VC), CI, KH, KW, VC), dtype=kernel.dtype) + new_workload = autotvm.task.args_to_workload( + [new_data, new_kernel, strides, padding, dilation, out_dtype], + "conv2d_nchw_spatial_pack.arm_cpu") + dispatch_ctx.update(target, new_workload, cfg) + + return relay.nn.conv2d(*inputs, **new_attrs) + elif topi_tmpl == "conv2d_nhwc_spatial_pack.arm_cpu": + assert data_layout == "NHWC" and kernel_layout == "HWIO" + N, H, W, CI = get_const_tuple(data.shape) + KH, KW, _, CO = get_const_tuple(kernel.shape) + VC = cfg['tile_co'].size[-1] + + new_attrs['kernel_layout'] = 'OHWI%do' % VC + + new_data = data + new_kernel = tvm.placeholder((idxd(CO, VC), KH, KW, CI, VC), dtype=kernel.dtype) + new_workload = autotvm.task.args_to_workload( + [new_data, new_kernel, strides, padding, dilation, out_dtype], + "conv2d_nhwc_spatial_pack.arm_cpu") + dispatch_ctx.update(target, new_workload, cfg) + + return relay.nn.conv2d(*inputs, **new_attrs) + elif topi_tmpl == "conv2d_nchw_winograd.arm_cpu": + assert data_layout == "NCHW" and kernel_layout == "OIHW" + N, CI, H, W = get_const_tuple(data.shape) + CO, _, KH, KW = get_const_tuple(kernel.shape) + VC = cfg['tile_k'].size[-1] + tile_size = 4 + + weight_expr = inputs[1] + weight_expr = relay.nn.contrib_conv2d_winograd_weight_transform( + weight_expr, tile_size=tile_size) + weight_expr = relay.reshape(weight_expr, + newshape=(KH + tile_size - 1, + KW + tile_size - 1, + idxd(CO, VC), VC, CI)) + weight_expr = relay.transpose(weight_expr, axes=[0, 1, 2, 4, 3]) + + new_attrs['tile_size'] = tile_size + + new_data = data + new_kernel = tvm.placeholder((KH + tile_size - 1, + KW + tile_size -1, + idxd(CO, VC), CI, VC), + kernel.dtype) + new_workload = autotvm.task.args_to_workload( + [new_data, new_kernel, strides, padding, dilation, out_dtype], + 'conv2d_nchw_winograd.arm_cpu') + dispatch_ctx.update(target, new_workload, cfg) + + return relay.nn.contrib_conv2d_winograd_without_weight_transform( + inputs[0], weight_expr, **new_attrs) + elif topi_tmpl == "conv2d_nchw_winograd_nnpack.arm_cpu": + assert data_layout == "NCHW" and kernel_layout == "OIHW" + N, CI, H, W = get_const_tuple(data.shape) + CO, _, KH, KW = get_const_tuple(kernel.shape) + + # pre-compute winograd_nnpack transform + # for winograd_nnpack_fp16, the the precompute prune pass must run on device, + # where float16 is supported + weight_dtype = 'float32' + weight_expr = inputs[1] + transformed_weight = relay.nn.contrib_conv2d_winograd_nnpack_weight_transform( + weight_expr, + convolution_algorithm=cfg['winograd_nnpack_algorithm'].val, + out_dtype=weight_dtype) + + new_data = data + new_kernel = tvm.placeholder((CO, CI, 8, 8), "float32") + + new_workload = autotvm.task.args_to_workload( + [new_data, new_kernel, None, strides, padding, dilation, out_dtype], + "conv2d_nchw_winograd_nnpack_without_weight_transform.arm_cpu") + dispatch_ctx.update(target, new_workload, cfg) + return relay.nn.contrib_conv2d_winograd_without_weight_transform( + inputs[0], transformed_weight, **new_attrs) + elif topi_tmpl == "depthwise_conv2d_nchw_spatial_pack.arm_cpu": + assert data_layout == "NCHW" and kernel_layout == "OIHW" + N, CI, H, W = get_const_tuple(data.shape) + CO, _, KH, KW = get_const_tuple(kernel.shape) + VC = cfg['tile_co'].size[-1] + + new_attrs['kernel_layout'] = 'OIHW%do' % (cfg['tile_co'].size[-1]) + + # Store the same config for the altered operator (workload) + new_data = data + new_kernel = tvm.placeholder((idxd(CO, VC), CI, KH, KW, VC), dtype=kernel.dtype) + new_workload = autotvm.task.args_to_workload( + [new_data, new_kernel, strides, padding, dilation, out_dtype], + "depthwise_conv2d_nchw_spatial_pack.arm_cpu") + dispatch_ctx.update(target, new_workload, cfg) + + return relay.nn.conv2d(*inputs, **new_attrs) + else: + return None diff --git a/topi/python/topi/arm_cpu/conv2d_int8.py b/topi/python/topi/arm_cpu/conv2d_int8.py index 8f43f5c210d41..cd413d659203f 100644 --- a/topi/python/topi/arm_cpu/conv2d_int8.py +++ b/topi/python/topi/arm_cpu/conv2d_int8.py @@ -21,7 +21,6 @@ from tvm import autotvm from .. import generic, tag from ..util import get_const_tuple -from ..nn.conv2d import conv2d_NCHWc_int8 from ..generic import conv2d as conv2d_generic from .. import nn from ..nn.conv2d import _get_workload as _get_conv2d_workload @@ -42,9 +41,9 @@ def _get_default_config(cfg, data, kernel, strides, padding, out_dtype): cfg, wkl, int32_lanes=2, num_int8_elements=4) -@autotvm.register_topi_compute(conv2d_NCHWc_int8, ['arm_cpu'], 'direct') -def _declaration_conv_NCHWc_int8(cfg, data, kernel, strides, - padding, dilation, layout, out_layout, out_dtype): +@autotvm.register_topi_compute("conv2d_NCHWc_int8.arm_cpu") +def conv2d_NCHWc_int8(cfg, data, kernel, strides, + padding, dilation, layout, out_layout, out_dtype): # layout and out_layout are not used here, # we keep them for debug convenience when dumping autotvm workload n, ic_chunk, ih, iw, ic_bn = get_const_tuple(data.shape) @@ -68,8 +67,8 @@ def _declaration_conv_NCHWc_int8(cfg, data, kernel, strides, out_dtype) -@autotvm.register_topi_schedule(generic.schedule_conv2d_NCHWc_int8, ['arm_cpu'], ['direct']) -def _schedule_conv2d_NCHWc_int8(cfg, outs): +@autotvm.register_topi_schedule("conv2d_NCHWc_int8.arm_cpu") +def schedule_conv2d_NCHWc_int8(cfg, outs): """Create schedule for tensors""" s = tvm.create_schedule([x.op for x in outs]) scheduled_ops = [] @@ -86,7 +85,7 @@ def traverse(op): if 'conv2d_NCHWc_int8' in op.tag: conv_out = op.output(0) - kernel = conv_out.op.input_tensors[1] + kernel_vec = conv_out.op.input_tensors[1] data_vec = conv_out.op.input_tensors[0] data = data_vec.op.input_tensors[0] \ if isinstance(data_vec.op, tvm.tensor.ComputeOp) and "pad" not in data_vec.op.tag \ @@ -95,9 +94,9 @@ def traverse(op): data_pad = data data = data_pad.op.input_tensors[0] - args = [s, cfg, data_vec, conv_out, outs[0]] + args = [s, cfg, data_vec, kernel_vec, conv_out, outs[0]] # int8 conv kernel is 7-dim - _, _, kh, kw, _, _, _ = get_const_tuple(kernel.shape) + _, _, kh, kw, _, _, _ = get_const_tuple(kernel_vec.shape) dtype = "uint" if data.dtype == "uint8" else "int" if kh == 1 and kw == 1: conv2d_generic.schedule_conv_NCHWc_cpu_1x1_int8( diff --git a/topi/python/topi/arm_cpu/conv2d_spatial_pack.py b/topi/python/topi/arm_cpu/conv2d_spatial_pack.py index 350a0227ef480..032ac76ff6a22 100644 --- a/topi/python/topi/arm_cpu/conv2d_spatial_pack.py +++ b/topi/python/topi/arm_cpu/conv2d_spatial_pack.py @@ -78,10 +78,12 @@ def conv2d_spatial_pack_nchw(cfg, data, kernel, strides, padding, dilation, # fallback support if cfg.is_fallback: if num_tile == 2: # arm cpu - ref_log = autotvm.tophub.load_reference_log('arm_cpu', 'rk3399', 'conv2d', 'direct') + ref_log = autotvm.tophub.load_reference_log( + 'arm_cpu', 'rk3399', 'conv2d_nchw_spatial_pack.arm_cpu') cfg.fallback_with_reference_log(ref_log) elif num_tile == 3: # mali gpu - ref_log = autotvm.tophub.load_reference_log('mali', 'rk3399', 'conv2d', 'direct') + ref_log = autotvm.tophub.load_reference_log( + 'mali', 'rk3399', 'conv2d_nchw_spatial_pack.mali') cfg.fallback_with_reference_log(ref_log) # ==================================================================== diff --git a/topi/python/topi/arm_cpu/conv2d_transpose.py b/topi/python/topi/arm_cpu/conv2d_transpose.py index 65f1024c88a30..93ff02900f37c 100644 --- a/topi/python/topi/arm_cpu/conv2d_transpose.py +++ b/topi/python/topi/arm_cpu/conv2d_transpose.py @@ -21,13 +21,12 @@ import tvm from tvm import autotvm -from ..generic import schedule_conv2d_transpose_nchw -from ..nn import conv2d_transpose_nchw, dilate, pad, get_pad_tuple +from ..nn import dilate, pad, get_pad_tuple from ..util import get_const_tuple, traverse_inline from .conv2d_spatial_pack import schedule_conv2d_spatial_pack_nchw -@autotvm.task.register_topi_compute(conv2d_transpose_nchw, "arm_cpu", "direct") -def conv2d_transpose_nchw_arm(cfg, Input, Filter, strides, padding, out_dtype): +@autotvm.register_topi_compute("conv2d_transpose_nchw.arm_cpu") +def conv2d_transpose_nchw(cfg, Input, Filter, strides, padding, out_dtype): """Transposed 2D convolution nchw forward operator. Parameters @@ -135,8 +134,8 @@ def _decl_spatial_pack(cfg, data, kernel, strides, padding, layout, out_dtype, n # register customized schedule for arm cpu. -@autotvm.task.register_topi_schedule(schedule_conv2d_transpose_nchw, "arm_cpu", "direct") -def schedule_conv2d_transpose_arm(cfg, outs): +@autotvm.register_topi_schedule("conv2d_transpose_nchw.arm_cpu") +def schedule_conv2d_transpose_nchw(cfg, outs): """Schedule conv2d transpose for arm cpu""" s = tvm.create_schedule([x.op for x in outs]) diff --git a/topi/python/topi/arm_cpu/depthwise_conv2d.py b/topi/python/topi/arm_cpu/depthwise_conv2d.py index 207fc712c450d..9a79f984edb14 100644 --- a/topi/python/topi/arm_cpu/depthwise_conv2d.py +++ b/topi/python/topi/arm_cpu/depthwise_conv2d.py @@ -20,19 +20,18 @@ import tvm from tvm import autotvm -from ..generic import schedule_depthwise_conv2d_nchw -from ..nn import depthwise_conv2d_nchw, pad +from .. import nn from ..util import traverse_inline, get_const_tuple, get_const_int from ..nn.util import get_pad_tuple -# register original implementation of depthwise_conv2d_nchw since we don't need to change this part -autotvm.register_topi_compute(depthwise_conv2d_nchw, 'arm_cpu', 'direct', - depthwise_conv2d_nchw.fdefault) -# register customized schedule for arm cpu. -@autotvm.register_topi_schedule(schedule_depthwise_conv2d_nchw, 'arm_cpu', - ['direct', 'contrib_spatial_pack']) -def schedule_depthwise_conv2d_nchw_arm(cfg, outs): +@autotvm.register_topi_compute("depthwise_conv2d_nchw.arm_cpu") +def depthwise_conv2d_nchw(cfg, data, kernel, strides, padding, dilation, out_dtype): + return nn.depthwise_conv2d_nchw(data, kernel, strides, padding, dilation, out_dtype) + + +@autotvm.register_topi_schedule("depthwise_conv2d_nchw.arm_cpu") +def schedule_depthwise_conv2d_nchw(cfg, outs): """Schedule depthwise conv2d Parameters @@ -65,7 +64,7 @@ def _schedule(cfg, s, data, data_pad, kernel, output): # fallback support if cfg.is_fallback: ref_log = autotvm.tophub.load_reference_log( - 'arm_cpu', 'rk3399', 'depthwise_conv2d_nchw', 'direct') + 'arm_cpu', 'rk3399', 'depthwise_conv2d_nchw.arm_cpu') cfg.fallback_with_reference_log(ref_log) ##### space definition end ##### @@ -134,25 +133,12 @@ def _callback(op): data = data_pad.op.input_tensors[0] _schedule(cfg, s, data, data_pad, kernel, output) - if op.tag == 'spatial_depthwise_conv2d_nchw_output': - output = op.output(0) - conv = op.input_tensors[0] - data_vec = conv.op.input_tensors[0] - kernel_vec = conv.op.input_tensors[1] - if kernel_vec.op.name == 'kernel_vec': - kernel = kernel_vec.op.input_tensors[0] - else: - kernel = kernel_vec - if isinstance(kernel.op, tvm.tensor.ComputeOp) and "dilate" in kernel.op.tag: - s[kernel].compute_inline() - - _schedule_spatial_pack(cfg, s, data_vec, kernel_vec, conv, output, outs[0]) - traverse_inline(s, outs[0].op, _callback) return s -@autotvm.register_topi_compute(depthwise_conv2d_nchw, 'arm_cpu', ['contrib_spatial_pack']) -def depthwise_conv2d_arm_cpu(cfg, data, kernel, strides, padding, dilation, out_dtype): + +@autotvm.register_topi_compute("depthwise_conv2d_nchw_spatial_pack.arm_cpu") +def depthwise_conv2d_nchw_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype): """TOPI compute callback for depthwise_conv2d nchw Parameters @@ -189,6 +175,29 @@ def depthwise_conv2d_arm_cpu(cfg, data, kernel, strides, padding, dilation, out_ return _decl_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype, num_tile=2) +@autotvm.register_topi_schedule("depthwise_conv2d_nchw_spatial_pack.arm_cpu") +def schedule_depthwise_conv2d_nchw_spatial_pack(cfg, outs): + outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs + s = tvm.create_schedule([x.op for x in outs]) + + def _callback(op): + if op.tag == 'spatial_depthwise_conv2d_nchw_output': + output = op.output(0) + conv = op.input_tensors[0] + data_vec = conv.op.input_tensors[0] + kernel_vec = conv.op.input_tensors[1] + if kernel_vec.op.name == 'kernel_vec': + kernel = kernel_vec.op.input_tensors[0] + else: + kernel = kernel_vec + if isinstance(kernel.op, tvm.tensor.ComputeOp) and "dilate" in kernel.op.tag: + s[kernel].compute_inline() + _schedule_spatial_pack(cfg, s, data_vec, kernel_vec, conv, output, outs[0]) + + traverse_inline(s, outs[0].op, _callback) + return s + + def _decl_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype, num_tile): out_dtype = out_dtype or data.dtype @@ -220,16 +229,16 @@ def _decl_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype, WPAD = pad_left + pad_right DOPAD = (HPAD != 0 or WPAD != 0) if DOPAD: - data_pad = pad(data, (0, 0, pad_top, pad_left), (0, 0, pad_down, pad_right), - name="data_pad") + data_pad = nn.pad(data, (0, 0, pad_top, pad_left), (0, 0, pad_down, pad_right), + name="data_pad") else: data_pad = data # fallback support # Currently, Mali schedule doesn't use it like conv2d. if cfg.is_fallback: - ref_log = autotvm.tophub.load_reference_log('arm_cpu', 'rk3399', 'depthwise_conv2d_nchw', - 'contrib_spatial_pack') + ref_log = autotvm.tophub.load_reference_log( + 'arm_cpu', 'rk3399', 'depthwise_conv2d_nchw_spatial_pack.arm_cpu') cfg.fallback_with_reference_log(ref_log) # ==================== define configuration space ==================== diff --git a/topi/python/topi/arm_cpu/injective.py b/topi/python/topi/arm_cpu/injective.py index 0b6a16d37d1a9..644a7e3fb5233 100644 --- a/topi/python/topi/arm_cpu/injective.py +++ b/topi/python/topi/arm_cpu/injective.py @@ -17,10 +17,8 @@ # pylint: disable=invalid-name, unused-variable """Schedule for pooling operators""" import tvm -from .. import generic from ..util import is_empty_shape -@generic.schedule_injective_from_existing.register(["arm_cpu"]) def schedule_injective_from_existing(sch, out): """Schedule for injective op from existing schedule. @@ -46,7 +44,6 @@ def schedule_injective_from_existing(sch, out): sch[out].parallel(sch[out].op.axis[0]) return sch -@generic.schedule_injective.register(["arm_cpu"]) def schedule_injective(outs): """ARM CPU schedule for injective op. @@ -74,7 +71,6 @@ def schedule_injective(outs): schedule_injective_from_existing(s, x) return s -@generic.schedule_concatenate.register(["arm_cpu"]) def schedule_concatenate(outs): """Schedule for concatenate op. diff --git a/topi/python/topi/bifrost/conv2d.py b/topi/python/topi/bifrost/conv2d.py index 2ae65800e9256..7956d06fc3fac 100644 --- a/topi/python/topi/bifrost/conv2d.py +++ b/topi/python/topi/bifrost/conv2d.py @@ -19,23 +19,21 @@ """conv2d schedule on ARM Mali (Bifrost) GPU""" import tvm +from tvm import relay from tvm import autotvm from .gemm import decl_winograd_gemm, schedule_gemm from .transforms import tile_and_bind, tile_and_bind3d -from ..generic import schedule_conv2d_nchw, schedule_conv2d_winograd_without_weight_transform from ..util import traverse_inline, get_const_int, get_const_tuple -from ..nn import conv2d, conv2d_winograd_without_weight_transform, \ - get_pad_tuple, pad, conv2d_alter_layout, dilate +from .. import nn from ..nn.winograd_util import winograd_transform_matrices # reuse some compute declarations from ARM CPU from ..arm_cpu.conv2d_spatial_pack import conv2d_spatial_pack_nchw -from ..arm_cpu.conv2d import _alter_conv2d_layout_arm -@autotvm.register_topi_compute(conv2d, 'bifrost', ['direct']) -def conv2d_bifrost(cfg, data, kernel, strides, padding, dilation, layout, out_dtype): +@autotvm.register_topi_compute("conv2d_nchw_spatial_pack.bifrost") +def conv2d_nchw_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype): """TOPI compute callback for conv2d Parameters @@ -60,9 +58,6 @@ def conv2d_bifrost(cfg, data, kernel, strides, padding, dilation, layout, out_dt dilation : list of two ints [dilation_height, dilation_width] - layout : str - layout of data - out_dtype: str The output type. This is used for mixed precision. @@ -71,14 +66,12 @@ def conv2d_bifrost(cfg, data, kernel, strides, padding, dilation, layout, out_dt output : tvm.Tensor 4-D with shape [batch, out_channel, out_height, out_width] """ - if layout == 'NCHW': - return conv2d_spatial_pack_nchw(cfg, data, kernel, strides, padding, - dilation, out_dtype, num_tile=3) - raise ValueError("Unsupported layout {}".format(layout)) + return conv2d_spatial_pack_nchw(cfg, data, kernel, strides, padding, + dilation, out_dtype, num_tile=3) -@autotvm.register_topi_schedule(schedule_conv2d_nchw, 'bifrost', ['direct', 'winograd']) -def schedule_conv2d_nchw_bifrost(cfg, outs): +@autotvm.register_topi_schedule("conv2d_nchw_spatial_pack.bifrost") +def schedule_conv2d_nchw_spatial_pack(cfg, outs): """TOPI schedule callback for conv2d Parameters @@ -116,9 +109,6 @@ def _callback(op): _schedule_spatial_pack(cfg, s, output, conv, data_vec, kernel_vec) - if 'winograd_conv2d_output' in op.tag: - _schedule_winograd(cfg, s, op) - traverse_inline(s, outs[0].op, _callback) return s @@ -195,10 +185,22 @@ def _schedule_spatial_pack(cfg, s, output, conv, data_vec, kernel_vec): return s -@autotvm.register_topi_compute(conv2d, 'bifrost', ['winograd']) -def conv2d_bifrost_winograd(cfg, data, kernel, strides, padding, dilation, layout, out_dtype): +@autotvm.register_topi_compute("conv2d_nchw_winograd.bifrost") +def conv2d_nchw_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype): """Use Winograd as the convolution method""" - return _decl_winograd(cfg, data, kernel, strides, padding, dilation, layout, out_dtype) + return _decl_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype) + + +@autotvm.register_topi_schedule("conv2d_nchw_winograd.bifrost") +def schedule_conv2d_nchw_winograd(cfg, outs): + s = tvm.create_schedule([x.op for x in outs]) + + def _callback(op): + if 'winograd_conv2d_output' in op.tag: + _schedule_winograd(cfg, s, op) + + traverse_inline(s, outs[0].op, _callback) + return s def _decl_winograd_kernel_transform(kernel, tile_size, G): @@ -256,7 +258,7 @@ def upround(x, align): return U -def _decl_winograd(cfg, data, kernel, strides, padding, dilation, layout, out_dtype, tile_size=2): +def _decl_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype, tile_size=2): """Declare a winograd convolution - only tile_size=2 is currently supported""" N, CI, IH, IW = get_const_tuple(data.shape) if isinstance(dilation, int): @@ -266,7 +268,7 @@ def _decl_winograd(cfg, data, kernel, strides, padding, dilation, layout, out_dt if int(kernel.shape[2]) == 3: if dilation_h != 1 or dilation_w != 1: - kernel = dilate(kernel, (1, 1, dilation_h, dilation_w)) + kernel = nn.dilate(kernel, (1, 1, dilation_h, dilation_w)) pre_computed = False CO, _, KH, KW = get_const_tuple(kernel.shape) else: @@ -275,11 +277,10 @@ def _decl_winograd(cfg, data, kernel, strides, padding, dilation, layout, out_dt H_CAT, W_CAT, CO, CI = get_const_tuple(kernel.shape) KH, KW = H_CAT - tile_size + 1, W_CAT - tile_size + 1 HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides) - pt, pl, pb, pr = get_pad_tuple(padding, (KH, KW)) + pt, pl, pb, pr = nn.get_pad_tuple(padding, (KH, KW)) - assert layout == 'NCHW' assert KH == 3 and KW == 3 and HSTR == 1 and WSTR == 1 - data_pad = pad(data, (0, 0, pt, pl), (0, 0, pb, pr), name="data_pad") + data_pad = nn.pad(data, (0, 0, pt, pl), (0, 0, pb, pr), name="data_pad") r = KW m = tile_size @@ -454,31 +455,77 @@ def _schedule_winograd(cfg, s, op): tile_and_bind3d(s, output, k, h, w, 1, 2, 2) -##### REGISTER TOPI COMPUTE / SCHEDULE FOR WINOGRAD WITH WEIGHT TRANSFORM ##### -@autotvm.register_topi_compute(conv2d_winograd_without_weight_transform, 'bifrost', ['winograd']) -def conv2d_winograd_ww(cfg, data, kernel, strides, padding, dilation, layout, out_dtype, tile_size): - """TOPI compute callback""" - return _decl_winograd(cfg, data, kernel, strides, padding, dilation, layout, out_dtype) +##### REGISTER ALTER OP LAYOUT ##### +@nn.conv2d_alter_layout.register(["bifrost"]) +def _alter_conv2d_layout(attrs, inputs, tinfos, out_type): + target = tvm.target.current_target(allow_none=False) + dispatch_ctx = autotvm.task.DispatchContext.current + + _, outs = relay.backend.compile_engine.select_implement( + relay.op.get("nn.conv2d"), attrs, tinfos, out_type, target) + workload = autotvm.task.get_workload(outs) + if workload is None: + # The best implementation is not an AutoTVM template, + # we then assume it's not necessary to alter this op. + return None + cfg = dispatch_ctx.query(target, workload) + if cfg.is_fallback: # if is fallback, clear query cache and return None + autotvm.task.clear_fallback_cache(target, workload) + return None -@autotvm.register_topi_schedule(schedule_conv2d_winograd_without_weight_transform, - 'bifrost', ['winograd']) -def schedule_conv2d_winograd_without_weight_transform_(cfg, outs): - """TOPI schedule callback""" - s = tvm.create_schedule([x.op for x in outs]) + topi_tmpl = workload[0] + new_attrs = {k: attrs[k] for k in attrs.keys()} - def _callback(op): - if 'winograd_conv2d_output' in op.tag: - _schedule_winograd(cfg, s, op) + strides = attrs.get_int_tuple("strides") + padding = attrs.get_int_tuple("padding") + dilation = attrs.get_int_tuple("dilation") + data_layout = attrs["data_layout"] + kernel_layout = attrs["kernel_layout"] + data, kernel = tinfos + out_dtype = out_type.dtype - traverse_inline(s, outs[0].op, _callback) - return s + idxd = tvm.indexdiv + + if topi_tmpl == "conv2d_nchw_spatial_pack.bifrost": + assert data_layout == "NCHW" and kernel_layout == "OIHW" + N, CI, H, W = get_const_tuple(data.shape) + CO, _, KH, KW = get_const_tuple(kernel.shape) + VC = cfg['tile_co'].size[-1] + new_attrs['kernel_layout'] = 'OIHW%do' % VC -##### REGISTER ALTER OP LAYOUT ##### -@conv2d_alter_layout.register(["bifrost"]) -def _alter_conv2d_layout(attrs, inputs, tinfos, F): - try: - return _alter_conv2d_layout_arm(attrs, inputs, tinfos, F) - except KeyError: # to filter out fallback opencl templates + new_data = data + new_kernel = tvm.placeholder((idxd(CO, VC), CI, KH, KW, VC), dtype=kernel.dtype) + new_workload = autotvm.task.args_to_workload( + [new_data, new_kernel, strides, padding, dilation, out_dtype], + "conv2d_nchw_spatial_pack.bifrost") + dispatch_ctx.update(target, new_workload, cfg) + + return relay.nn.conv2d(*inputs, **new_attrs) + elif topi_tmpl == "conv2d_nchw_winograd.bifrost": + assert data_layout == "NCHW" and kernel_layout == "OIHW" + N, CI, H, W = get_const_tuple(data.shape) + CO, _, KH, KW = get_const_tuple(kernel.shape) + tile_size = 2 + + weight_expr = inputs[1] + weight_expr = relay.nn.contrib_conv2d_winograd_weight_transform( + weight_expr, tile_size=tile_size) + weight_expr = relay.reshape( + weight_expr, newshape=(KH + tile_size - 1, KW + tile_size - 1, CO, CI)) + + new_attrs['tile_size'] = tile_size + + new_data = data + new_kernel = tvm.placeholder( + (KH + tile_size - 1, KW + tile_size -1, CO, CI), kernel.dtype) + new_workload = autotvm.task.args_to_workload( + [new_data, new_kernel, strides, padding, dilation, out_dtype], + 'conv2d_nchw_winograd.bifrost') + dispatch_ctx.update(target, new_workload, cfg) + + return relay.nn.contrib_conv2d_winograd_without_weight_transform( + inputs[0], weight_expr, **new_attrs) + else: return None diff --git a/topi/python/topi/bifrost/dense.py b/topi/python/topi/bifrost/dense.py index 114168f275144..dadb8db96bc8b 100644 --- a/topi/python/topi/bifrost/dense.py +++ b/topi/python/topi/bifrost/dense.py @@ -15,19 +15,22 @@ # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name,unused-variable -"""dense schedule on ARM Mali GPU""" +"""dense schedule on ARM Mali Biforst GPU""" from __future__ import absolute_import as _abs import tvm from tvm import autotvm -from .. import generic, nn +from .. import nn from ..util import traverse_inline -autotvm.register_topi_compute(nn.dense, 'bifrost', 'direct', nn.dense.fdefault) +@autotvm.register_topi_compute('dense.biforst') +def dense(_, data, weight, bias=None, out_dtype=None): + """Dense operator on Biforst""" + return nn.dense(data, weight, bias, out_dtype) -@autotvm.register_topi_schedule(generic.schedule_dense, 'bifrost', 'direct') +@autotvm.register_topi_schedule('dense.bifrost') def schedule_dense(cfg, outs): """Schedule for dense operator. @@ -66,7 +69,7 @@ def _callback(op): # fallback support if cfg.is_fallback: ref_log = autotvm.tophub.load_reference_log( - 'mali', 'rk3399', 'dense', 'direct') + 'mali', 'rk3399', 'dense.bifrost') cfg.fallback_with_reference_log(ref_log) ##### space definition end ##### diff --git a/topi/python/topi/bifrost/depthwise_conv2d.py b/topi/python/topi/bifrost/depthwise_conv2d.py index 305abee0bcd92..4f7b0db7f95f4 100644 --- a/topi/python/topi/bifrost/depthwise_conv2d.py +++ b/topi/python/topi/bifrost/depthwise_conv2d.py @@ -21,11 +21,9 @@ from __future__ import absolute_import as _abs import tvm -from .. import generic from .. import util from .. import tag -@generic.schedule_depthwise_conv2d_nchw.register(["bifrost"]) def schedule_depthwise_conv2d_nchw(outs): """Schedule for depthwise_conv2d nchw forward. diff --git a/topi/python/topi/cuda/__init__.py b/topi/python/topi/cuda/__init__.py index 4c20dd0075d62..6e38318a00627 100644 --- a/topi/python/topi/cuda/__init__.py +++ b/topi/python/topi/cuda/__init__.py @@ -19,23 +19,27 @@ """CUDA specific declaration and schedules.""" from __future__ import absolute_import as _abs -from . import conv1d, conv2d, depthwise_conv2d, conv2d_transpose_nchw, \ - deformable_conv2d, group_conv2d_nchw, dense, conv1d_transpose_ncw -from . import conv3d -from .conv2d_hwcn import schedule_conv2d_hwcn -from .depthwise_conv2d import schedule_depthwise_conv2d_backward_input_nhwc -from .depthwise_conv2d import schedule_depthwise_conv2d_backward_weight_nhwc -from .group_conv2d_nchw import schedule_conv2d_nchw_cuda +from .conv1d import * +from .conv1d_transpose_ncw import * +from .conv2d import * +from .conv2d_hwcn import * +from .conv2d_int8 import * +from .conv2d_winograd import * +from .depthwise_conv2d import * +from .group_conv2d_nchw import * +from . import conv2d_alter_op +from .conv2d_transpose_nchw import * +from .deformable_conv2d import * +from .conv3d import * from .reduction import schedule_reduce from .softmax import schedule_softmax from .injective import schedule_injective, schedule_elemwise, schedule_broadcast -from .dense import schedule_dense -from .pooling import schedule_pool, schedule_adaptive_pool +from .dense import * +from .pooling import * from .nn import schedule_lrn -from .batch_matmul import schedule_batch_matmul +from .batch_matmul import * from .vision import * -from . import ssd from .ssd import * -from .nms import * +from .nms import get_valid_counts, non_max_suppression from .rcnn import * from .sort import * diff --git a/topi/python/topi/cuda/batch_matmul.py b/topi/python/topi/cuda/batch_matmul.py index 24fc2a17aa183..e293c7ad41e88 100644 --- a/topi/python/topi/cuda/batch_matmul.py +++ b/topi/python/topi/cuda/batch_matmul.py @@ -19,34 +19,8 @@ from __future__ import absolute_import as _abs import tvm from tvm.contrib import cublas -from topi.nn import batch_matmul, batch_matmul_default -from .. import generic from ..util import traverse_inline, get_const_tuple, get_max_power2_factor -@batch_matmul.register(["cuda", "gpu"]) -def batch_matmul_cuda(x, y): - """Computes batch matrix multiplication of `x` and `y` when `x` and `y` are - data in batch. - - Parameters - ---------- - x : tvm.Tensor - 3-D with shape [batch, M, K] - - y : tvm.Tensor - 3-D with shape [batch, N, K] - - Returns - ------- - output : tvm.Tensor - 3-D with shape [batch, M, N] - """ - target = tvm.target.Target.current() - if target.target_name == "cuda" and "cublas" in target.libs: - return cublas.batch_matmul(x, y, False, True) - return batch_matmul_default(x, y) - -@generic.schedule_batch_matmul.register(["cuda", "gpu"]) def schedule_batch_matmul(outs): """Schedule for batch_matmul @@ -61,10 +35,6 @@ def schedule_batch_matmul(outs): s: Schedule The computation schedule for the op. """ - target = tvm.target.Target.current() - if target.target_name == "cuda" and "cublas" in target.libs: - return generic.schedule_extern(outs) - outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs s = tvm.create_schedule([x.op for x in outs]) @@ -134,3 +104,22 @@ def _callback(op): traverse_inline(s, outs[0].op, _callback) return s + +def batch_matmul_cublas(x, y): + """Computes batch matrix multiplication of `x` and `y` when `x` and `y` are + data in batch. + + Parameters + ---------- + x : tvm.Tensor + 3-D with shape [batch, M, K] + + y : tvm.Tensor + 3-D with shape [batch, N, K] + + Returns + ------- + output : tvm.Tensor + 3-D with shape [batch, M, N] + """ + return cublas.batch_matmul(x, y, False, True) diff --git a/topi/python/topi/cuda/conv1d.py b/topi/python/topi/cuda/conv1d.py index 43754a31df48d..56918e2bbba2b 100644 --- a/topi/python/topi/cuda/conv1d.py +++ b/topi/python/topi/cuda/conv1d.py @@ -19,67 +19,22 @@ import tvm from tvm import autotvm -from .. import nn, generic +from .. import nn from ..util import traverse_inline, get_const_tuple -@autotvm.register_topi_compute(nn.conv1d, ['cuda', 'gpu'], ['direct']) -def conv1d_cuda(cfg, - data, - kernel, - strides, - padding, - dilation, - layout='NCW', - out_dtype='float32'): - """ 1D convolution forward operator for cuda backend. +@autotvm.register_topi_compute("conv1d_ncw.cuda") +def conv1d_ncw(cfg, + data, + kernel, + strides, + padding, + dilation, + out_dtype='float32'): + return nn.conv1d_ncw(data, kernel, strides, padding, dilation, out_dtype) - Parameters - ---------- - cfg : ConfigEntity - The config for this template - - data : tvm.Tensor - 3-D input shape [batch, in_channel, in_width] for layout == 'NCW' - and [batch, in_width, in_channel] for layout == 'NWC' - - kernel : tvm.Tensor - 3-D kernel with shape [num_filter, in_channel, filter_size] for layout == 'NCW' - and [filter_size, in_channel, num_filter] for layout == 'NWC' - - strides : int or tuple - The spatial stride along width - padding : int or str - Padding size, or ['VALID', 'SAME'] - - dilation : int or tuple - Dilation rate if convolution should be dilated. - - layout : str - How input data is laid out, must be one of ['NCW', 'NWC'] - - out_dtype : str - The output data type. If None then output is same type as input. - """ - if out_dtype is None: - out_dtype = data.dtype - if isinstance(strides, (tuple, list)): - strides = strides[0] - if isinstance(dilation, (tuple, list)): - dilation = dilation[0] - - if layout == 'NCW': - return nn.conv1d_ncw(data, kernel, strides, padding, dilation, - out_dtype) - if layout == 'NWC': - return nn.conv1d_nwc(data, kernel, strides, padding, dilation, - out_dtype) - raise ValueError("This layout is not yet supported: {}".format(layout)) - - -@autotvm.register_topi_schedule(generic.schedule_conv1d_ncw, ["cuda", "gpu"], - ["direct"]) +@autotvm.register_topi_schedule("conv1d_ncw.cuda") def schedule_conv1d_ncw(cfg, outs): """TOPI schedule callback of conv1d ncw for cuda gpu @@ -193,8 +148,18 @@ def _callback(op): return s -@autotvm.register_topi_schedule(generic.schedule_conv1d_nwc, ["cuda", "gpu"], - ["direct"]) +@autotvm.register_topi_compute("conv1d_nwc.cuda") +def conv1d_nwc(cfg, + data, + kernel, + strides, + padding, + dilation, + out_dtype='float32'): + return nn.conv1d_nwc(data, kernel, strides, padding, dilation, out_dtype) + + +@autotvm.register_topi_schedule("conv1d_nwc.cuda") def schedule_conv1d_nwc(cfg, outs): """TOPI schedule callback of conv1d nwc for cuda gpu diff --git a/topi/python/topi/cuda/conv1d_transpose_ncw.py b/topi/python/topi/cuda/conv1d_transpose_ncw.py index 4cedbd529f024..4802a0d144a3f 100644 --- a/topi/python/topi/cuda/conv1d_transpose_ncw.py +++ b/topi/python/topi/cuda/conv1d_transpose_ncw.py @@ -19,11 +19,11 @@ import tvm from tvm import autotvm -from .. import nn, generic +from .. import nn from ..util import get_const_tuple, traverse_inline -@autotvm.task.register_topi_compute(nn.conv1d_transpose_ncw, ['cuda', 'gpu'], "direct") -def conv1d_transpose_ncw_cuda(cfg, data, kernel, stride, padding, out_dtype): +@autotvm.task.register_topi_compute("conv1d_transpose_nchw.cuda") +def conv1d_transpose_ncw(cfg, data, kernel, stride, padding, out_dtype): """Transposed 1D convolution ncw forward operator. Parameters @@ -79,9 +79,8 @@ def conv1d_transpose_ncw_cuda(cfg, data, kernel, stride, padding, out_dtype): return data_out -@autotvm.task.register_topi_schedule(generic.schedule_conv1d_transpose_ncw, - ['cuda', 'gpu'], 'direct') -def schedule_conv1d_transpose_ncw_cuda(cfg, outs): +@autotvm.task.register_topi_schedule("conv1d_transpose_nchw.cuda") +def schedule_conv1d_transpose_ncw(cfg, outs): """TOPI Schedule callback for conv1d_transpose operator. Parameters diff --git a/topi/python/topi/cuda/conv2d.py b/topi/python/topi/cuda/conv2d.py index f26069cfc3f0c..6fabb9d076ca6 100644 --- a/topi/python/topi/cuda/conv2d.py +++ b/topi/python/topi/cuda/conv2d.py @@ -23,179 +23,91 @@ from .. import nn, generic from ..nn.util import get_pad_tuple from ..util import get_const_tuple, traverse_inline - from .conv2d_direct import schedule_direct_cuda -from .conv2d_winograd import winograd_cuda, schedule_winograd_cuda -from .conv2d_int8 import conv2d_NCHWc_int8, schedule_conv2d_NCHWc_int8 - - -@autotvm.register_topi_compute(nn.conv2d, ['cuda', 'gpu'], ['direct', 'winograd', 'int8']) -def conv2d_cuda(cfg, data, kernel, strides, padding, dilation, layout='NCHW', out_dtype='float32'): - """Conv2D operator for cuda backend. - - Parameters - ---------- - cfg: ConfigEntity - The config for this template - - data : tvm.Tensor - 4-D with shape [batch, in_channel, in_height, in_width] or - 5-D with shape [batch, ic_chunk, in_height, in_width, ic_block] - - kernel : tvm.Tensor - 4-D with shape [num_filter, in_channel, filter_height, filter_width] or - 6-D with shape [num_filter_chunk, in_channel_chunk, filter_height, - filter_width, num_filter_block, in_channel_block] - - strides : int or a list/tuple of two ints - stride size, or [stride_height, stride_width] - - padding : int or a list/tuple of 2 or 4 ints - padding size, or - [pad_height, pad_width] for 2 ints, or - [pad_top, pad_left, pad_bottom, pad_right] for 4 ints - dilation: int or a list/tuple of two ints - dilation size, or [dilation_height, dilation_width] - layout : str - layout of data +@autotvm.register_topi_compute("conv2d_nchw.cuda") +def conv2d_nchw(cfg, data, kernel, strides, padding, dilation, out_dtype='float32'): + return nn.conv2d_nchw(data, kernel, strides, padding, dilation, out_dtype) - out_dtype: str - The output type. This is used for mixed precision. - - Returns - ------- - output : tvm.Tensor - 4-D with shape [batch, out_channel, out_height, out_width] - """ - target = tvm.target.Target.current() - - if "cudnn" in target.libs: - if layout == 'NCHW': - tensor_format = 0 # CUDNN_TENSOR_NCHW - N, _, H, W = get_const_tuple(data.shape) - elif layout == 'NHWC': - tensor_format = 1 # CUDNN_TENSOR_NHWC - N, H, W, _ = get_const_tuple(data.shape) - else: - raise ValueError("Unsupported layout %s in cudnn" % layout) - CO, CI, KH, KW = get_const_tuple(kernel.shape) - - # handle dilation - stride_h, stride_w = (strides, strides) if isinstance(strides, int) else strides - dilation_h, dilation_w = (dilation, dilation) if isinstance(dilation, int) else dilation - - if isinstance(padding, (list, tuple)) and len(padding) == 4 and \ - (padding[0] != padding[2] or padding[1] != padding[3]): - raise ValueError("Cudnn doesn't support asymmetric padding.") - pt, pl, pb, pr = get_pad_tuple(padding, (KH, KW)) - OH = (H + pt + pb - KH) // stride_h + 1 - OW = (W + pl + pr - KW) // stride_w + 1 - cfg.add_flop(2 * N * OH * OW * CO * CI * ((KH - 1) * dilation_h + 1) *\ - ((KW - 1) * dilation_w + 1)) - - if data.dtype == "int8" or kernel.dtype == "int8": - if layout == 'NCHW': - raise ValueError("NCHW layout do not support int8 in cudnn") - dtype = "int32" - else: - dtype = data.dtype - - return cudnn.conv_forward(data, - kernel, - [pt, pl], # cudnn padding pt, pl on both sides of input - [stride_h, stride_w], - [dilation_h, dilation_w], - conv_mode=1, - tensor_format=tensor_format, - algo=-1, # let CUDNN choose the best algo - conv_dtype=dtype) - - if cfg.template_key == 'winograd': - return winograd_cuda(cfg, data, kernel, strides, padding, dilation, layout, out_dtype, - pre_computed=False) - if cfg.template_key == 'int8': - if (data.dtype == 'int8' or data.dtype == 'uint8'): - return conv2d_NCHWc_int8( - cfg, data, kernel, strides, padding, dilation, layout, out_dtype) - - if layout == 'NCHW': - return nn.conv2d_nchw(data, kernel, strides, padding, dilation, out_dtype) - if layout == 'HWCN': - return nn.conv2d_hwcn(data, kernel, strides, padding, dilation, out_dtype) - if layout == 'NHWC': - return nn.conv2d_nhwc(data, kernel, strides, padding, dilation, out_dtype) - raise ValueError("not support this layout {} yet".format(layout)) - - -@autotvm.register_topi_schedule(generic.schedule_conv2d_nchw, ["cuda", "gpu"], - ["direct", 'winograd', "int8"]) -def schedule_conv2d_nchw_cuda(cfg, outs): - """TOPI schedule callback of conv2d for cuda gpu - - Parameters - ---------- - cfg: ConfigEntity - The config for this template - - outs: Array of Tensor - The computation graph description of conv2d - in the format of an array of tensors. - - Returns - ------- - s: Schedule - The computation schedule for conv2d. - """ - target = tvm.target.Target.current() - if 'cudnn' in target.libs: - return generic.schedule_extern(outs) +@autotvm.register_topi_schedule("conv2d_nchw.cuda") +def schedule_conv2d_nchw(cfg, outs): outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs s = tvm.create_schedule([x.op for x in outs]) def _callback(op): if op.tag == 'conv2d_nchw': schedule_direct_cuda(cfg, s, op.output(0)) - if op.tag == 'conv2d_nchw_winograd': - schedule_winograd_cuda(cfg, s, op.output(0), pre_computed=False) - if op.tag == "conv2d_NCHWc_int8": - schedule_conv2d_NCHWc_int8(cfg, s, op.output(0)) traverse_inline(s, outs[0].op, _callback) return s -@autotvm.register_topi_schedule(generic.schedule_conv2d_nhwc, ["cuda", "gpu"], - ["direct"]) -def schedule_conv2d_nhwc_cuda(cfg, outs): - """TOPI schedule for CUDA conv2d_nhwc - - Parameters - ---------- - cfg: ConfigEntity - The config for this template - - outs: Array of Tensor - The computation graph description of conv2d - in the format of an array of tensors. - - Returns - ------- - s: Schedule - The computation schedule for conv2d. - """ - target = tvm.target.Target.current() - if 'cudnn' in target.libs: - return generic.schedule_extern(outs) - - outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs - s = tvm.create_schedule([x.op for x in outs]) +# TODO(@alexgl-github): It's invalid to call schedule_direct_cuda for NHWC layout +# as it assumes the input layout to be NCHW. Please fix this. +# @autotvm.register_topi_compute("conv2d_nhwc.cuda") +# def conv2d_nhwc(cfg, data, kernel, strides, padding, dilation, out_dtype='float32'): +# return nn.conv2d_nhwc(data, kernel, strides, padding, dilation, out_dtype) +# +# +# @autotvm.register_topi_schedule("conv2d_nhwc.cuda") +# def schedule_conv2d_nhwc(cfg, outs): +# outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs +# s = tvm.create_schedule([x.op for x in outs]) +# +# def _callback(op): +# if op.tag == 'conv2d_nhwc': +# schedule_direct_cuda(cfg, s, op.output(0)) +# +# traverse_inline(s, outs[0].op, _callback) +# return s - def _callback(op): - if op.tag == 'conv2d_nhwc': - schedule_direct_cuda(cfg, s, op.output(0)) - traverse_inline(s, outs[0].op, _callback) - return s +@autotvm.register_topi_compute("conv2d_cudnn.cuda") +def conv2d_cudnn(cfg, data, kernel, strides, padding, dilation, layout='NCHW', + out_dtype='float32'): + if layout == 'NCHW': + tensor_format = 0 # CUDNN_TENSOR_NCHW + N, _, H, W = get_const_tuple(data.shape) + elif layout == 'NHWC': + tensor_format = 1 # CUDNN_TENSOR_NHWC + N, H, W, _ = get_const_tuple(data.shape) + else: + raise ValueError("Unsupported layout %s in cudnn" % layout) + CO, CI, KH, KW = get_const_tuple(kernel.shape) + + # handle dilation + stride_h, stride_w = (strides, strides) if isinstance(strides, int) else strides + dilation_h, dilation_w = (dilation, dilation) if isinstance(dilation, int) else dilation + + if isinstance(padding, (list, tuple)) and len(padding) == 4 and \ + (padding[0] != padding[2] or padding[1] != padding[3]): + raise ValueError("Cudnn doesn't support asymmetric padding.") + pt, pl, pb, pr = get_pad_tuple(padding, (KH, KW)) + OH = (H + pt + pb - KH) // stride_h + 1 + OW = (W + pl + pr - KW) // stride_w + 1 + cfg.add_flop(2 * N * OH * OW * CO * CI * ((KH - 1) * dilation_h + 1) * \ + ((KW - 1) * dilation_w + 1)) + + if data.dtype == "int8" or kernel.dtype == "int8": + if layout == 'NCHW': + raise ValueError("NCHW layout do not support int8 in cudnn") + dtype = "int32" + else: + dtype = data.dtype + + return cudnn.conv_forward(data, + kernel, + [pt, pl], # cudnn padding pt, pl on both sides of input + [stride_h, stride_w], + [dilation_h, dilation_w], + conv_mode=1, + tensor_format=tensor_format, + algo=-1, # let CUDNN choose the best algo + conv_dtype=dtype) + + +@autotvm.register_topi_schedule("conv2d_cudnn.cuda") +def schedule_conv2d_cudnn(cfg, outs): + return generic.schedule_extern(outs) diff --git a/topi/python/topi/cuda/conv2d_alter_op.py b/topi/python/topi/cuda/conv2d_alter_op.py new file mode 100644 index 0000000000000..614158c1ac3d6 --- /dev/null +++ b/topi/python/topi/cuda/conv2d_alter_op.py @@ -0,0 +1,134 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name,unused-variable,unused-argument +"""Conv2D alter op and legalize functions for cuda backend""" + +import logging +import tvm +from tvm import relay +from tvm import autotvm + +from .. import nn +from ..util import get_const_tuple +from .conv2d_winograd import _infer_tile_size + +logger = logging.getLogger('topi') + +@nn.conv2d_alter_layout.register(["cuda", "gpu"]) +def _alter_conv2d_layout(attrs, inputs, tinfos, out_type): + target = tvm.target.current_target(allow_none=False) + dispatch_ctx = autotvm.task.DispatchContext.current + + _, outs = relay.backend.compile_engine.select_implement( + relay.op.get("nn.conv2d"), attrs, tinfos, out_type, target) + workload = autotvm.task.get_workload(outs) + if workload is None: + # The best implementation is not an AutoTVM template, + # we then assume it's not necessary to alter this op. + return None + cfg = dispatch_ctx.query(target, workload) + if cfg.is_fallback: # if is fallback, clear query cache and return None + autotvm.task.clear_fallback_cache(target, workload) + return None + + topi_tmpl = workload[0] + new_attrs = {k: attrs[k] for k in attrs.keys()} + + strides = attrs.get_int_tuple("strides") + padding = attrs.get_int_tuple("padding") + dilation = attrs.get_int_tuple("dilation") + groups = attrs.get_int('groups') + data_layout = attrs["data_layout"] + kernel_layout = attrs["kernel_layout"] + data, kernel = tinfos + out_dtype = out_type.dtype + + if topi_tmpl == "conv2d_NCHWc_int8.cuda": + assert data_layout == "NCHW" and kernel_layout == "OIHW" + N, CI, H, W = get_const_tuple(data.shape) + CO, _, KH, KW = get_const_tuple(kernel.shape) + + new_layout = 'NCHW4c' + new_attrs["channels"] = CO + new_attrs["data_layout"] = new_layout + new_attrs['out_layout'] = new_layout + new_attrs['kernel_layout'] = 'OIHW4o4i' + ic_block_factor = oc_block_factor = 4 + + # Store the same config for the altered operator (workload) + new_data = tvm.placeholder((N, CI // ic_block_factor, H, W, ic_block_factor), + dtype=data.dtype) + new_kernel = tvm.placeholder((CO // oc_block_factor, CI // ic_block_factor, KH, KW, \ + oc_block_factor, ic_block_factor), dtype=kernel.dtype) + new_workload = autotvm.task.args_to_workload( + [new_data, new_kernel, strides, padding, dilation, new_layout, out_dtype], + "conv2d_NCHWc_int8.cuda") + dispatch_ctx.update(target, new_workload, cfg) + return relay.nn.conv2d(*inputs, **new_attrs) + elif topi_tmpl == "conv2d_nchw_winograd.cuda": + if dilation != (1, 1): + logger.warning("Does not support weight pre-transform for dilated convolution.") + return None + + assert data_layout == "NCHW" and kernel_layout == "OIHW" + N, CI, H, W = get_const_tuple(data.shape) + CO, _, KH, KW = get_const_tuple(kernel.shape) + + # pre-compute weight transformation in winograd + tile_size = _infer_tile_size(tinfos[0], tinfos[1]) + + weight = relay.nn.contrib_conv2d_winograd_weight_transform(inputs[1], + tile_size=tile_size) + weight = relay.transpose(weight, axes=[0, 1, 3, 2]) + new_attrs['tile_size'] = tile_size + new_attrs['channels'] = CO + + # Store the same config for the altered operator (workload) + new_data = data + new_weight = tvm.placeholder((KH + tile_size - 1, KW + tile_size - 1, CI, CO), + dtype=kernel.dtype) + new_workload = autotvm.task.args_to_workload( + [new_data, new_weight, strides, padding, dilation, out_dtype, tile_size], + "conv2d_nchw_winograd_without_weight_transform.cuda") + dispatch_ctx.update(target, new_workload, cfg) + return relay.nn.contrib_conv2d_winograd_without_weight_transform( + inputs[0], weight, **new_attrs) + elif topi_tmpl == "group_conv2d_NCHWc_int8.cuda": + assert data_layout == "NCHW" and kernel_layout == "OIHW" + N, CI, H, W = get_const_tuple(data.shape) + CO, _, KH, KW = get_const_tuple(kernel.shape) + + new_layout = 'NCHW4c' + new_attrs["channels"] = CO + new_attrs["data_layout"] = new_layout + new_attrs['out_layout'] = new_layout + new_attrs['kernel_layout'] = 'OIHW4o4i' + ic_block_factor = oc_block_factor = 4 + + # Store the same config for the altered operator (workload) + new_data = tvm.placeholder((N, CI // ic_block_factor, H, W, ic_block_factor), + dtype=data.dtype) + new_kernel = tvm.placeholder((CO // oc_block_factor, CI // ic_block_factor // groups, + KH, KW, oc_block_factor, ic_block_factor), + dtype=kernel.dtype) + new_workload = autotvm.task.args_to_workload( + [new_data, new_kernel, strides, padding, dilation, groups, out_dtype], + "group_conv2d_NCHWc_int8.cuda") + dispatch_ctx.update(target, new_workload, cfg) + return relay.nn.conv2d(*inputs, **new_attrs) + else: + return None diff --git a/topi/python/topi/cuda/conv2d_direct.py b/topi/python/topi/cuda/conv2d_direct.py index b7df88579f493..2fab8cf122536 100644 --- a/topi/python/topi/cuda/conv2d_direct.py +++ b/topi/python/topi/cuda/conv2d_direct.py @@ -43,7 +43,7 @@ def schedule_direct_cuda(cfg, s, conv): # fallback support if cfg.is_fallback: ref_log = autotvm.tophub.load_reference_log( - target.target_name, target.model, 'conv2d', 'direct') + target.target_name, target.model, 'conv2d_nchw.cuda') cfg.fallback_with_reference_log(ref_log) ##### space definition end ##### diff --git a/topi/python/topi/cuda/conv2d_hwcn.py b/topi/python/topi/cuda/conv2d_hwcn.py index 18a624a67aea5..635bf4d2fd6e6 100644 --- a/topi/python/topi/cuda/conv2d_hwcn.py +++ b/topi/python/topi/cuda/conv2d_hwcn.py @@ -20,10 +20,14 @@ from tvm import autotvm from tvm.autotvm.task.space import SplitEntity -from .. import generic, tag +from .. import nn, tag +@autotvm.register_topi_compute("conv2d_hwcn.cuda") +def conv2d_hwcn(cfg, data, kernel, strides, padding, dilation, out_dtype='float32'): + return nn.conv2d_hwcn(data, kernel, strides, padding, dilation, out_dtype) -@autotvm.register_topi_schedule(generic.schedule_conv2d_hwcn, ["cuda", "gpu"], ["direct"]) + +@autotvm.register_topi_schedule("conv2d_hwcn.cuda") def schedule_conv2d_hwcn(cfg, outs): """Schedule for conv2d_hwcn and any element-wise operations. diff --git a/topi/python/topi/cuda/conv2d_int8.py b/topi/python/topi/cuda/conv2d_int8.py index 580cf96b53e83..cab1191be5fc9 100644 --- a/topi/python/topi/cuda/conv2d_int8.py +++ b/topi/python/topi/cuda/conv2d_int8.py @@ -23,9 +23,10 @@ from .tensor_intrin import dp4a from ..nn.pad import pad from ..nn.util import get_pad_tuple -from ..util import get_const_tuple +from ..util import get_const_tuple, traverse_inline +@autotvm.register_topi_compute("conv2d_NCHWc_int8.cuda") def conv2d_NCHWc_int8(cfg, data, kernel, stride, padding, dilation, layout, out_dtype): """Convolution operator in NCHW[x]c layout for int8. @@ -152,7 +153,20 @@ def conv2d_NCHWc_int8(cfg, data, kernel, stride, padding, dilation, layout, out_ _dp4a = dp4a('shared', 'shared', 'local') -def schedule_conv2d_NCHWc_int8(cfg, s, output): +@autotvm.register_topi_schedule("conv2d_NCHWc_int8.cuda") +def schedule_conv2d_NCHWc_int8(cfg, outs): + outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs + s = tvm.create_schedule([x.op for x in outs]) + + def _callback(op): + if op.tag == 'conv2d_NCHWc_int8': + _schedule_conv2d_NCHWc_int8(cfg, s, op.output(0)) + + traverse_inline(s, outs[0].op, _callback) + return s + + +def _schedule_conv2d_NCHWc_int8(cfg, s, output): """Schedule conv2d int8 NCHWc template""" conv = output.op.input_tensors[0] packed_data, packed_kernel = conv.op.input_tensors diff --git a/topi/python/topi/cuda/conv2d_transpose_nchw.py b/topi/python/topi/cuda/conv2d_transpose_nchw.py index be9f31567bc9f..c39a2fcac6a6f 100644 --- a/topi/python/topi/cuda/conv2d_transpose_nchw.py +++ b/topi/python/topi/cuda/conv2d_transpose_nchw.py @@ -20,12 +20,12 @@ import tvm from tvm import autotvm from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity -from .. import nn, generic +from .. import nn from ..util import get_const_tuple, traverse_inline -@autotvm.task.register_topi_compute(nn.conv2d_transpose_nchw, ['cuda', 'gpu'], "direct") -def conv2d_transpose_nchw_cuda(cfg, data, kernel, stride, padding, out_dtype): +@autotvm.register_topi_compute("nn.conv2d_transpose_nchw.cuda") +def conv2d_transpose_nchw(cfg, data, kernel, stride, padding, out_dtype): """Transposed 2D convolution nchw forward operator. Parameters @@ -101,9 +101,8 @@ def conv2d_transpose_nchw_cuda(cfg, data, kernel, stride, padding, out_dtype): return data_out -@autotvm.task.register_topi_schedule(generic.schedule_conv2d_transpose_nchw, - ['cuda', 'gpu'], 'direct') -def schedule_conv2d_transpose_nchw_cuda(cfg, outs): +@autotvm.register_topi_schedule("nn.conv2d_transpose_nchw.cuda") +def schedule_conv2d_transpose_nchw(cfg, outs): """TOPI Schedule callback for conv2d transpose operator. Parameters diff --git a/topi/python/topi/cuda/conv2d_winograd.py b/topi/python/topi/cuda/conv2d_winograd.py index 37307d62357d9..0f22a48bd3689 100644 --- a/topi/python/topi/cuda/conv2d_winograd.py +++ b/topi/python/topi/cuda/conv2d_winograd.py @@ -22,9 +22,7 @@ from tvm import autotvm from .. import nn -from ..nn import conv2d, group_conv2d_nchw, conv2d_winograd_without_weight_transform from ..util import get_const_int, get_const_tuple, traverse_inline -from ..generic import schedule_conv2d_winograd_without_weight_transform from ..nn.winograd_util import winograd_transform_matrices @@ -37,10 +35,9 @@ def _infer_tile_size(data, kernel): return 4 return 2 -def winograd_cuda(cfg, data, kernel, strides, padding, dilation, layout, out_dtype, pre_computed): +def winograd_cuda(cfg, data, kernel, strides, padding, dilation, out_dtype, + pre_computed): """Compute declaration for winograd""" - assert layout == 'NCHW' - tile_size = _infer_tile_size(data, kernel) N, CI, H, W = get_const_tuple(data.shape) @@ -53,7 +50,7 @@ def winograd_cuda(cfg, data, kernel, strides, padding, dilation, layout, out_dty if not pre_computed: # kernel tensor is raw tensor, do strict check if dilation_h != 1 or dilation_w != 1: - kernel = dilation(kernel, (1, 1, dilation_h, dilation_w)) + kernel = nn.dilate(kernel, (1, 1, dilation_h, dilation_w)) CO, CI, KH, KW = get_const_tuple(kernel.shape) alpha = KW + tile_size - 1 assert HSTR == 1 and WSTR == 1 and KH == KW @@ -282,161 +279,38 @@ def schedule_winograd_cuda(cfg, s, output, pre_computed): return s -##### REGISTER TOPI COMPUTE / SCHEDULE FOR WINOGRAD WITH WEIGHT TRANSFORM ##### -@autotvm.register_topi_compute(conv2d_winograd_without_weight_transform, - ['cuda', 'gpu'], ['winograd']) -def conv2d_winograd_ww(cfg, data, kernel, strides, padding, dilation, layout, out_dtype, tile_size): - return winograd_cuda(cfg, data, kernel, strides, padding, dilation, layout, out_dtype, - pre_computed=True) - +@autotvm.register_topi_compute("conv2d_nchw_winograd.cuda") +def conv2d_nchw_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype): + return winograd_cuda(cfg, data, kernel, strides, padding, dilation, out_dtype, + pre_computed=False) -@autotvm.register_topi_schedule(schedule_conv2d_winograd_without_weight_transform, - ['cuda', 'gpu'], ['winograd']) -def schedule_conv2d_winograd_without_weight_transform_cuda(cfg, outs): - """TOPI schedule callback""" +@autotvm.register_topi_schedule("conv2d_nchw_winograd.cuda") +def schedule_conv2d_nchw_winograd(cfg, outs): s = tvm.create_schedule([x.op for x in outs]) def _callback(op): if 'conv2d_nchw_winograd' in op.tag: - schedule_winograd_cuda(cfg, s, op.output(0), pre_computed=True) + schedule_winograd_cuda(cfg, s, op.output(0), pre_computed=False) traverse_inline(s, outs[0].op, _callback) return s -##### REGISTER ALTER OP LAYOUT ##### -@nn.conv2d_alter_layout.register(["cuda", "gpu"]) -def _alter_conv2d_layout(attrs, inputs, tinfos, F): - """Alter op layout for pre-computing kernel transformation - - Parameters - ---------- - attrs : tvm.ir.Attrs - Attributes of current convolution - inputs : tvm.relay.Expr - Grouped input symbols - tinfos : list - Input shape and dtype - F: symbol - The context, can be relay.op - - Note - ---- - Unlike other TOPI functions, this function operates on both graph level and operator level, - so we have to pass 'F' to make it support our two versions of graph IR, Relay. - """ - if 'cudnn' in tvm.target.Target.current().libs or 'miopen' in tvm.target.Target.current().libs: - return None - - copy_inputs = list(inputs) - new_attrs = {k: attrs[k] for k in attrs.keys()} - - - new_attrs["channels"] = inputs[1].checked_type.shape[attrs['kernel_layout'].index('O')] - - strides = attrs.get_int_tuple("strides") - padding = attrs.get_int_tuple("padding") - dilation = attrs.get_int_tuple("dilation") - groups = attrs.get_int('groups') - data_layout_key = "data_layout" if "data_layout" in new_attrs else "layout" - layout = attrs[data_layout_key] - out_dtype = attrs["out_dtype"] - if out_dtype in ("", "same"): - out_dtype = tinfos[0].dtype - - data, kernel = tinfos[0:2] - N, CI, H, W = get_const_tuple(data.shape) - CO, _, KH, KW = get_const_tuple(kernel.shape) +@autotvm.register_topi_compute("conv2d_nchw_winograd_without_weight_transform.cuda") +def conv2d_nchw_winograd_without_weight_transform(cfg, data, kernel, strides, + padding, dilation, out_dtype): + return winograd_cuda(cfg, data, kernel, strides, padding, dilation, out_dtype, + pre_computed=True) - dispatch_ctx = autotvm.DispatchContext.current - target = tvm.target.Target.current() - if groups == 1: - # query config of this workload - workload = autotvm.task.args_to_workload( - [tinfos[0], tinfos[1], strides, padding, dilation, layout, out_dtype], conv2d) - cfg = autotvm.DispatchContext.current.query(target, workload) - - if cfg.is_fallback: # if is fallback, clear query cache and return None - autotvm.task.clear_fallback_cache(target, workload) - return None - - if cfg.template_key == 'direct': - return None - - if cfg.template_key == 'int8': - assert 'cuda' in target.keys - new_layout = 'NCHW4c' - new_attrs[data_layout_key] = new_layout - new_attrs['out_layout'] = new_layout - new_attrs['kernel_layout'] = 'OIHW4o4i' - ic_block_factor = oc_block_factor = 4 - - # Store the same config for the altered operator (workload) - new_data = tvm.placeholder((N, CI // ic_block_factor, H, W, ic_block_factor), - dtype=data.dtype) - new_kernel = tvm.placeholder((CO // oc_block_factor, CI // ic_block_factor, KH, KW,\ - oc_block_factor, ic_block_factor), dtype=kernel.dtype) - new_workload = autotvm.task.args_to_workload( - [new_data, new_kernel, strides, padding, dilation, new_layout, out_dtype], - conv2d - ) - dispatch_ctx.update(target, new_workload, cfg) - return F.nn.conv2d(*copy_inputs, **new_attrs) - - if attrs.get_int_tuple("dilation") != (1, 1): - logger.warning("Does not support weight pre-transform for dilated convolution.") - return None - - # pre-compute weight transformation in winograd - tile_size = _infer_tile_size(tinfos[0], tinfos[1]) - - weight = F.nn.contrib_conv2d_winograd_weight_transform(copy_inputs[1], - tile_size=tile_size) - weight = F.transpose(weight, axes=[0, 1, 3, 2]) - copy_inputs[1] = weight - new_attrs['tile_size'] = tile_size - - # Store the same config for the altered operator (workload) - new_data = data - new_weight = tvm.placeholder((KH + tile_size - 1, KW + tile_size - 1, CI, CO), - dtype=kernel.dtype) - new_workload = autotvm.task.args_to_workload( - [new_data, new_weight, strides, padding, dilation, layout, out_dtype, tile_size], - conv2d_winograd_without_weight_transform - ) - dispatch_ctx.update(target, new_workload, cfg) - return F.nn.contrib_conv2d_winograd_without_weight_transform(*copy_inputs, **new_attrs) - if groups != CI: - workload = autotvm.task.args_to_workload( - [tinfos[0], tinfos[1], strides, padding, dilation, groups, out_dtype], - group_conv2d_nchw) - cfg = autotvm.DispatchContext.current.query(target, workload) - - if cfg.is_fallback: # if is fallback, clear query cache and return None - autotvm.task.clear_fallback_cache(target, workload) - return None - - if cfg.template_key == 'int8': - assert 'cuda' in target.keys - new_layout = 'NCHW4c' - new_attrs[data_layout_key] = new_layout - new_attrs['out_layout'] = new_layout - new_attrs['kernel_layout'] = 'OIHW4o4i' - ic_block_factor = oc_block_factor = 4 - - # Store the same config for the altered operator (workload) - new_data = tvm.placeholder((N, CI // ic_block_factor, H, W, ic_block_factor), - dtype=data.dtype) - new_kernel = tvm.placeholder((CO // oc_block_factor, CI // ic_block_factor // groups,\ - KH, KW, oc_block_factor, ic_block_factor), - dtype=kernel.dtype) - new_workload = autotvm.task.args_to_workload( - [new_data, new_kernel, strides, padding, dilation, groups, out_dtype], - group_conv2d_nchw - ) - dispatch_ctx.update(target, new_workload, cfg) - return F.nn.conv2d(*copy_inputs, **new_attrs) - - # do nothing for depthwise convolution - return None +@autotvm.register_topi_schedule("conv2d_nchw_winograd_without_weight_transform.cuda") +def schedule_conv2d_nchw_winograd_without_weight_transform_cuda(cfg, outs): + """TOPI schedule callback""" + s = tvm.create_schedule([x.op for x in outs]) + + def _callback(op): + if 'conv2d_nchw_winograd' in op.tag: + schedule_winograd_cuda(cfg, s, op.output(0), pre_computed=True) + + traverse_inline(s, outs[0].op, _callback) + return s diff --git a/topi/python/topi/cuda/conv3d.py b/topi/python/topi/cuda/conv3d.py index b46f284ef5b7e..016fc7fb757c4 100644 --- a/topi/python/topi/cuda/conv3d.py +++ b/topi/python/topi/cuda/conv3d.py @@ -21,14 +21,13 @@ from tvm.contrib import cudnn from .. import nn, generic -from ..nn.util import get_pad_tuple3d from ..util import get_const_tuple, traverse_inline +from .conv3d_direct import schedule_direct_conv3d_cuda -from .conv3d_direct import schedule_direct_3d_cuda - -@autotvm.register_topi_compute(nn.conv3d, ['cuda', 'gpu'], ['direct']) -def conv3d_cuda(cfg, data, kernel, strides, padding, dilation, layout='NCDHW', out_dtype='float32'): +@autotvm.register_topi_compute("conv3d_ncdhw.cuda") +def conv3d_ncdhw(cfg, data, kernel, strides, padding, dilation, layout='NCDHW', + out_dtype='float32'): """Conv3D operator for cuda backend. Parameters @@ -45,10 +44,8 @@ def conv3d_cuda(cfg, data, kernel, strides, padding, dilation, layout='NCDHW', o strides : int or a list/tuple of three ints stride size, or [stride_depth, stride_height, stride_width] - padding : int or a list/tuple of 3 or 6 ints - padding size, or - [pad_depth, pad_height, pad_width] for 3 ints, or - [pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right] for 6 ints + padding : int or a list/tuple of three ints + padding size, or [pad_depth, pad_height, pad_width] dilation: int or a list/tuple of three ints dilation size, or [dilation_depth, dilation_height, dilation_width] @@ -64,52 +61,11 @@ def conv3d_cuda(cfg, data, kernel, strides, padding, dilation, layout='NCDHW', o output : tvm.Tensor 5-D with shape [batch, out_channel, out_depth, out_height, out_width] """ - target = tvm.target.Target.current() - - if "cudnn" in target.libs: - if layout == 'NCDHW': - tensor_format = 0 # CUDNN_TENSOR_NCHW - N, _, D, H, W = get_const_tuple(data.shape) - elif layout == 'NDHWC': - tensor_format = 1 # CUDNN_TENSOR_NHWC - N, D, H, W, _ = get_const_tuple(data.shape) - else: - raise ValueError("Unsupported layout %s in cudnn" % layout) - CO, CI, KD, KH, KW = get_const_tuple(kernel.shape) - - # handle dilation - stride_d, stride_h, stride_w = (strides, strides, strides) if isinstance(strides, int) \ - else strides - if isinstance(padding, (list, tuple)) and len(padding) > 3: - raise ValueError("Cudnn doesn't support asymmetric padding.") - pf, pt, pl, pk, pb, pr = get_pad_tuple3d(padding, (KD, KH, KW)) - dilation_d, dilation_h, dilation_w = (dilation, dilation, dilation) if \ - isinstance(dilation, int) else dilation - - OD = (D + pf + pk - KD) // stride_d + 1 - OH = (H + pt + pb - KH) // stride_h + 1 - OW = (W + pl + pr - KW) // stride_w + 1 - cfg.add_flop(2 * N * OD * OH * OW * CO * CI * ((KD - 1) * dilation_d + 1) *\ - ((KH - 1) * dilation_h + 1) * ((KW - 1) * dilation_w + 1)) - - return cudnn.conv_forward(data, - kernel, - [pf, pt, pl], # cudnn padding pt, pl on both sides of input - [stride_d, stride_h, stride_w], - [dilation_d, dilation_h, dilation_w], - conv_mode=1, - tensor_format=tensor_format, - algo=-1, # let CUDNN choose the best algo - conv_dtype=data.dtype) - - if layout == 'NCDHW': - return nn.conv3d_ncdhw(data, kernel, strides, padding, dilation, out_dtype) - raise ValueError("not support this layout {} yet".format(layout)) + return nn.conv3d_ncdhw(data, kernel, strides, padding, dilation, layout, out_dtype) -@autotvm.register_topi_schedule(generic.schedule_conv3d_ncdhw, ["cuda", "gpu"], - ["direct"]) -def schedule_conv3d_ncdhw_cuda(cfg, outs): +@autotvm.register_topi_schedule("conv3d_ncdhw.cuda") +def schedule_conv3d_ncdhw(cfg, outs): """TOPI schedule callback of conv3d for cuda gpu Parameters @@ -126,24 +82,59 @@ def schedule_conv3d_ncdhw_cuda(cfg, outs): s: Schedule The computation schedule for conv2d. """ - target = tvm.target.Target.current() - if 'cudnn' in target.libs: - return generic.schedule_extern(outs) - outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs s = tvm.create_schedule([x.op for x in outs]) def _callback(op): if op.tag == 'conv3d_ncdhw': - schedule_direct_3d_cuda(cfg, s, op.output(0)) + schedule_direct_conv3d_cuda(cfg, s, op.output(0), "NCDHW", + "conv3d_ncdhw.cuda") traverse_inline(s, outs[0].op, _callback) return s -@autotvm.register_topi_schedule(generic.schedule_conv3d_ndhwc, ["cuda", "gpu"], - ["direct"]) -def schedule_conv3d_ndhwc_cuda(cfg, outs): +@autotvm.register_topi_compute("conv3d_ndhwc.cuda") +def conv3d_ndhwc(cfg, data, kernel, strides, padding, dilation, layout='NDHWC', + out_dtype='float32'): + """Conv3D operator for cuda backend. + + Parameters + ---------- + cfg: ConfigEntity + The config for this template + + data : tvm.Tensor + 5-D with shape [batch, in_channel, in_depth, in_height, in_width] + + kernel : tvm.Tensor + 5-D with shape [num_filter, in_channel, filter_depth, filter_height, filter_width] + + strides : int or a list/tuple of three ints + stride size, or [stride_depth, stride_height, stride_width] + + padding : int or a list/tuple of three ints + padding size, or [pad_depth, pad_height, pad_width] + + dilation: int or a list/tuple of three ints + dilation size, or [dilation_depth, dilation_height, dilation_width] + + layout : str + layout of data + + out_dtype: str + The output type. This is used for mixed precision. + + Returns + ------- + output : tvm.Tensor + 5-D with shape [batch, out_channel, out_depth, out_height, out_width] + """ + return nn.conv3d_ndhwc(data, kernel, strides, padding, dilation, layout, out_dtype) + + +@autotvm.register_topi_schedule("conv3d_ndhwc.cuda") +def schedule_conv3d_ndhwc(cfg, outs): """TOPI schedule callback of conv3d for cuda gpu Parameters @@ -160,16 +151,104 @@ def schedule_conv3d_ndhwc_cuda(cfg, outs): s: Schedule The computation schedule for conv2d. """ - target = tvm.target.Target.current() - if 'cudnn' in target.libs: - return generic.schedule_extern(outs) - outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs s = tvm.create_schedule([x.op for x in outs]) def _callback(op): if op.tag == 'conv3d_ndhwc': - schedule_direct_3d_cuda(cfg, s, op.output(0)) + schedule_direct_conv3d_cuda(cfg, s, op.output(0), "NDHWC", + "conv3d_ndhwc.cuda") traverse_inline(s, outs[0].op, _callback) return s + + +@autotvm.register_topi_compute("conv3d_cudnn.cuda") +def conv3d_cudnn(cfg, data, kernel, strides, padding, dilation, layout='NCDHW', + out_dtype='float32'): + """Conv3D operator for cuda backend. + + Parameters + ---------- + cfg: ConfigEntity + The config for this template + + data : tvm.Tensor + 5-D with shape [batch, in_channel, in_depth, in_height, in_width] + + kernel : tvm.Tensor + 5-D with shape [num_filter, in_channel, filter_depth, filter_height, filter_width] + + strides : int or a list/tuple of three ints + stride size, or [stride_depth, stride_height, stride_width] + + padding : int or a list/tuple of three ints + padding size, or [pad_depth, pad_height, pad_width] + + dilation: int or a list/tuple of three ints + dilation size, or [dilation_depth, dilation_height, dilation_width] + + layout : str + layout of data + + out_dtype: str + The output type. This is used for mixed precision. + + Returns + ------- + output : tvm.Tensor + 5-D with shape [batch, out_channel, out_depth, out_height, out_width] + """ + if layout == 'NCDHW': + tensor_format = 0 # CUDNN_TENSOR_NCHW + N, _, D, H, W = get_const_tuple(data.shape) + elif layout == 'NDHWC': + tensor_format = 1 # CUDNN_TENSOR_NHWC + N, D, H, W, _ = get_const_tuple(data.shape) + else: + raise ValueError("Unsupported layout %s in cudnn" % layout) + CO, CI, KD, KH, KW = get_const_tuple(kernel.shape) + + # handle dilation + stride_d, stride_h, stride_w = (strides, strides, strides) if isinstance(strides, int) \ + else strides + pad_d, pad_h, pad_w = (padding, padding, padding) if isinstance(padding, int) else padding + dilation_d, dilation_h, dilation_w = (dilation, dilation, dilation) if \ + isinstance(dilation, int) else dilation + + OD = (D + 2 * pad_d - KD) // stride_d + 1 + OH = (H + 2 * pad_h - KH) // stride_h + 1 + OW = (W + 2 * pad_w - KW) // stride_w + 1 + cfg.add_flop(2 * N * OD * OH * OW * CO * CI * ((KD - 1) * dilation_d + 1) * \ + ((KH - 1) * dilation_h + 1) * ((KW - 1) * dilation_w + 1)) + + return cudnn.conv_forward(data, + kernel, + [pad_d, pad_h, pad_w], + [stride_d, stride_h, stride_w], + [dilation_d, dilation_h, dilation_w], + conv_mode=1, + tensor_format=tensor_format, + algo=-1, # let CUDNN choose the best algo + conv_dtype=dtype) + + +@autotvm.register_topi_schedule("conv3d_cudnn.cuda") +def schedule_conv3d_cudnn(_, outs): + """TOPI schedule callback of conv3d for cuda gpu + + Parameters + ---------- + cfg: ConfigEntity + The config for this template + + outs: Array of Tensor + The computation graph description of conv2d + in the format of an array of tensors. + + Returns + ------- + s: Schedule + The computation schedule for conv2d. + """ + return generic.schedule_extern(outs) diff --git a/topi/python/topi/cuda/conv3d_direct.py b/topi/python/topi/cuda/conv3d_direct.py index ad48deb275398..fa6c8781b5d3b 100644 --- a/topi/python/topi/cuda/conv3d_direct.py +++ b/topi/python/topi/cuda/conv3d_direct.py @@ -20,11 +20,16 @@ from tvm import autotvm from ..util import get_const_tuple -def schedule_direct_3d_cuda(cfg, s, conv): +def schedule_direct_conv3d_cuda(cfg, s, conv, layout, workload_name): """schedule optimized for batch size = 1""" ##### space definition begin ##### - n, f, d, y, x = s[conv].op.axis + if layout == "NCDHW": + n, f, d, y, x = s[conv].op.axis + elif layout == "NDHWC": + n, d, y, x, f = s[conv].op.axis + else: + raise ValueError("not support this layout {} yet".format(layout)) rc, rd, ry, rx = s[conv].op.reduce_axis cfg.define_split("tile_f", f, num_outputs=4) cfg.define_split("tile_d", d, num_outputs=4) @@ -45,7 +50,7 @@ def schedule_direct_3d_cuda(cfg, s, conv): # fallback support if cfg.is_fallback: ref_log = autotvm.tophub.load_reference_log( - target.target_name, target.model, 'conv3d', 'direct') + target.target_name, target.model, workload_name) cfg.fallback_with_reference_log(ref_log) ##### space definition end ##### diff --git a/topi/python/topi/cuda/deformable_conv2d.py b/topi/python/topi/cuda/deformable_conv2d.py index 33a8c9adc1ca6..0cf7f5a799cc4 100644 --- a/topi/python/topi/cuda/deformable_conv2d.py +++ b/topi/python/topi/cuda/deformable_conv2d.py @@ -18,16 +18,18 @@ """Schedule template of deformable conv2d with cuda backend""" import tvm from tvm import autotvm -from .. import nn, generic +from .. import nn from ..util import traverse_inline -autotvm.register_topi_compute(nn.deformable_conv2d_nchw, ["cuda", "gpu"], "direct", - nn.deformable_conv2d_nchw.fdefault) +@autotvm.register_topi_compute("deformable_conv2d_nchw.cuda") +def deformable_conv2d_nchw(cfg, data, offset, kernel, strides, padding, dilation, + deformable_groups, groups, out_dtype): + return nn.deformable_conv2d_nchw(data, offset, kernel, strides, padding, dilation, + deformable_groups, groups, out_dtype) - -@autotvm.register_topi_schedule(generic.schedule_deformable_conv2d_nchw, ["cuda", "gpu"], "direct") -def schedule_deformable_conv2d_nchw_cuda(cfg, outs): +@autotvm.register_topi_schedule("deformable_conv2d_nchw.cuda") +def schedule_deformable_conv2d_nchw(cfg, outs): """TOPI schedule callback of deformable conv2d for cuda gpu Parameters @@ -49,13 +51,13 @@ def schedule_deformable_conv2d_nchw_cuda(cfg, outs): def _callback(op): if op.tag == 'deformable_conv2d_nchw': - schedule_direct_cuda(cfg, s, op.output(0)) + _schedule_direct_cuda(cfg, s, op.output(0)) traverse_inline(s, outs[0].op, _callback) return s -def schedule_direct_cuda(cfg, s, conv): +def _schedule_direct_cuda(cfg, s, conv): """Schedule template of deformable conv2d""" n, f, y, x = s[conv].op.axis rc, ry, rx = s[conv].op.reduce_axis diff --git a/topi/python/topi/cuda/dense.py b/topi/python/topi/cuda/dense.py index 1a1af703c55cf..6cdf5d8b8b3ee 100644 --- a/topi/python/topi/cuda/dense.py +++ b/topi/python/topi/cuda/dense.py @@ -23,110 +23,59 @@ from tvm.autotvm.task.space import SplitEntity from tvm.contrib import cublas from .tensor_intrin import dp4a -from ..nn.dense import dense, dense_default +from .. import nn from .. import tag from .. import generic from ..util import traverse_inline, get_const_tuple logger = logging.getLogger('topi') - -@autotvm.register_topi_compute(dense, ["cuda", "gpu"], "direct") -def dense_cuda(cfg, data, weight, bias=None, out_dtype=None): - """Dense operator for cuda backend. - - Parameters - ---------- - data : tvm.Tensor - 2-D with shape [batch, in_dim] - - weight : tvm.Tensor - 2-D with shape [out_dim, in_dim] - - bias : tvm.Tensor, optional - 1-D with shape [out_dim] - - Returns - ------- - output : tvm.Tensor - 2-D with shape [batch, out_dim] - """ - # pylint: disable=unused-argument +@autotvm.register_topi_compute("dense_cublas.cuda") +def dense_cublas(cfg, data, weight, bias=None, out_dtype=None): + """Dense operator on CUDA with CUBLAS""" assert len(data.shape) == 2 and len(weight.shape) == 2, \ "only support 2-dim dense" if bias is not None: assert len(bias.shape) == 1 if out_dtype is None: out_dtype = data.dtype + assert out_dtype == data.dtype, "Mixed precision not supported." batch, in_dim = data.shape out_dim, _ = weight.shape - target = tvm.target.Target.current() - if "cublas" in target.libs: - matmul = cublas.matmul(data, weight, False, True, out_dtype) - if bias is not None: - matmul = tvm.compute((batch, out_dim), \ - lambda i, j: matmul[i, j] + bias[j], \ - tag=tag.BROADCAST) - return matmul - return dense_default(data, weight, bias, out_dtype) + matmul = cublas.matmul(data, weight, False, True) + cfg.add_flop(batch * in_dim * out_dim * 2) + if bias is not None: + matmul = tvm.compute((batch, out_dim), + lambda i, j: matmul[i, j] + bias[j], + tag=tag.BROADCAST) + return matmul -@autotvm.register_topi_schedule(generic.schedule_dense, ["cuda", "gpu"], "direct") -def schedule_dense(cfg, outs): - """Schedule for dense operator. +@autotvm.register_topi_schedule("dense_cublas.cuda") +def schedule_dense_cublas(_, outs): + """Schedule dense operator using CUBLAS""" + return generic.schedule_extern(outs) - Parameters - ---------- - outs: Array of Tensor - The computation graph description of dense - in the format of an array of tensors. - Returns - ------- - s: Schedule - The computation schedule for dense. - """ - # pylint: disable=unused-argument - target = tvm.target.Target.current() +@autotvm.register_topi_compute("dense_small_batch.cuda") +def dense_small_batch(cfg, data, weight, bias=None, out_dtype=None): + """Dense operator on CUDA""" + return nn.dense(data, weight, bias, out_dtype) - outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs - if target.target_name == "cuda" and "cublas" in target.libs: - return generic.schedule_extern(outs) +@autotvm.register_topi_schedule("dense_small_batch.cuda") +def schedule_dense_small_batch(cfg, outs): + outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs s = tvm.create_schedule([x.op for x in outs]) - def _schedule(C): - A, _ = C.op.input_tensors - batch, _ = get_const_tuple(A.shape) - if batch < 32: - return schedule_dense_small_batch(cfg, s, C) - return schedule_dense_large_batch(cfg, s, C) - - scheduled_ops = [] - - def traverse(OP): - """Internal traverse function""" - # inline all one-to-one-mapping operators except the last stage (output) - if tag.is_broadcast(OP.tag): - if OP not in s.outputs: - s[OP].compute_inline() - for tensor in OP.input_tensors: - if isinstance(tensor.op, tvm.tensor.ComputeOp) and tensor.op not in scheduled_ops: - traverse(tensor.op) - # schedule dense - elif OP.tag == 'dense': - Dense = OP.output(0) - _schedule(Dense) - else: - raise RuntimeError("Unsupported operator: %s" % OP.tag) - - scheduled_ops.append(OP) + def _callback(op): + if op.tag == 'dense': + _schedule_dense_small_batch(cfg, s, op.output(0)) - traverse(outs[0].op) + traverse_inline(s, outs[0].op, _callback) return s - -def schedule_dense_small_batch(cfg, s, C): +def _schedule_dense_small_batch(cfg, s, C): """Schedule float32/64 dense with small batch size""" A, _ = C.op.input_tensors _, in_dim = get_const_tuple(A.shape) @@ -152,7 +101,27 @@ def schedule_dense_small_batch(cfg, s, C): s[C].set_store_predicate(thread_x.var.equal(0)) s[Out].set_store_predicate(thread_x.var.equal(0)) -def schedule_dense_large_batch(cfg, s, C): + +@autotvm.register_topi_compute("dense_large_batch.cuda") +def dense_large_batch(cfg, data, weight, bias=None, out_dtype=None): + """Dense operator on CUDA""" + return nn.dense(data, weight, bias, out_dtype) + + +@autotvm.register_topi_schedule("dense_large_batch.cuda") +def schedule_dense_large_batch(cfg, outs): + outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs + s = tvm.create_schedule([x.op for x in outs]) + + def _callback(op): + if op.tag == 'dense': + _schedule_dense_large_batch(cfg, s, op.output(0)) + + traverse_inline(s, outs[0].op, _callback) + return s + + +def _schedule_dense_large_batch(cfg, s, C): """Schedule float32/64 dense with large batch size""" A, B = C.op.input_tensors batch, in_dim = get_const_tuple(A.shape) @@ -250,7 +219,8 @@ def schedule_dense_large_batch(cfg, s, C): s[BB].bind(tx, tvm.thread_axis("threadIdx.x")) s[BB].double_buffer() -@autotvm.register_topi_compute(dense, ['cuda'], ['int8']) + +@autotvm.register_topi_compute("dense_int8.cuda") def dense_int8(cfg, data, weight, bias=None, out_dtype=None): """Dense operator for int8 on CUDA""" if out_dtype is None: @@ -286,11 +256,11 @@ def dense_int8(cfg, data, weight, bias=None, out_dtype=None): return matmul -@autotvm.register_topi_schedule(generic.schedule_dense, ['cuda', 'gpu'], ['int8']) +@autotvm.register_topi_schedule("dense_int8.cuda") def schedule_dense_int8(cfg, outs): """Dense schedule for int8 on CUDA""" s = tvm.create_schedule([x.op for x in outs]) - target = tvm.target.Target.current() + target = tvm.target.current_target() outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs if "cublas" in target.libs: diff --git a/topi/python/topi/cuda/depthwise_conv2d.py b/topi/python/topi/cuda/depthwise_conv2d.py index 05e1117ac2cee..c8cd7934bd3ea 100644 --- a/topi/python/topi/cuda/depthwise_conv2d.py +++ b/topi/python/topi/cuda/depthwise_conv2d.py @@ -20,14 +20,15 @@ from tvm import autotvm from ..util import traverse_inline from .. import tag -from .. import generic, nn +from .. import nn # register original implementation of depthwise_conv2d_nchw since we don't need to change this part -autotvm.register_topi_compute(nn.depthwise_conv2d_nchw, ['cuda', 'gpu'], 'direct', - nn.depthwise_conv2d_nchw.fdefault) +@autotvm.register_topi_compute("depthwise_conv2d_nchw.cuda") +def depthwise_conv2d_nchw(cfg, data, kernel, strides, padding, dilation, out_dtype): + return nn.depthwise_conv2d_nchw(data, kernel, strides, padding, dilation, out_dtype) -@autotvm.register_topi_schedule(generic.schedule_depthwise_conv2d_nchw, ['cuda', 'gpu'], 'direct') -def schedule_depthwise_conv2d_nchw_cuda(cfg, outs): +@autotvm.register_topi_schedule("depthwise_conv2d_nchw.cuda") +def schedule_depthwise_conv2d_nchw(cfg, outs): """Schedule for depthwise_conv2d nchw forward. Parameters @@ -66,7 +67,7 @@ def _callback(op): # fallback support if cfg.is_fallback: ref_log = autotvm.tophub.load_reference_log( - target.target_name, target.model, 'depthwise_conv2d_nchw', 'direct') + target.target_name, target.model, 'depthwise_conv2d_nchw.cuda') cfg.fallback_with_reference_log(ref_log) # TODO(lmzheng): A bug here, set unroll_explicit to False as workaround cfg['unroll_explicit'].val = 0 @@ -131,7 +132,6 @@ def _callback(op): traverse_inline(s, outs[0].op, _callback) return s -@generic.schedule_depthwise_conv2d_nhwc.register(["cuda", "gpu"]) def schedule_depthwise_conv2d_nhwc(outs): """Schedule for depthwise_conv2d nhwc forward. diff --git a/topi/python/topi/cuda/group_conv2d_nchw.py b/topi/python/topi/cuda/group_conv2d_nchw.py index 54e8427daf79a..24a4be5dbe929 100644 --- a/topi/python/topi/cuda/group_conv2d_nchw.py +++ b/topi/python/topi/cuda/group_conv2d_nchw.py @@ -24,15 +24,163 @@ from ..nn.pad import pad from ..nn.util import get_pad_tuple from ..util import traverse_inline, get_const_tuple, get_const_int -from .. import nn, generic +from .. import nn -autotvm.register_topi_compute(nn.group_conv2d_nchw, ['cuda', 'gpu'], 'direct', - nn.group_conv2d_nchw.fdefault) - -@autotvm.register_topi_compute(nn.group_conv2d_nchw, ['cuda', 'gpu'], ['int8']) +@autotvm.register_topi_compute("group_conv2d_nchw.cuda") def group_conv2d_nchw_cuda(cfg, data, kernel, stride, padding, dilation, groups, out_dtype='float32'): + return nn.group_conv2d_nchw(data, kernel, stride, padding, dilation, groups, out_dtype) + + +@autotvm.register_topi_schedule("group_conv2d_nchw.cuda") +def schedule_group_conv2d_nchw(cfg, outs): + """TOPI schedule callback of group conv2d for cuda gpu + + Parameters + ---------- + cfg: ConfigEntity + The config for this template + + outs: Array of Tensor + The computation graph description of conv2d + in the format of an array of tensors. + + Returns + ------- + s: Schedule + The computation schedule for group conv2d. + """ + outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs + s = tvm.create_schedule([x.op for x in outs]) + + def _callback(op): + if op.tag == "group_conv2d_nchw": + _schedule_group_conv2d_nchw_direct(cfg, s, op.output(0)) + + traverse_inline(s, outs[0].op, _callback) + return s + + +def _schedule_group_conv2d_nchw_direct(cfg, s, conv): + """Schedule group conv2d NCHW direct template""" + workload = conv.op.attrs["workload"] + groups = get_const_int(workload[6]) + num_filters = get_const_int(conv.shape[1]) + + ##### space definition begin ##### + n, f, y, x = s[conv].op.axis + rc, ry, rx = s[conv].op.reduce_axis + cfg.define_split("tile_n", n, num_outputs=4) + cfg.define_split("tile_g", cfg.axis(groups), num_outputs=2) + cfg.define_split("tile_f", cfg.axis(num_filters // groups), num_outputs=4) + cfg.define_split("tile_y", y, num_outputs=4) + cfg.define_split("tile_x", x, num_outputs=4) + cfg.define_split("tile_rc", rc, num_outputs=2) + cfg.define_split("tile_ry", ry, num_outputs=2) + cfg.define_split("tile_rx", rx, num_outputs=2) + cfg.define_knob("auto_unroll_max_step", [0, 512, 1500]) + + target = tvm.target.current_target() + if target.target_name in ['nvptx', 'rocm']: + cfg.define_knob("unroll_explicit", [1]) + else: + cfg.define_knob("unroll_explicit", [0, 1]) + + pad_data, kernel = s[conv].op.input_tensors + + s[pad_data].compute_inline() + + if conv.op in s.outputs: + output = conv + OL = s.cache_write(conv, 'local') + else: + output = s.outputs[0].output(0) + s[conv].set_scope('local') + OL = conv + + # create cache stage + AA = s.cache_read(pad_data, 'shared', [OL]) + WW = s.cache_read(kernel, 'shared', [OL]) + + # tile and bind spatial axes + n, f, y, x = s[output].op.axis + kernel_scope, n = s[output].split(n, nparts=1) + + g, f = s[output].split(f, nparts=groups) + bn, vn, tn, ni = cfg["tile_n"].apply(s, output, n) + bg, vg = cfg["tile_g"].apply(s, output, g) + bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f) + by, vy, ty, yi = cfg["tile_y"].apply(s, output, y) + bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x) + + s[output].reorder(bn, bg, bf, by, bx, vn, vg, vf, vy, vx, tn, tf, ty, tx, ni, fi, yi, xi) + s[output].bind(bn, tvm.thread_axis("blockIdx.z")) + s[output].bind(s[output].fuse(bg, bf), tvm.thread_axis("blockIdx.y")) + s[output].bind(s[output].fuse(by, bx), tvm.thread_axis("blockIdx.x")) + s[output].bind(vn, tvm.thread_axis("vthread")) + s[output].bind(vg, tvm.thread_axis("vthread")) + s[output].bind(vf, tvm.thread_axis("vthread")) + s[output].bind(vy, tvm.thread_axis("vthread")) + s[output].bind(vx, tvm.thread_axis("vthread")) + + cfg.define_knob("fuse_yx", [0, 1]) # fuse ty,tx or tn,tf + if cfg["fuse_yx"].val: + s[output].bind(tn, tvm.thread_axis("threadIdx.z")) + s[output].bind(tf, tvm.thread_axis("threadIdx.y")) + tyx = s[output].fuse(ty, tx) + s[output].bind(tyx, tvm.thread_axis("threadIdx.x")) + s[OL].compute_at(s[output], tyx) + + # number of threads + n_tz = cfg["tile_n"].size[2] + n_ty = cfg["tile_f"].size[2] + n_tx = cfg["tile_y"].size[2] * cfg["tile_x"].size[2] + else: + s[output].bind(s[output].fuse(tn, tf), tvm.thread_axis("threadIdx.z")) + s[output].bind(ty, tvm.thread_axis("threadIdx.y")) + s[output].bind(tx, tvm.thread_axis("threadIdx.x")) + s[OL].compute_at(s[output], tx) + + # number of threads + n_tz = cfg["tile_n"].size[2] * cfg["tile_f"].size[2] + n_ty = cfg["tile_y"].size[2] + n_tx = cfg["tile_x"].size[2] + + # tile reduction axes + n, f, y, x = s[OL].op.axis + rc, ry, rx = s[OL].op.reduce_axis + rco, rci = cfg['tile_rc'].apply(s, OL, rc) + ryo, ryi = cfg['tile_rx'].apply(s, OL, ry) + rxo, rxi = cfg['tile_ry'].apply(s, OL, rx) + s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, n, f, y, x) + + s[AA].compute_at(s[OL], rxo) + s[WW].compute_at(s[OL], rxo) + + # cooperative fetching + for load in [AA, WW]: + n, f, y, x = s[load].op.axis + fused = s[load].fuse(n, f, y, x) + fused, tx = s[load].split(fused, factor=n_tx) + fused, ty = s[load].split(fused, factor=n_ty) + fused, tz = s[load].split(fused, factor=n_tz) + s[load].bind(tz, tvm.thread_axis("threadIdx.z")) + s[load].bind(ty, tvm.thread_axis("threadIdx.y")) + s[load].bind(tx, tvm.thread_axis("threadIdx.x")) + + # unroll + s[output].pragma(kernel_scope, 'auto_unroll_max_step', cfg['auto_unroll_max_step'].val) + s[output].pragma(kernel_scope, 'unroll_explicit', cfg['unroll_explicit'].val) + + N, CO, OH, OW = get_const_tuple(output.shape) + _, CI_div_groups, KH, KW = get_const_tuple(kernel.shape) + cfg.add_flop(2 * N * OH * OW * CO * CI_div_groups * KH * KW) + + +@autotvm.register_topi_compute("group_conv2d_NCHWc_int8.cuda") +def group_conv2d_NCHWc_int8(cfg, data, kernel, stride, padding, dilation, groups, + out_dtype='float32'): """Group convolution operator for 'group_conv2d_NCHWc_int8'. Parameters @@ -155,29 +303,58 @@ def group_conv2d_nchw_cuda(cfg, data, kernel, stride, padding, dilation, groups, # Compared with a normal convolution, group convolution only sums # input channels from the group that an output channel resides in. conv = tvm.compute(oshape, lambda n, occ, oh, ow, ocb: - tvm.sum(pad_data[n, occ//(oc_chunk//groups)*(ic_chunk//groups)+icc, - oh*stride_h+kh*dilation_h, ow*stride_w+kw*dilation_w, icb] - .astype('int32') * - packed_kernel[occ, icc, - kh, kw, ocb, icb] - .astype('int32'), - axis=[icc, kh, kw, icb])) + tvm.sum(pad_data[n, occ//(oc_chunk//groups)*(ic_chunk//groups)+icc, + oh*stride_h+kh*dilation_h, ow*stride_w+kw*dilation_w, icb] + .astype('int32') * + packed_kernel[occ, icc, + kh, kw, ocb, icb] + .astype('int32'), + axis=[icc, kh, kw, icb])) # Type conversion output = tvm.compute(oshape, lambda *index: conv(*index).astype(out_dtype), tag='group_conv2d_NCHWc_int8') num_flop = batch * oc_chunk * oc_block * out_height * out_width * \ - ic_chunk * ic_block * kernel_h * kernel_w * 2 // groups + ic_chunk * ic_block * kernel_h * kernel_w * 2 // groups cfg.add_flop(num_flop) return output +@autotvm.register_topi_schedule("group_conv2d_NCHWc_int8.cuda") +def schedule_group_conv2d_NCHWc_int8(cfg, outs): + """TOPI schedule callback of group conv2d for cuda gpu + + Parameters + ---------- + cfg: ConfigEntity + The config for this template + + outs: Array of Tensor + The computation graph description of conv2d + in the format of an array of tensors. + + Returns + ------- + s: Schedule + The computation schedule for group conv2d. + """ + outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs + s = tvm.create_schedule([x.op for x in outs]) + + def _callback(op): + if op.tag == "group_conv2d_NCHWc_int8": + _schedule_group_conv2d_NCHWc_int8(cfg, s, op.output(0)) + + traverse_inline(s, outs[0].op, _callback) + return s + + _dp4a = dp4a('shared', 'shared', 'local') -def schedule_group_conv2d_NCHWc_int8(cfg, s, output): +def _schedule_group_conv2d_NCHWc_int8(cfg, s, output): """Schedule group conv2d int8 NCHWc template""" workload = output.op.attrs["workload"] groups = get_const_int(workload[6]) @@ -198,7 +375,7 @@ def schedule_group_conv2d_NCHWc_int8(cfg, s, output): s[packed_kernel].pragma( s[packed_kernel].op.axis[0], "debug_skip_region") else: - if isinstance(packed_kernel.op, tvm.tensor.ComputeOp) and\ + if isinstance(packed_kernel.op, tvm.tensor.ComputeOp) and \ packed_kernel.name == 'packed_kernel': # data and kernel are not pre-computed, schedule layout transform here schedule_injective_from_existing(s, packed_data) @@ -319,151 +496,3 @@ def schedule_group_conv2d_NCHWc_int8(cfg, s, output): s[output].pragma(kernel_scope, 'unroll_explicit', False) return s - - -def schedule_group_conv2d_nchw_direct(cfg, s, conv): - """Schedule group conv2d NCHW direct template""" - workload = conv.op.attrs["workload"] - groups = get_const_int(workload[6]) - num_filters = get_const_int(conv.shape[1]) - - ##### space definition begin ##### - n, f, y, x = s[conv].op.axis - rc, ry, rx = s[conv].op.reduce_axis - cfg.define_split("tile_n", n, num_outputs=4) - cfg.define_split("tile_g", cfg.axis(groups), num_outputs=2) - cfg.define_split("tile_f", cfg.axis(num_filters // groups), num_outputs=4) - cfg.define_split("tile_y", y, num_outputs=4) - cfg.define_split("tile_x", x, num_outputs=4) - cfg.define_split("tile_rc", rc, num_outputs=2) - cfg.define_split("tile_ry", ry, num_outputs=2) - cfg.define_split("tile_rx", rx, num_outputs=2) - cfg.define_knob("auto_unroll_max_step", [0, 512, 1500]) - - target = tvm.target.Target.current() - if target.target_name in ['nvptx', 'rocm']: - cfg.define_knob("unroll_explicit", [1]) - else: - cfg.define_knob("unroll_explicit", [0, 1]) - - pad_data, kernel = s[conv].op.input_tensors - - s[pad_data].compute_inline() - - if conv.op in s.outputs: - output = conv - OL = s.cache_write(conv, 'local') - else: - output = s.outputs[0].output(0) - s[conv].set_scope('local') - OL = conv - - # create cache stage - AA = s.cache_read(pad_data, 'shared', [OL]) - WW = s.cache_read(kernel, 'shared', [OL]) - - # tile and bind spatial axes - n, f, y, x = s[output].op.axis - kernel_scope, n = s[output].split(n, nparts=1) - - g, f = s[output].split(f, nparts=groups) - bn, vn, tn, ni = cfg["tile_n"].apply(s, output, n) - bg, vg = cfg["tile_g"].apply(s, output, g) - bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f) - by, vy, ty, yi = cfg["tile_y"].apply(s, output, y) - bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x) - - s[output].reorder(bn, bg, bf, by, bx, vn, vg, vf, vy, vx, tn, tf, ty, tx, ni, fi, yi, xi) - s[output].bind(bn, tvm.thread_axis("blockIdx.z")) - s[output].bind(s[output].fuse(bg, bf), tvm.thread_axis("blockIdx.y")) - s[output].bind(s[output].fuse(by, bx), tvm.thread_axis("blockIdx.x")) - s[output].bind(vn, tvm.thread_axis("vthread")) - s[output].bind(vg, tvm.thread_axis("vthread")) - s[output].bind(vf, tvm.thread_axis("vthread")) - s[output].bind(vy, tvm.thread_axis("vthread")) - s[output].bind(vx, tvm.thread_axis("vthread")) - - cfg.define_knob("fuse_yx", [0, 1]) # fuse ty,tx or tn,tf - if cfg["fuse_yx"].val: - s[output].bind(tn, tvm.thread_axis("threadIdx.z")) - s[output].bind(tf, tvm.thread_axis("threadIdx.y")) - tyx = s[output].fuse(ty, tx) - s[output].bind(tyx, tvm.thread_axis("threadIdx.x")) - s[OL].compute_at(s[output], tyx) - - # number of threads - n_tz = cfg["tile_n"].size[2] - n_ty = cfg["tile_f"].size[2] - n_tx = cfg["tile_y"].size[2] * cfg["tile_x"].size[2] - else: - s[output].bind(s[output].fuse(tn, tf), tvm.thread_axis("threadIdx.z")) - s[output].bind(ty, tvm.thread_axis("threadIdx.y")) - s[output].bind(tx, tvm.thread_axis("threadIdx.x")) - s[OL].compute_at(s[output], tx) - - # number of threads - n_tz = cfg["tile_n"].size[2] * cfg["tile_f"].size[2] - n_ty = cfg["tile_y"].size[2] - n_tx = cfg["tile_x"].size[2] - - # tile reduction axes - n, f, y, x = s[OL].op.axis - rc, ry, rx = s[OL].op.reduce_axis - rco, rci = cfg['tile_rc'].apply(s, OL, rc) - ryo, ryi = cfg['tile_rx'].apply(s, OL, ry) - rxo, rxi = cfg['tile_ry'].apply(s, OL, rx) - s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, n, f, y, x) - - s[AA].compute_at(s[OL], rxo) - s[WW].compute_at(s[OL], rxo) - - # cooperative fetching - for load in [AA, WW]: - n, f, y, x = s[load].op.axis - fused = s[load].fuse(n, f, y, x) - fused, tx = s[load].split(fused, factor=n_tx) - fused, ty = s[load].split(fused, factor=n_ty) - fused, tz = s[load].split(fused, factor=n_tz) - s[load].bind(tz, tvm.thread_axis("threadIdx.z")) - s[load].bind(ty, tvm.thread_axis("threadIdx.y")) - s[load].bind(tx, tvm.thread_axis("threadIdx.x")) - - # unroll - s[output].pragma(kernel_scope, 'auto_unroll_max_step', cfg['auto_unroll_max_step'].val) - s[output].pragma(kernel_scope, 'unroll_explicit', cfg['unroll_explicit'].val) - - N, CO, OH, OW = get_const_tuple(output.shape) - _, CI_div_groups, KH, KW = get_const_tuple(kernel.shape) - cfg.add_flop(2 * N * OH * OW * CO * CI_div_groups * KH * KW) - - -@autotvm.register_topi_schedule(generic.schedule_group_conv2d_nchw, - ["cuda", "gpu"], ["int8", "direct"]) -def schedule_conv2d_nchw_cuda(cfg, outs): - """TOPI schedule callback of group conv2d for cuda gpu - - Parameters - ---------- - cfg: ConfigEntity - The config for this template - - outs: Array of Tensor - The computation graph description of conv2d - in the format of an array of tensors. - - Returns - ------- - s: Schedule - The computation schedule for group conv2d. - """ - outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs - s = tvm.create_schedule([x.op for x in outs]) - - def _callback(op): - if op.tag == "group_conv2d_NCHWc_int8": - schedule_group_conv2d_NCHWc_int8(cfg, s, op.output(0)) - if op.tag == "group_conv2d_nchw": - schedule_group_conv2d_nchw_direct(cfg, s, op.output(0)) - - traverse_inline(s, outs[0].op, _callback) - return s diff --git a/topi/python/topi/cuda/injective.py b/topi/python/topi/cuda/injective.py index eb7019bd7654f..1690407a1602b 100644 --- a/topi/python/topi/cuda/injective.py +++ b/topi/python/topi/cuda/injective.py @@ -17,10 +17,8 @@ # pylint: disable=invalid-name, unused-variable, """Schedule for composition of injective operator""" import tvm -from .. import generic, util -from ..util import is_empty_shape +from .. import util -@generic.schedule_injective_from_existing.register(["cuda", "gpu"]) def schedule_injective_from_existing(sch, out): """Schedule for injective op from existing schedule. @@ -67,7 +65,6 @@ def schedule_injective_from_existing(sch, out): return sch -@generic.schedule_injective.register(["cuda", "gpu"]) def schedule_injective(outs): """Schedule for injective op. @@ -87,7 +84,7 @@ def schedule_injective(outs): tvm.schedule.AutoInlineInjective(s) for out in outs: - if not is_empty_shape(out.shape): + if not util.is_empty_shape(out.shape): schedule_injective_from_existing(s, out) return s diff --git a/topi/python/topi/cuda/nms.py b/topi/python/topi/cuda/nms.py index 38f87a9523c8b..91482b9e628cd 100644 --- a/topi/python/topi/cuda/nms.py +++ b/topi/python/topi/cuda/nms.py @@ -23,7 +23,6 @@ from tvm import api from tvm.generic import cast from tvm.intrin import if_then_else, log, power -from topi.vision import non_max_suppression, get_valid_counts from .sort import argsort from .. import tag @@ -327,8 +326,7 @@ def get_valid_counts_ir(data, flag, idx, valid_count, out): return ib.get() -@get_valid_counts.register(["cuda", "gpu"]) -def get_valid_counts_gpu(data, score_threshold=0, id_index=0, score_index=1): +def get_valid_counts(data, score_threshold=0, id_index=0, score_index=1): """Get valid count of bounding boxes given a score threshold. Also moves valid boxes to the top of input data. @@ -675,11 +673,10 @@ def invalid_to_bottom_ir(data, flag, idx, out): return ib.get() -@non_max_suppression.register(["cuda", "gpu"]) -def non_max_suppression_gpu(data, valid_count, max_output_size=-1, - iou_threshold=0.5, force_suppress=False, top_k=-1, - coord_start=2, score_index=1, id_index=0, - return_indices=True, invalid_to_bottom=False): +def non_max_suppression(data, valid_count, max_output_size=-1, + iou_threshold=0.5, force_suppress=False, top_k=-1, + coord_start=2, score_index=1, id_index=0, + return_indices=True, invalid_to_bottom=False): """Non-maximum suppression operator for object detection. Parameters diff --git a/topi/python/topi/cuda/nn.py b/topi/python/topi/cuda/nn.py index 327afa87edb5e..c0230ec0be481 100644 --- a/topi/python/topi/cuda/nn.py +++ b/topi/python/topi/cuda/nn.py @@ -19,10 +19,8 @@ from __future__ import absolute_import as _abs import tvm -from .. import generic from .. import cpp -@generic.schedule_lrn.register(["cuda"]) def schedule_lrn(outs): """Schedule for LRN @@ -37,6 +35,4 @@ def schedule_lrn(outs): sch: Schedule The computation schedule for the op. """ - target = tvm.target.Target.current(allow_none=False) - cpp_target = cpp.TEST_create_target(target.target_name) - return cpp.cuda.schedule_lrn(cpp_target, outs) + return cpp.cuda.schedule_lrn(outs) diff --git a/topi/python/topi/cuda/pooling.py b/topi/python/topi/cuda/pooling.py index 2bf1e6bb9ef0d..2bebd39123783 100644 --- a/topi/python/topi/cuda/pooling.py +++ b/topi/python/topi/cuda/pooling.py @@ -18,12 +18,9 @@ """Schedule for pooling operators""" import tvm from .. import tag -from .. import generic from ..util import traverse_inline - -@generic.schedule_adaptive_pool.register(["cuda", "gpu"]) def schedule_adaptive_pool(outs): """Schedule for adaptive_pool. @@ -89,7 +86,6 @@ def traverse(OP): return s -@generic.schedule_pool.register(["cuda", "gpu"]) def schedule_pool(outs, layout): """Schedule for pool. @@ -153,8 +149,7 @@ def traverse(OP): return s -@generic.schedule_pool_grad.register(['cuda', 'gpu']) -def schedule_pool_grad_cuda(outs): +def schedule_pool_grad(outs): """Schedule for pool_grad on CUDA Parameters diff --git a/topi/python/topi/cuda/rcnn/__init__.py b/topi/python/topi/cuda/rcnn/__init__.py index 42b34f0a31e64..da55b070a8072 100644 --- a/topi/python/topi/cuda/rcnn/__init__.py +++ b/topi/python/topi/cuda/rcnn/__init__.py @@ -17,4 +17,4 @@ # pylint: disable=wildcard-import """Faster R-CNN and Mask R-CNN operators""" -from .proposal import * +from .proposal import proposal diff --git a/topi/python/topi/cuda/rcnn/proposal.py b/topi/python/topi/cuda/rcnn/proposal.py index 4344226d787e5..71f9c4ac305eb 100644 --- a/topi/python/topi/cuda/rcnn/proposal.py +++ b/topi/python/topi/cuda/rcnn/proposal.py @@ -308,9 +308,8 @@ def prepare_output_ir(sorted_bbox_buf, remove_mask_buf, out_buf): return body -@proposal.register("cuda") -def proposal_cuda(cls_prob, bbox_pred, im_info, scales, ratios, feature_stride, threshold, - rpn_pre_nms_top_n, rpn_post_nms_top_n, rpn_min_size, iou_loss): +def proposal(cls_prob, bbox_pred, im_info, scales, ratios, feature_stride, threshold, + rpn_pre_nms_top_n, rpn_post_nms_top_n, rpn_min_size, iou_loss): """Proposal operator. Parameters diff --git a/topi/python/topi/cuda/reduction.py b/topi/python/topi/cuda/reduction.py index 69c685cb50b4c..0b9d5885375e8 100644 --- a/topi/python/topi/cuda/reduction.py +++ b/topi/python/topi/cuda/reduction.py @@ -19,7 +19,6 @@ from __future__ import absolute_import as _abs import tvm from .. import tag -from .. import generic from .injective import schedule_injective_from_existing def _schedule_reduce(op, sch, is_idx_reduce=False): @@ -89,7 +88,6 @@ def _schedule_reduce(op, sch, is_idx_reduce=False): return sch -@generic.schedule_reduce.register(["cuda", "gpu"]) def schedule_reduce(outs): """Schedule for inject->reduce->bcast ops. diff --git a/topi/python/topi/cuda/softmax.py b/topi/python/topi/cuda/softmax.py index 26a1baffa092c..afd11ea0e71e1 100644 --- a/topi/python/topi/cuda/softmax.py +++ b/topi/python/topi/cuda/softmax.py @@ -17,10 +17,9 @@ # pylint: disable=invalid-name, unused-variable, trailing-whitespace """Schedule for softmax operator""" import tvm -from .. import generic from .injective import schedule_injective_from_existing -@generic.schedule_softmax.register(["cuda", "gpu"]) + def schedule_softmax(outs): """Schedule for softmax op. diff --git a/topi/python/topi/cuda/sort.py b/topi/python/topi/cuda/sort.py index b32cce75362f8..88ca9d876abce 100644 --- a/topi/python/topi/cuda/sort.py +++ b/topi/python/topi/cuda/sort.py @@ -19,10 +19,9 @@ import tvm from tvm import api -from ..sort import argsort, topk +from .injective import schedule_injective_from_existing from ..math import identity from ..transform import strided_slice -from .. import generic from .. import tag def _schedule_sort(outs): @@ -42,8 +41,7 @@ def _schedule_sort(outs): outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs s = tvm.create_schedule([x.op for x in outs]) scheduled_ops = [] - # pylint: disable=import-outside-toplevel - from .injective import schedule_injective_from_existing + def traverse(op): if tag.is_injective(op.tag): schedule_injective_from_existing(s, op.output(0)) @@ -239,8 +237,7 @@ def sort_nms_ir(data, valid_count, output, axis, is_ascend): return ib.get() -@argsort.register(["cuda", "gpu"]) -def argsort_gpu(data, valid_count=None, axis=-1, is_ascend=1, dtype="float32"): +def argsort(data, valid_count=None, axis=-1, is_ascend=1, dtype="float32"): """Performs sorting along the given axis and returns an array of indicies having same shape as an input array that index data in sorted order. @@ -294,7 +291,6 @@ def argsort_gpu(data, valid_count=None, axis=-1, is_ascend=1, dtype="float32"): tag="argsort_gpu")[1] return out -@generic.schedule_argsort.register(["cuda", "gpu"]) def schedule_argsort(outs): """Schedule for argsort operator. @@ -311,8 +307,7 @@ def schedule_argsort(outs): """ return _schedule_sort(outs) -@topk.register(["cuda", "gpu"]) -def topk_gpu(data, k=1, axis=-1, ret_type="both", is_ascend=False, dtype="int64"): +def topk(data, k=1, axis=-1, ret_type="both", is_ascend=False, dtype="int64"): """Get the top k elements in an input tensor along the given axis. Parameters @@ -389,7 +384,6 @@ def topk_gpu(data, k=1, axis=-1, ret_type="both", is_ascend=False, dtype="int64" return output -@generic.schedule_topk.register(["cuda", "gpu"]) def schedule_topk(outs): """Schedule for argsort operator. diff --git a/topi/python/topi/cuda/ssd/multibox.py b/topi/python/topi/cuda/ssd/multibox.py index 10ba7a1051ea6..0b3f50ba0031b 100644 --- a/topi/python/topi/cuda/ssd/multibox.py +++ b/topi/python/topi/cuda/ssd/multibox.py @@ -25,9 +25,6 @@ import topi -from topi.vision.ssd import multibox_prior -from topi.vision.ssd import multibox_detection -from topi.vision.ssd import multibox_transform_loc from ..nms import non_max_suppression @@ -112,9 +109,8 @@ def multibox_prior_ir(data, out, sizes, ratios, steps, offsets): return body -@multibox_prior.register(["cuda", "gpu"]) -def multibox_prior_gpu(data, sizes=(1,), ratios=(1,), steps=(-1, -1), - offsets=(0.5, 0.5), clip=False): +def multibox_prior(data, sizes=(1,), ratios=(1,), steps=(-1, -1), + offsets=(0.5, 0.5), clip=False): """Generate prior(anchor) boxes from data, sizes and ratios. Parameters @@ -346,9 +342,8 @@ def transform_loc(loc, loc_base_idx, anchor, anchor_base_idx, clip, vx, vy, vw, return ib.get() -@multibox_transform_loc.register(["cuda", "gpu"]) -def multibox_transform_loc_gpu(cls_prob, loc_pred, anchor, clip=True, \ - threshold=0.01, variances=(0.1, 0.1, 0.2, 0.2)): +def multibox_transform_loc(cls_prob, loc_pred, anchor, clip=True, \ + threshold=0.01, variances=(0.1, 0.1, 0.2, 0.2)): """Location transformation for multibox detection Parameters @@ -426,9 +421,8 @@ def multibox_transform_loc_gpu(cls_prob, loc_pred, anchor, clip=True, \ return [out_loc, valid_count] -@multibox_detection.register(["cuda", "gpu"]) -def multibox_detection_gpu(cls_prob, loc_pred, anchor, clip=True, threshold=0.01, nms_threshold=0.5, - force_suppress=False, variances=(0.1, 0.1, 0.2, 0.2), nms_topk=-1): +def multibox_detection(cls_prob, loc_pred, anchor, clip=True, threshold=0.01, nms_threshold=0.5, + force_suppress=False, variances=(0.1, 0.1, 0.2, 0.2), nms_topk=-1): """Convert multibox detection predictions. Parameters diff --git a/topi/python/topi/cuda/vision.py b/topi/python/topi/cuda/vision.py index d456aadf4f5ef..499288829e445 100644 --- a/topi/python/topi/cuda/vision.py +++ b/topi/python/topi/cuda/vision.py @@ -22,13 +22,13 @@ from .. import cpp from .. import tag from .pooling import schedule_pool +from .injective import schedule_injective_from_existing def _default_schedule(outs): """Default schedule for gpu.""" outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs s = tvm.create_schedule([x.op for x in outs]) scheduled_ops = [] - from .injective import schedule_injective_from_existing def traverse(op): if tag.is_broadcast(op.tag) or op.tag in ['bbox_score', 'sorted_bbox']: schedule_injective_from_existing(s, op.output(0)) @@ -39,7 +39,6 @@ def traverse(op): traverse(outs[0].op) return s -@generic.schedule_reorg.register(["cuda", "gpu"]) def schedule_reorg(outs): """Schedule for reorg operator. Parameters @@ -57,7 +56,6 @@ def schedule_reorg(outs): cpp_target = cpp.TEST_create_target(target.target_name) return cpp.cuda.schedule_injective(cpp_target, outs) -@generic.schedule_nms.register(["cuda", "gpu"]) def schedule_nms(outs): """Schedule for non-maximum suppression @@ -74,7 +72,6 @@ def schedule_nms(outs): """ return _default_schedule(outs) -@generic.schedule_multibox_prior.register(["cuda", "gpu"]) def schedule_multibox_prior(outs): """Schedule for multibox_prior operator. @@ -91,7 +88,6 @@ def schedule_multibox_prior(outs): """ return _default_schedule(outs) -@generic.schedule_multibox_transform_loc.register(["cuda", "gpu"]) def schedule_multibox_transform_loc(outs): """Schedule for multibox_transform_loc @@ -109,7 +105,6 @@ def schedule_multibox_transform_loc(outs): """ return _default_schedule(outs) -@generic.schedule_multibox_detection.register(["cuda", "gpu"]) def schedule_multibox_detection(outs): """Schedule for multibox_detection operator. @@ -126,15 +121,12 @@ def schedule_multibox_detection(outs): """ return _default_schedule(outs) -@generic.schedule_roi_align.register(["cuda", "gpu"]) def schedule_roi_align(outs): return schedule_pool(outs, 'NCHW') -@generic.schedule_roi_pool.register(["cuda", "gpu"]) def schedule_roi_pool(outs): return schedule_pool(outs, 'NCHW') -@generic.schedule_proposal.register(["cuda", "gpu"]) def schedule_proposal(outs): """Schedule for proposal operator. @@ -151,7 +143,6 @@ def schedule_proposal(outs): """ return _default_schedule(outs) -@generic.schedule_get_valid_counts.register(["cuda", "gpu"]) def schedule_get_valid_counts(outs): """Schedule for get_valid_counts operator. diff --git a/topi/python/topi/generic/conv2d.py b/topi/python/topi/generic/conv2d.py index 332c2fdad4595..08bb06c6f8558 100644 --- a/topi/python/topi/generic/conv2d.py +++ b/topi/python/topi/generic/conv2d.py @@ -19,6 +19,7 @@ """Generic convolution schedules""" from __future__ import absolute_import as _abs import tvm +from tvm import autotvm from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity from ..util import get_const_tuple @@ -109,7 +110,8 @@ def fallback_schedule_cpu_1x1_int8(cfg, wkl, int32_lanes, num_int8_elements): raise ValueError("cannot decide default schedule for workload: {}".format(wkl)) -def schedule_conv_NCHWc_cpu_common_int8(s, cfg, data, conv_out, last, int32_lanes=16, intrin=None): +def schedule_conv_NCHWc_cpu_common_int8(s, cfg, data_vec, kernel_vec, conv_out, + last, int32_lanes=16, intrin=None): """ Defines the schedule for INT8 for Intel and ARM machines Uses the Intel/ARM intrinsics to use INT8 operations @@ -117,14 +119,39 @@ def schedule_conv_NCHWc_cpu_common_int8(s, cfg, data, conv_out, last, int32_lane lower-numerical-precision-deep-learning-inference-and-training """ reg_n, unroll_kw = cfg["tile_ow"].size[-1], cfg["unroll_kw"].val - _, _, _, _, ic_bn = get_const_tuple(data.shape) + _, _, _, _, ic_bn = get_const_tuple(data_vec.shape) _, _, _, _, oc_bn = get_const_tuple(conv_out.shape) - A = data - if isinstance(s[A].op, tvm.tensor.ComputeOp): - batch, ic_chunk, ih, iw, _ = s[A].op.axis - parallel_axis = s[A].fuse(batch, ic_chunk, ih) - s[A].parallel(parallel_axis) + # schedule pad + if isinstance(s[data_vec].op, tvm.tensor.ComputeOp) \ + and "pad" in data_vec.op.tag: + batch, ic_chunk, ih, iw, ic_block = s[data_vec].op.axis + parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih) + s[data_vec].parallel(parallel_axis) + data_vec = data_vec.op.input_tensors[0] + + if autotvm.GLOBAL_SCOPE.in_tuning: + # only in autotuning, input data of conv2d_NCHWc will be 4-D. + # skip this part during tuning to make records accurate. + # this part will be folded during Relay fold_constant pass. + s[data_vec].pragma(s[data_vec].op.axis[0], "debug_skip_region") + s[kernel_vec].pragma(s[kernel_vec].op.axis[0], "debug_skip_region") + elif isinstance(kernel_vec.op, tvm.tensor.ComputeOp) and \ + kernel_vec.name == 'kernel_vec': + # data and kernel are not pre-computed, schedule layout transform here. + # this should only be used by x86 conv2d_nchw, which is for + # testing purpose. + batch, ic_chunk, ih, ic_block, iw = s[data_vec].op.axis + parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih) + s[data_vec].parallel(parallel_axis) + + oc_chunk, ic_chunk, oh, ow, ic_block, oc_block = s[kernel_vec].op.axis + s[kernel_vec].reorder(oc_chunk, oh, ic_chunk, ow, ic_block, oc_block) + oc_bn = cfg["tile_oc"].size[-1] + if oc_bn > 1: + s[kernel_vec].vectorize(oc_block) + parallel_axis = s[kernel_vec].fuse(oc_chunk, oh) + s[kernel_vec].parallel(parallel_axis) # schedule 5-D NCHW[x]c conv C, O = conv_out, last @@ -173,7 +200,8 @@ def schedule_conv_NCHWc_cpu_common_int8(s, cfg, data, conv_out, last, int32_lane return s -def schedule_conv_NCHWc_cpu_1x1_int8(s, cfg, data, conv_out, last, int32_lanes=16, intrin=None): +def schedule_conv_NCHWc_cpu_1x1_int8(s, cfg, data_vec, kernel_vec, conv_out, + last, int32_lanes=16, intrin=None): """ Defines the 1x1 conv schedule for INT8 for Intel and ARM machines Uses the Intel/ARM intrinsics to use INT8 operations @@ -181,15 +209,39 @@ def schedule_conv_NCHWc_cpu_1x1_int8(s, cfg, data, conv_out, last, int32_lanes=1 lower-numerical-precision-deep-learning-inference-and-training """ oh_factor, ow_factor = cfg["tile_oh"].val, cfg["tile_ow"].size[-1] - _, _, _, _, ic_bn = get_const_tuple(data.shape) + _, _, _, _, ic_bn = get_const_tuple(data_vec.shape) _, _, _, _, oc_bn = get_const_tuple(conv_out.shape) - # schedule data - A = data - if isinstance(s[A].op, tvm.tensor.ComputeOp): - batch, ic_chunk, ih, iw, ic_block = s[A].op.axis - parallel_axis = s[A].fuse(batch, ic_chunk, ih) - s[A].parallel(parallel_axis) + # schedule pad + if isinstance(s[data_vec].op, tvm.tensor.ComputeOp) \ + and "pad" in data_vec.op.tag: + batch, ic_chunk, ih, iw, ic_block = s[data_vec].op.axis + parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih) + s[data_vec].parallel(parallel_axis) + data_vec = data_vec.op.input_tensors[0] + + if autotvm.GLOBAL_SCOPE.in_tuning: + # only in autotuning, input data of conv2d_NCHWc will be 4-D. + # skip this part during tuning to make records accurate. + # this part will be folded during Relay fold_constant pass. + s[data_vec].pragma(s[data_vec].op.axis[0], "debug_skip_region") + s[kernel_vec].pragma(s[kernel_vec].op.axis[0], "debug_skip_region") + elif isinstance(kernel_vec.op, tvm.tensor.ComputeOp) and \ + kernel_vec.name == 'kernel_vec': + # data and kernel are not pre-computed, schedule layout transform here. + # this should only be used by x86 conv2d_nchw, which is for + # testing purpose. + batch, ic_chunk, ih, ic_block, iw = s[data_vec].op.axis + parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih) + s[data_vec].parallel(parallel_axis) + + oc_chunk, ic_chunk, oh, ow, ic_block, oc_block = s[kernel_vec].op.axis + s[kernel_vec].reorder(oc_chunk, oh, ic_chunk, ow, ic_block, oc_block) + oc_bn = cfg["tile_oc"].size[-1] + if oc_bn > 1: + s[kernel_vec].vectorize(oc_block) + parallel_axis = s[kernel_vec].fuse(oc_chunk, oh) + s[kernel_vec].parallel(parallel_axis) C, O = conv_out, last CC = s.cache_write(C, 'global') diff --git a/topi/python/topi/generic/extern.py b/topi/python/topi/generic/extern.py index e895385e8b66f..977c53763a523 100644 --- a/topi/python/topi/generic/extern.py +++ b/topi/python/topi/generic/extern.py @@ -21,7 +21,6 @@ import tvm from .. import cpp -@tvm.target.generic_func def schedule_extern(outs): """Schedule for an extern op followed by injective operations. diff --git a/topi/python/topi/generic/injective.py b/topi/python/topi/generic/injective.py index 2aff96f9636c1..6f1013c06dbd5 100644 --- a/topi/python/topi/generic/injective.py +++ b/topi/python/topi/generic/injective.py @@ -20,7 +20,6 @@ import tvm -@tvm.target.override_native_generic_func("schedule_injective_from_existing") def schedule_injective_from_existing(sch, out): """Schedule for injective op from existing schedule. @@ -36,10 +35,9 @@ def schedule_injective_from_existing(sch, out): sch: Schedule The updated schedule. """ - sch[out].fuse(s[out].op.axis) + sch[out].fuse(*sch[out].op.axis) return sch -@tvm.target.override_native_generic_func("schedule_injective") def schedule_injective(outs): """Schedule for injective op. @@ -64,22 +62,5 @@ def schedule_injective(outs): schedule_injective_from_existing(s, x) return s -@tvm.target.generic_func -def schedule_concatenate(outs): - """Schedule for concatenate op. - - Parameters - ---------- - outs: Array of Tensor - The computation graph description of reduce in the format - of an array of tensors. - - Returns - ------- - sch: Schedule - The computation schedule for the op. - """ - return schedule_injective(outs) - schedule_elemwise = schedule_injective schedule_broadcast = schedule_injective diff --git a/topi/python/topi/generic/nn.py b/topi/python/topi/generic/nn.py index 8831829412027..ab926e8fb162a 100644 --- a/topi/python/topi/generic/nn.py +++ b/topi/python/topi/generic/nn.py @@ -18,7 +18,6 @@ """Generic nn operators""" from __future__ import absolute_import as _abs import tvm -from .. import cpp def _default_schedule(outs, auto_inline): """Default schedule for llvm.""" @@ -34,7 +33,6 @@ def _default_schedule(outs, auto_inline): return s -@tvm.target.generic_func def schedule_conv1d_ncw(outs): """Schedule for conv1d_ncw @@ -52,7 +50,6 @@ def schedule_conv1d_ncw(outs): return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_conv1d_nwc(outs): """Schedule for conv1d_nwc @@ -70,7 +67,6 @@ def schedule_conv1d_nwc(outs): return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_conv2d_hwcn(outs): """Schedule for conv2d_hwcn @@ -88,7 +84,6 @@ def schedule_conv2d_hwcn(outs): return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_conv2d_nchw(outs): """Schedule for conv2d_nchw @@ -106,7 +101,6 @@ def schedule_conv2d_nchw(outs): return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_conv2d_nhwc_pack(outs): """Schedule for conv2d_nhwc_pack @@ -124,7 +118,6 @@ def schedule_conv2d_nhwc_pack(outs): return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_conv2d_nhwc(outs): """Schedule for conv2d_nhwc @@ -142,7 +135,6 @@ def schedule_conv2d_nhwc(outs): return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_conv2d_NCHWc(outs): """Schedule for conv2d_NCHW[x]c @@ -161,7 +153,6 @@ def schedule_conv2d_NCHWc(outs): return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_conv2d_NCHWc_int8(outs): """Schedule for conv2d_NCHW[x]c_int8 @@ -180,7 +171,6 @@ def schedule_conv2d_NCHWc_int8(outs): return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_conv2d_winograd_weight_transform(outs): """Schedule for weight transformation of winograd @@ -210,7 +200,6 @@ def schedule_conv2d_winograd_weight_transform(outs): return s -@tvm.target.generic_func def schedule_conv2d_winograd_without_weight_transform(outs): """Schedule for winograd without weight transformation @@ -228,7 +217,6 @@ def schedule_conv2d_winograd_without_weight_transform(outs): return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_conv2d_winograd_nnpack_weight_transform(outs): """Schedule for weight transformation of winograd Parameters @@ -245,23 +233,7 @@ def schedule_conv2d_winograd_nnpack_weight_transform(outs): s = tvm.create_schedule([x.op for x in outs]) return s -@tvm.target.generic_func -def schedule_conv2d_winograd_nnpack_without_weight_transform(outs): - """Schedule for winograd without weight transformation - Parameters - ---------- - outs: Array of Tensor - The computation graph description of this operator - in the format of an array of tensors. - Returns - ------- - sch: Schedule - The computation schedule for the op. - """ - return _default_schedule(outs, False) - -@tvm.target.generic_func def schedule_conv3d_ncdhw(outs): """Schedule for conv3d_ncdhw @@ -278,7 +250,6 @@ def schedule_conv3d_ncdhw(outs): """ return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_conv3d_ndhwc(outs): """Schedule for conv3d_ndhwc @@ -295,7 +266,6 @@ def schedule_conv3d_ndhwc(outs): """ return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_conv2d_transpose_nchw(outs): """Schedule for conv2d_transpose_nchw @@ -313,7 +283,6 @@ def schedule_conv2d_transpose_nchw(outs): return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_conv1d_transpose_ncw(outs): """Schedule for conv1d_transpose_ncw @@ -331,7 +300,6 @@ def schedule_conv1d_transpose_ncw(outs): return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_depthwise_conv2d_nchw(outs): """Schedule for depthwise_conv2d_nchw @@ -349,7 +317,6 @@ def schedule_depthwise_conv2d_nchw(outs): return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_depthwise_conv2d_nhwc(outs): """Schedule for depthwise_conv2d_nhwc Parameters @@ -366,7 +333,6 @@ def schedule_depthwise_conv2d_nhwc(outs): return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_depthwise_conv2d_NCHWc(outs): """Schedule for depthwise_conv2d_NCHWc Parameters @@ -383,7 +349,6 @@ def schedule_depthwise_conv2d_NCHWc(outs): return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_group_conv2d_nchw(outs): """Schedule for group_conv2d_nchw @@ -401,7 +366,6 @@ def schedule_group_conv2d_nchw(outs): return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_deformable_conv2d_nchw(outs): """Schedule for deformable_conv2d_nchw @@ -419,7 +383,6 @@ def schedule_deformable_conv2d_nchw(outs): return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_bitserial_conv2d_nchw(outs): """Schedule for bitserial_conv2d_nchw @@ -437,7 +400,6 @@ def schedule_bitserial_conv2d_nchw(outs): return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_bitserial_conv2d_nhwc(outs): """Schedule for bitserial_conv2d_nhwc @@ -455,7 +417,6 @@ def schedule_bitserial_conv2d_nhwc(outs): return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_bitserial_dense(outs): """Schedule for bitserial_dense Parameters @@ -471,7 +432,6 @@ def schedule_bitserial_dense(outs): return _default_schedule(outs, False) -@tvm.target.override_native_generic_func("schedule_reduce") def schedule_reduce(outs): """Schedule for reduction @@ -489,7 +449,6 @@ def schedule_reduce(outs): return _default_schedule(outs, True) -@tvm.target.override_native_generic_func("schedule_softmax") def schedule_softmax(outs): """Schedule for softmax @@ -507,7 +466,6 @@ def schedule_softmax(outs): return _default_schedule(outs, False) -@tvm.target.override_native_generic_func("schedule_dense") def schedule_dense(outs): """Schedule for dense @@ -525,7 +483,6 @@ def schedule_dense(outs): return _default_schedule(outs, False) -@tvm.target.override_native_generic_func("schedule_pool") def schedule_pool(outs, layout): """Schedule for pool @@ -546,7 +503,6 @@ def schedule_pool(outs, layout): return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_pool_grad(outs): """Schedule for pool_grad @@ -559,7 +515,6 @@ def schedule_pool_grad(outs): return _default_schedule(outs, False) -@tvm.target.override_native_generic_func("schedule_adaptive_pool") def schedule_adaptive_pool(outs): """Schedule for adaptive pool @@ -595,7 +550,6 @@ def schedule_binarize_pack(outs): return _default_schedule(outs, False) -@tvm.target.override_native_generic_func("schedule_bitpack") def schedule_bitpack(outs): """Schedule for bitpack Parameters @@ -630,7 +584,6 @@ def schedule_binary_dense(outs): return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_lrn(outs): """Schedule for lrn @@ -645,12 +598,9 @@ def schedule_lrn(outs): sch: Schedule The computation schedule for the op. """ - target = tvm.target.Target.current(allow_none=False) - cpp_target = cpp.TEST_create_target(target.target_name) - return cpp.generic.default_schedule(cpp_target, outs, False) + return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_sparse_dense(outs): """Schedule for sparse_dense @@ -667,7 +617,7 @@ def schedule_sparse_dense(outs): """ return _default_schedule(outs, False) -@tvm.target.generic_func + def schedule_sparse_transpose(outs): """Schedule for sparse_transpose @@ -684,8 +634,19 @@ def schedule_sparse_transpose(outs): """ return _default_schedule(outs, False) -@tvm.target.generic_func + def schedule_batch_matmul(outs): - target = tvm.target.Target.current(allow_none=False) - cpp_target = cpp.TEST_create_target(target.target_name) - return cpp.generic.default_schedule(cpp_target, outs, False) + """Schedule for batch_matmul + + Parameters + ---------- + outs: Array of Tensor + The computation graph description of sparse_transpose + in the format of an array of tensors. + + Returns + ------- + sch: Schedule + The computation schedule for the op. + """ + return _default_schedule(outs, False) diff --git a/topi/python/topi/generic/search.py b/topi/python/topi/generic/search.py index 41045e492e539..69f236684bb3b 100644 --- a/topi/python/topi/generic/search.py +++ b/topi/python/topi/generic/search.py @@ -17,10 +17,8 @@ # pylint: disable=invalid-name, no-member """Generic search operators""" from __future__ import absolute_import as _abs -import tvm from .vision import _default_schedule -@tvm.target.generic_func def schedule_argwhere(outs): """Schedule for argwhere operator. diff --git a/topi/python/topi/generic/sort.py b/topi/python/topi/generic/sort.py index 5462f2ce917c2..e28ab2c8b20c6 100644 --- a/topi/python/topi/generic/sort.py +++ b/topi/python/topi/generic/sort.py @@ -20,7 +20,6 @@ import tvm from .vision import _default_schedule -@tvm.target.generic_func def schedule_argsort(outs): """Schedule for argsort operator. @@ -37,7 +36,6 @@ def schedule_argsort(outs): """ return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_topk(outs): """Schedule for topk operator. diff --git a/topi/python/topi/generic/vision.py b/topi/python/topi/generic/vision.py index 85d9153e6424b..d6e80df9b89d6 100644 --- a/topi/python/topi/generic/vision.py +++ b/topi/python/topi/generic/vision.py @@ -33,7 +33,6 @@ def _default_schedule(outs, auto_inline): s[x].fuse(s[x].op.axis) return s -@tvm.target.generic_func def schedule_reorg(outs): """Schedule for reorg @@ -52,7 +51,6 @@ def schedule_reorg(outs): cpp_target = cpp.TEST_create_target(target.target_name) return cpp.generic.default_schedule(cpp_target, outs, False) -@tvm.target.generic_func def schedule_get_valid_counts(outs): """Schedule for get_valid_counts @@ -69,7 +67,6 @@ def schedule_get_valid_counts(outs): """ return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_nms(outs): """Schedule for non-maximum suppression @@ -86,7 +83,6 @@ def schedule_nms(outs): """ return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_multibox_prior(outs): """Schedule for multibox_prior @@ -103,7 +99,6 @@ def schedule_multibox_prior(outs): """ return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_multibox_transform_loc(outs): """Schedule for multibox_transform_loc @@ -121,7 +116,6 @@ def schedule_multibox_transform_loc(outs): """ return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_multibox_detection(outs): """Schedule for multibox_detection @@ -138,7 +132,6 @@ def schedule_multibox_detection(outs): """ return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_roi_align(outs): """Schedule for roi_align @@ -155,7 +148,6 @@ def schedule_roi_align(outs): """ return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_roi_pool(outs): """Schedule for roi_align @@ -172,7 +164,6 @@ def schedule_roi_pool(outs): """ return _default_schedule(outs, False) -@tvm.target.generic_func def schedule_proposal(outs): """Schedule for proposal operator. diff --git a/topi/python/topi/hls/injective.py b/topi/python/topi/hls/injective.py index de584287a90eb..d4ccf41ed26da 100644 --- a/topi/python/topi/hls/injective.py +++ b/topi/python/topi/hls/injective.py @@ -17,9 +17,7 @@ # pylint: disable=invalid-name, unused-variable, """Schedule for composition of injective operator""" import tvm -from .. import generic -@generic.schedule_injective_from_existing.register(["hls"]) def schedule_injective_from_existing(sch, out): """Schedule for injective op from existing schedule. @@ -40,7 +38,6 @@ def schedule_injective_from_existing(sch, out): sch[out].bind(px, tvm.thread_axis("pipeline")) return sch -@generic.schedule_injective.register(["hls"]) def schedule_injective(outs): """Schedule for injective op. diff --git a/topi/python/topi/hls/nn.py b/topi/python/topi/hls/nn.py index d73cb9c847f75..06cf3298682dd 100644 --- a/topi/python/topi/hls/nn.py +++ b/topi/python/topi/hls/nn.py @@ -19,7 +19,6 @@ from __future__ import absolute_import as _abs import tvm from .. import tag -from .. import generic def _schedule_conv2d(outs): @@ -52,7 +51,6 @@ def traverse(OP): return s -@generic.schedule_conv2d_nchw.register(["hls"]) def schedule_conv2d_nchw(outs): """Schedule for conv2d_nchw @@ -70,7 +68,6 @@ def schedule_conv2d_nchw(outs): return _schedule_conv2d(outs) -@generic.schedule_conv2d_nhwc.register(["hls"]) def schedule_conv2d_nhwc(outs): """Schedule for conv2d_nhwc @@ -88,7 +85,6 @@ def schedule_conv2d_nhwc(outs): return _schedule_conv2d(outs) -@generic.schedule_conv2d_NCHWc.register(["hls"]) def schedule_conv2d_NCHWc(outs): """Schedule for conv2d_NCHW[x]c @@ -106,7 +102,6 @@ def schedule_conv2d_NCHWc(outs): return _schedule_conv2d(outs) -@generic.schedule_conv2d_transpose_nchw.register(["hls"]) def schedule_conv2d_transpose_nchw(outs): """Schedule for conv2d_transpose_nchw @@ -124,7 +119,6 @@ def schedule_conv2d_transpose_nchw(outs): return _schedule_conv2d(outs) -@generic.schedule_depthwise_conv2d_nchw.register(["hls"]) def schedule_depthwise_conv2d_nchw(outs): """Schedule for depthwise_conv2d_nchw @@ -142,7 +136,6 @@ def schedule_depthwise_conv2d_nchw(outs): return _schedule_conv2d(outs) -@generic.schedule_depthwise_conv2d_nhwc.register(["hls"]) def schedule_depthwise_conv2d_nhwc(outs): """Schedule for depthwise_conv2d_nhwc Parameters @@ -158,7 +151,6 @@ def schedule_depthwise_conv2d_nhwc(outs): """ return _schedule_conv2d(outs) -@generic.schedule_bitserial_conv2d_nchw.register(["hls"]) def schedule_bitserial_conv2d_nchw(outs): """Schedule for bitserial_conv2d_nchw @@ -176,7 +168,6 @@ def schedule_bitserial_conv2d_nchw(outs): return _schedule_conv2d(outs) -@generic.schedule_bitserial_conv2d_nhwc.register(["hls"]) def schedule_bitserial_conv2d_nhwc(outs): """Schedule for bitserial_conv2d_nhwc @@ -194,7 +185,6 @@ def schedule_bitserial_conv2d_nhwc(outs): return _schedule_conv2d(outs) -@generic.schedule_reduce.register(["hls"]) def schedule_reduce(outs): """Schedule for reduction @@ -241,7 +231,6 @@ def traverse(OP): return s -@generic.schedule_softmax.register(["hls"]) def schedule_softmax(outs): """Schedule for softmax @@ -286,7 +275,6 @@ def schedule_softmax(outs): return s -@generic.schedule_dense.register(["hls"]) def schedule_dense(outs): """Schedule for dense @@ -330,7 +318,6 @@ def traverse(OP): return s -@generic.schedule_pool.register(["hls"]) def schedule_pool(outs, layout): """Schedule for pool @@ -374,7 +361,6 @@ def traverse(OP): return s -@generic.schedule_adaptive_pool.register(["hls"]) def schedule_adaptive_pool(outs): """Schedule for adaptive_pool diff --git a/topi/python/topi/intel_graphics/__init__.py b/topi/python/topi/intel_graphics/__init__.py index 5223d2d2bbc93..5f82fe7587866 100644 --- a/topi/python/topi/intel_graphics/__init__.py +++ b/topi/python/topi/intel_graphics/__init__.py @@ -20,3 +20,5 @@ from __future__ import absolute_import as _abs from .conv2d import * +from . import conv2d_alter_op +from .depthwise_conv2d import * diff --git a/topi/python/topi/intel_graphics/conv2d.py b/topi/python/topi/intel_graphics/conv2d.py index 65ea590905f91..0a0dc468f31a2 100644 --- a/topi/python/topi/intel_graphics/conv2d.py +++ b/topi/python/topi/intel_graphics/conv2d.py @@ -20,35 +20,28 @@ from __future__ import absolute_import as _abs import tvm - from tvm import autotvm from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity -from tvm.autotvm.task.topi_integration import deserialize_args -from tvm.autotvm.task import get_config -from ..nn.conv2d import conv2d, conv2d_NCHWc, conv2d_alter_layout, conv2d_infer_layout -from ..nn.util import get_pad_tuple -from ..nn.depthwise_conv2d import depthwise_conv2d_nchw -from ..nn import pad -from .. import tag -from .. import generic + +from .. import nn from .. import util -from ..util import simplify, get_const_tuple +from ..util import simplify, get_const_tuple, traverse_inline def _get_default_config(cfg, data, kernel, strides, padding, out_dtype, is_depthwise=False): if is_depthwise: raise RuntimeError("Depthwise not supported for intel graphics.") + else: + batch_size, in_channel, height, width = get_const_tuple(data.shape) + out_channel, _, hkernel, _ = get_const_tuple(kernel.shape) + HSTR, _ = strides - batch_size, in_channel, height, width = get_const_tuple(data.shape) - out_channel, _, hkernel, _ = get_const_tuple(kernel.shape) - HSTR, _ = strides - - ic_bn = 1 - oc_bn, oc_bn_upper = 16, 16 - for i in range(oc_bn_upper, 0, -1): - if out_channel % i == 0: - oc_bn = i - break + ic_bn = 1 + oc_bn, oc_bn_upper = 16, 16 + for i in range(oc_bn_upper, 0, -1): + if out_channel % i == 0: + oc_bn = i + break if HSTR == 2: if out_channel + hkernel == 515: @@ -73,17 +66,12 @@ def _get_default_config(cfg, data, kernel, strides, padding, out_dtype, is_depth cfg["block_ow"] = OtherOptionEntity(block_ow) -def _create_schedule_template(cfg, data, kernel, strides, padding, dilation, layout): +def _create_schedule_template(cfg, dshape, kshape, strides, padding, dilation): """Create schedule configuration from input arguments""" - dshape = get_const_tuple(data.shape) - kshape = get_const_tuple(kernel.shape) - if layout == 'NCHW': - n, ic, h, w = dshape - oc, _, kh, kw = kshape - else: - raise ValueError("Not support this layout {} with " - "schedule template.".format(layout)) - pt, pl, pb, pr = get_pad_tuple(padding, kernel) + n, ic, h, w = dshape + oc, _, kh, kw = kshape + + pt, pl, pb, pr = nn.get_pad_tuple(padding, (kh, kw)) sh, sw = strides if isinstance(strides, (tuple, list)) else (strides, strides) oh = (h - kh + pt + pb) // sh + 1 ow = (w - kw + pl + pr) // sw + 1 @@ -159,108 +147,59 @@ def tile_and_bind3d(s, tensor, z, y, x, z_factor=2, y_factor=None, x_factor=None # We define schedule template in this function instead of # declaration function since actual input arguments need # to be altered by the schedule selected. -@autotvm.task.register("topi_intel_graphics_conv2d_NCHWc") -def __topi_nn_conv2d_NCHWc(*args, **kwargs): - assert not kwargs, "Do not support kwargs in template function call" - data, kernel, strides, padding, dilation, layout, dtype = deserialize_args(args) - raw_data_shape = get_const_tuple(data.shape) - raw_kernel_shape = get_const_tuple(kernel.shape) - - # get config here - cfg = get_config() - _create_schedule_template(cfg, data, kernel, strides, padding, dilation, layout) - cfg.add_flop(1) - - # change shape with the value in config - ic_bn = cfg["tile_ic"].val if hasattr(cfg["tile_ic"], "val") else cfg["tile_ic"].size[-1] - oc_bn = cfg["tile_oc"].val if hasattr(cfg["tile_oc"], "val") else cfg["tile_oc"].size[-1] +# @autotvm.task.register("topi_intel_graphics_conv2d_NCHWc") +# def __topi_nn_conv2d_NCHWc(*args, **kwargs): +# assert not kwargs, "Do not support kwargs in template function call" +# data, kernel, strides, padding, dilation, layout, dtype = deserialize_args(args) +# raw_data_shape = get_const_tuple(data.shape) +# raw_kernel_shape = get_const_tuple(kernel.shape) +# +# # get config here +# cfg = get_config() +# _create_schedule_template(cfg, data, kernel, strides, padding, dilation, layout) +# cfg.add_flop(1) +# +# # change shape with the value in config +# ic_bn = cfg["tile_ic"].val if hasattr(cfg["tile_ic"], "val") else cfg["tile_ic"].size[-1] +# oc_bn = cfg["tile_oc"].val if hasattr(cfg["tile_oc"], "val") else cfg["tile_oc"].size[-1] +# +# new_data_shape = (raw_data_shape[0], raw_data_shape[1] // ic_bn, +# raw_data_shape[2], raw_data_shape[3], ic_bn) +# new_kernel_shape = (raw_kernel_shape[0] // oc_bn, raw_kernel_shape[1] // ic_bn, +# raw_kernel_shape[2], raw_kernel_shape[3], ic_bn, oc_bn) +# new_data = tvm.placeholder(new_data_shape, data.dtype) +# new_kernel = tvm.placeholder(new_kernel_shape, kernel.dtype) +# +# C = _decl_cl_spatialpack_NCHWc(cfg, new_data, new_kernel, strides, padding, dilation, dtype) +# s = _schedule_conv2d_NCHWc(cfg, [C]) +# +# return s, [new_data, new_kernel, C] - new_data_shape = (raw_data_shape[0], raw_data_shape[1] // ic_bn, - raw_data_shape[2], raw_data_shape[3], ic_bn) - new_kernel_shape = (raw_kernel_shape[0] // oc_bn, raw_kernel_shape[1] // ic_bn, - raw_kernel_shape[2], raw_kernel_shape[3], ic_bn, oc_bn) - new_data = tvm.placeholder(new_data_shape, data.dtype) - new_kernel = tvm.placeholder(new_kernel_shape, kernel.dtype) - - C = _decl_cl_spatialpack_NCHWc(cfg, new_data, new_kernel, strides, padding, dilation, dtype) - s = _schedule_conv2d_NCHWc(cfg, [C]) - - return s, [new_data, new_kernel, C] - -@conv2d_alter_layout.register(["intel_graphics"]) -def _alter_conv2d_layout(attrs, inputs, tinfo, F): - copy_inputs = list(inputs) - new_attrs = {k : attrs[k] for k in attrs.keys()} - - if F.__name__ == 'tvm.relay.op': - # Derive channels for frontends (e.g ONNX) that miss "channel" field. - new_attrs["channels"] = inputs[1].checked_type.shape[attrs['kernel_layout'].index('O')] - - data, kernel = tinfo[0], tinfo[1] - batch_size, in_channel, height, width = get_const_tuple(data.shape) - - groups = attrs.get_int("groups") - out_channel = attrs.get_int("channels") - padding = attrs.get_int_tuple("padding") - strides = attrs.get_int_tuple("strides") - dilation = attrs.get_int_tuple("dilation") - out_dtype = attrs["out_dtype"] - - layout_name = 'data_layout' - layout = attrs[layout_name] - kh, kw = attrs.get_int_tuple("kernel_size") - - dtype = data.dtype - out_dtype = dtype if out_dtype in ("same", "") else out_dtype - is_depthwise = groups == in_channel and groups == out_channel - - # only optimize for NCHW - if layout != 'NCHW': - return None - if groups != 1 and not is_depthwise: - return None - - dispatch_ctx = autotvm.task.DispatchContext.current - target = tvm.target.Target.current() - - # query schedule and fallback if necessary - workload = autotvm.task.args_to_workload( - [data, kernel, strides, padding, dilation, out_dtype], depthwise_conv2d_nchw) \ - if is_depthwise else \ - autotvm.task.args_to_workload( - [data, kernel, strides, padding, dilation, layout, out_dtype], conv2d) - if is_depthwise: - return None - cfg = dispatch_ctx.query(target, workload) - if cfg.is_fallback: - _get_default_config(cfg, data, kernel, strides, padding, out_dtype, is_depthwise) - ic_bn = cfg["tile_ic"].val if hasattr(cfg["tile_ic"], "val") else cfg["tile_ic"].size[-1] - oc_bn = cfg["tile_oc"].val if hasattr(cfg["tile_oc"], "val") else cfg["tile_oc"].size[-1] +def _pack_data(data, kernel, ic_bn, oc_bn): + n, _, ih, iw = get_const_tuple(data.shape) + oc, ic, kh, kw = get_const_tuple(kernel.shape) - new_attrs[layout_name] = 'NCHW%dc' % ic_bn - new_attrs['out_layout'] = 'NCHW%dc' % oc_bn + ic_chunk = ic // ic_bn + oc_chunk = oc // oc_bn - new_data = tvm.placeholder((batch_size, in_channel//ic_bn, height, width, ic_bn), - dtype=data.dtype) + data = tvm.compute((n, ic_chunk, ih, iw, ic_bn), + lambda bs, c, h, w, vc: data[bs, c*ic_bn + vc, h, w], + name="data_vec") - out_channel, _, kh, kw = get_const_tuple(kernel.shape) - # (oc, ic, h, w) -> (OC, IC, h, w, ic, oc) - new_attrs['kernel_layout'] = 'OIHW%di%do' % (ic_bn, oc_bn) + kernel = tvm.compute( + (oc_chunk, ic_chunk, kh, kw, ic_bn, oc_bn), + lambda occ, icc, k_h, k_w, icb, ocb: + kernel[occ * oc_bn + ocb, + icc * ic_bn + icb, k_h, k_w], + name="kernel_vec") - # Store altered operator's config - new_kernel = tvm.placeholder((out_channel//oc_bn, in_channel//ic_bn, kh, kw, ic_bn, oc_bn), - dtype=kernel.dtype) - new_workload = autotvm.task.args_to_workload( - [new_data, new_kernel, strides, padding, dilation, new_attrs[layout_name], - new_attrs['out_layout'], out_dtype], conv2d_NCHWc) + return data, kernel - dispatch_ctx.update(target, new_workload, cfg) - return F.nn.contrib_conv2d_nchwc(*copy_inputs, **new_attrs) -@autotvm.register_topi_compute(conv2d_NCHWc, 'intel_graphics', 'direct') -def _decl_conv2d(cfg, data, kernel, strides, padding, dilation, - layout, out_layout, out_dtype='float32'): +@autotvm.register_topi_compute("conv2d_NCHWc.intel_graphics") +def conv2d_NCHWc(cfg, data, kernel, strides, padding, dilation, layout, + out_layout, out_dtype='float32'): """Conv2D operator for Intel Graphics backend. Parameters @@ -285,96 +224,48 @@ def _decl_conv2d(cfg, data, kernel, strides, padding, dilation, output : tvm.Tensor 4-D with shape [batch, out_channel, out_height, out_width] """ + if len(data.shape) == 5: + batch, ic_chunk, ih, iw, ic_bn = get_const_tuple(data.shape) + oc_chunk, _, kernel_height, kernel_width, _, oc_bn = get_const_tuple(kernel.shape) + in_channel = ic_chunk * ic_bn + num_filter = oc_chunk * oc_bn + else: + batch, in_channel, ih, iw = get_const_tuple(data.shape) + num_filter, _, kernel_height, kernel_width = get_const_tuple(kernel.shape) + dh, dw = dilation if isinstance(dilation, (tuple, list)) else (dilation, dilation) + pad_top, pad_left, pad_down, pad_right = nn.get_pad_tuple(padding, (kernel_height, kernel_width)) assert (dh, dw) == (1, 1), "Does not support dilation" + if isinstance(strides, (tuple, list)): + stride_h, stride_w = strides + else: + stride_h, stride_w = strides, strides + + data_shape = (batch, in_channel, ih, iw) + kernel_shape = (num_filter, in_channel, kernel_height, kernel_width) + _create_schedule_template(cfg, data_shape, kernel_shape, strides, padding, dilation) - n, ic_chunk, ih, iw, ic_bn = get_const_tuple(data.shape) - oc_chunk, _, kernel_height, kernel_width, _, oc_bn = get_const_tuple(kernel.shape) - in_channel = ic_chunk * ic_bn - num_filter = oc_chunk * oc_bn if cfg.is_fallback: - _get_default_config(cfg, tvm.placeholder((n, in_channel, ih, iw), dtype=data.dtype), + _get_default_config(cfg, tvm.placeholder((batch, in_channel, ih, iw), dtype=data.dtype), tvm.placeholder((num_filter, in_channel, kernel_height, kernel_width), dtype=kernel.dtype), strides, padding, out_dtype) - return _decl_cl_spatialpack_NCHWc(cfg, data, kernel, strides, padding, dilation, out_dtype) - - -@conv2d_infer_layout.register("intel_graphics") -def _conv2d_infer_layout(workload, cfg): - _, data, kernel, strides, padding, dilation, layout, dtype = workload - batch_size, in_channel, in_height, in_width = data[:-1] - out_channel, _, k_height, k_width = kernel[:-1] - out_height = (in_height + 2 * padding[0] - k_height) // strides[0] + 1 - out_width = (in_width + 2 * padding[1] - k_width) // strides[1] + 1 - tile_ic, tile_oc = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1] - in_shape = (batch_size, in_channel // tile_ic, in_height, in_width, tile_ic) - in_layout = "NCHW%dc" % tile_ic - out_shape = (batch_size, out_channel // tile_oc, out_height, out_width, tile_oc) - out_layout = "NCHW%dc" % tile_oc - return ((in_shape, in_layout),), ((out_shape, out_layout),) - - -@autotvm.register_topi_schedule(generic.schedule_conv2d_NCHWc, 'intel_graphics', ['direct']) -def _schedule_conv2d_NCHWc(cfg, outs): - """Schedule for conv2d_nchw for Intel Graphics - - Parameters - ---------- - outs: Array of Tensor - The computation graph description of conv2d_nchw - in the format of an array of tensors. - - Returns - ------- - s: Schedule - The computation schedule for conv2d_nchw. - """ - outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs - s = tvm.create_schedule([x.op for x in outs]) - scheduled_ops = [] - - def traverse(op): - """inline all one-to-one-mapping operators except the last stage (output)""" - if tag.is_injective(op.tag): - if op not in s.outputs: - s[op].compute_inline() - for tensor in op.input_tensors: - if tensor.op.input_tensors and tensor.op not in scheduled_ops: - traverse(tensor.op) - if "conv" in op.tag: - _schedule_cl_spatialpack_NCHWc(cfg, s, op) - - scheduled_ops.append(op) - - traverse(outs[0].op) - - return s - -def _decl_cl_spatialpack_NCHWc(cfg, data, kernel, strides, padding, dilation, out_dtype='float16'): - batch, in_channel, in_height, in_width, vc = [util.get_const_int(x) for x in data.shape] - in_channel *= vc - num_filter, channel, kernel_h, kernel_w, ci, co = [util.get_const_int(x) for x in kernel.shape] - num_filter *= co - pad_top, pad_left, pad_down, pad_right = get_pad_tuple(padding, kernel) - - ic_bn = vc - assert vc == ci + ic_bn = cfg["tile_ic"].val if hasattr(cfg["tile_ic"], "val") else cfg["tile_ic"].size[-1] + oc_bn = cfg["tile_oc"].val if hasattr(cfg["tile_oc"], "val") else cfg["tile_oc"].size[-1] - if isinstance(strides, (tuple, list)): - stride_h, stride_w = strides - else: - stride_h, stride_w = strides, strides + # Pack data if raw 4-D data is provided. + if len(data.shape) == 4: + data, kernel = _pack_data(data, kernel, ic_bn, oc_bn) out_channel = num_filter - out_height = simplify((in_height - kernel_h + pad_top + pad_down) // stride_h + 1) - out_width = simplify((in_width - kernel_w + pad_left + pad_right) // stride_w + 1) - oshape = (batch, out_channel // co, out_height, out_width, co) + out_height = simplify((ih - kernel_height + pad_top + pad_down) // stride_h + 1) + out_width = simplify((iw - kernel_width + pad_left + pad_right) // stride_w + 1) + oshape = (batch, out_channel // oc_bn, out_height, out_width, oc_bn) rc = tvm.reduce_axis((0, in_channel), name='rc') - ry = tvm.reduce_axis((0, kernel_h), name='ry') - rx = tvm.reduce_axis((0, kernel_w), name='rx') + ry = tvm.reduce_axis((0, kernel_height), name='ry') + rx = tvm.reduce_axis((0, kernel_width), name='rx') block_h = cfg["block_oh"].val block_w = cfg["block_ow"].val @@ -388,7 +279,7 @@ def _decl_cl_spatialpack_NCHWc(cfg, data, kernel, strides, padding, dilation, ou if out_width % block_w != 0: c_w = (out_width // block_w + 1) * block_w - cshape = (batch, out_channel // co, c_h, c_w, co) + cshape = (batch, out_channel // oc_bn, c_h, c_w, oc_bn) pad_before = [0, 0, pad_top, pad_left, 0] pad_after = [0, 0, pad_down + c_h - out_height, pad_right + \ @@ -397,7 +288,7 @@ def _decl_cl_spatialpack_NCHWc(cfg, data, kernel, strides, padding, dilation, ou or pad_right + c_w - out_width != 0) DOUNPACK = (c_h - out_height != 0 or c_w - out_width != 0) if DOPAD: - temp = pad(data, pad_before, pad_after, name="pad_temp") + temp = nn.pad(data, pad_before, pad_after, name="pad_temp") else: temp = data @@ -406,33 +297,53 @@ def _decl_cl_spatialpack_NCHWc(cfg, data, kernel, strides, padding, dilation, ou lambda nn, ff, yy, xx, ff_v: \ tvm.sum( temp[nn, rc//ic_bn, yy * stride_h + ry, xx * stride_w + rx, rc%ic_bn]. \ - astype(out_dtype) * + astype(out_dtype) * kernel[ff, rc//ic_bn, ry, rx, rc%ic_bn, ff_v].astype(out_dtype), - axis=[rc, ry, rx]), tag="conv", name='conv') + axis=[rc, ry, rx]), tag="conv2d_NCHWc", name='conv2d_NCHWc') if DOUNPACK: output = tvm.compute( oshape, lambda nn, ff, yy, xx, ff_v: conv[nn][ff][yy][xx][ff_v], - name='output_unpack', tag="conv_unpack") + name='output_unpack', tag="conv2d_NCHWc_unpack") else: output = conv - return output +@autotvm.register_topi_schedule("conv2d_NCHWc.intel_graphics") +def schedule_conv2d_NCHWc(cfg, outs): + """Schedule for conv2d_nchw for Intel Graphics + + Parameters + ---------- + outs: Array of Tensor + The computation graph description of conv2d_nchw + in the format of an array of tensors. + + Returns + ------- + s: Schedule + The computation schedule for conv2d_nchw. + """ + outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs + s = tvm.create_schedule([x.op for x in outs]) + + def _callback(op): + """inline all one-to-one-mapping operators except the last stage (output)""" + if "conv2d_NCHWc" in op.tag: + _schedule_cl_spatialpack_NCHWc(cfg, s, op) + + traverse_inline(s, outs[0].op, _callback) + + return s + + def _schedule_cl_spatialpack_NCHWc(cfg, s, op): output = op.output(0) - conv = op.input_tensors[0] - if conv.op.name == "conv": - temp = s[conv].op.input_tensors[0] - kernel = s[conv].op.input_tensors[1] - temp_W = s.cache_read(temp, "warp", [conv]) - conv_L = s.cache_write(conv, "local") - SCHEDULE_OUTPUT = True - else: + if op.name == "conv2d_NCHWc": temp = op.input_tensors[0] kernel = op.input_tensors[1] temp_W = s.cache_read(temp, "warp", [output]) @@ -443,8 +354,32 @@ def _schedule_cl_spatialpack_NCHWc(cfg, s, op): s[output].compute_inline() conv = s.outputs[0] SCHEDULE_OUTPUT = False + else: # conv2d_NCHWc_unpack + conv = op.input_tensors[0] + temp = s[conv].op.input_tensors[0] + kernel = s[conv].op.input_tensors[1] + temp_W = s.cache_read(temp, "warp", [conv]) + conv_L = s.cache_write(conv, "local") + SCHEDULE_OUTPUT = True kernel_L = s.cache_read(kernel, "local", [conv_L]) + if temp.name == "pad_temp": + data = temp.op.input_tensors[0] + # TODO(@Laurawly): Do we need to schedule pad op here? + else: + data = temp + + if autotvm.GLOBAL_SCOPE.in_tuning: + # only in autotuning, input data of conv2d_NCHWc will be 4-D. + # skip this part during tuning to make records accurate. + # this part will be folded during Relay fold_constant pass. + s[data].pragma(s[data].op.axis[0], "debug_skip_region") + s[kernel].pragma(s[kernel].op.axis[0], "debug_skip_region") + elif isinstance(kernel.op, tvm.tensor.ComputeOp) and kernel.name == "kernel_vec": + # data and kernel are not pre-computed, schedule layout transform here. + # TODO(@Laurawly): Add schedule for data and kernel pack + pass + OUTPUT_BLOCK_HEIGHT = cfg["block_oh"].val OUTPUT_BLOCK_WIDTH = cfg["block_ow"].val @@ -515,19 +450,7 @@ def _schedule_cl_spatialpack_NCHWc(cfg, s, op): tile_and_bind3d(s, out, w, h, vc, 4, 8, 8) -def conv_arg_to_workload(data, kernel, strides, padding, layout, out_dtype): - """convert argument to workload""" - if len(kernel.shape) == 4: - raw_kernel = kernel - else: # the input kernel is transformed by alter_op_layout - shape = get_const_tuple(kernel.shape) - raw_kernel = tvm.placeholder((shape[0] * shape[4], shape[1], shape[2], shape[3]), - dtype=kernel.dtype) - return ('conv2d', ) + autotvm.task.args_to_workload( - [data, raw_kernel, strides, padding, layout, out_dtype]) - -@autotvm.register_topi_compute(conv2d, 'intel_graphics', 'direct') -def decl_conv2d(cfg, data, kernel, stride, padding, dilation, layout='NCHW', out_dtype='float32'): +def conv2d_nchw(data, kernel, stride, padding, dilation, out_dtype='float32'): """Conv2D operator for Intel Graphics backend. Parameters @@ -540,21 +463,18 @@ def decl_conv2d(cfg, data, kernel, stride, padding, dilation, layout='NCHW', out stride size, or [stride_height, stride_width] padding : int or a list/tuple of two ints padding size, or [pad_height, pad_width] - layout : str - layout of data Returns ------- output : tvm.Tensor 4-D with shape [batch, out_channel, out_height, out_width] """ - assert layout == 'NCHW', "only support NCHW convolution on intel gpu" assert data.shape[0].value == 1, "only support batch size=1 convolution on intel gpu" assert data.dtype == kernel.dtype, "Do not support inputs with different data types now." - return _decl_cl_spatialpack(cfg, data, kernel, stride, padding, layout, out_dtype) + return _decl_cl_spatialpack(data, kernel, stride, padding, out_dtype) + -@autotvm.task.register_topi_schedule(generic.schedule_conv2d_nchw, 'intel_graphics', ['direct']) -def schedule_conv2d_nchw(cfg, outs): +def schedule_conv2d_nchw(outs): """Schedule for conv2d_nchw for Intel Graphics Parameters @@ -569,28 +489,20 @@ def schedule_conv2d_nchw(cfg, outs): """ outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs s = tvm.create_schedule([x.op for x in outs]) - scheduled_ops = [] - def traverse(op): + def _callback(op): """inline all one-to-one-mapping operators except the last stage (output)""" - if tag.is_broadcast(op.tag): - if op not in s.outputs: - s[op].compute_inline() - for tensor in op.input_tensors: - if tensor.op.input_tensors and tensor.op not in scheduled_ops: - traverse(tensor.op) if 'conv2d' in op.tag: - _schedule_cl_spatialpack(cfg, s, op) - - scheduled_ops.append(op) + _schedule_cl_spatialpack(s, op) - traverse(outs[0].op) + traverse_inline(s, outs[0].op, _callback) return s -def _decl_cl_spatialpack(cfg, data, kernel, stride, padding, layout, out_dtype='float16'): + +def _decl_cl_spatialpack(data, kernel, stride, padding, out_dtype='float16'): batch, in_channel, in_height, in_width = [util.get_const_int(x) for x in data.shape] num_filter, channel, kernel_h, kernel_w = [util.get_const_int(x) for x in kernel.shape] - pad_top, pad_left, pad_down, pad_right = get_pad_tuple(padding, kernel) + pad_top, pad_left, pad_down, pad_right = nn.get_pad_tuple(padding, (kernel_h, kernel_w)) if isinstance(stride, (tuple, list)): stride_h, stride_w = stride @@ -606,8 +518,6 @@ def _decl_cl_spatialpack(cfg, data, kernel, stride, padding, layout, out_dtype=' ry = tvm.reduce_axis((0, kernel_h), name='ry') rx = tvm.reduce_axis((0, kernel_w), name='rx') - block_w = 1 - block_h = 1 if stride_h == 2: if num_filter + kernel_h == 515: block_h = 4 @@ -640,7 +550,7 @@ def _decl_cl_spatialpack(cfg, data, kernel, stride, padding, layout, out_dtype=' pad_before = [0, 0, pad_top, pad_left] pad_after = [0, 0, pad_down + c_h - block_h, pad_right + c_w - block_w] - temp = pad(data, pad_before, pad_after, name="pad_temp") + temp = nn.pad(data, pad_before, pad_after, name="pad_temp") nv = 16 if num_filter % nv != 0: @@ -667,13 +577,12 @@ def _decl_cl_spatialpack(cfg, data, kernel, stride, padding, layout, out_dtype=' oshape, lambda nn, ff, yy, xx: conv[nn][ff//nv][yy][xx][ff%nv], - name='output_unpack', tag='conv2d', - attrs={'workload': conv_arg_to_workload(data, kernel, stride, padding, - layout, out_dtype)}) + name='output_unpack', tag='conv2d') return output -def _schedule_cl_spatialpack(cfg, s, op): + +def _schedule_cl_spatialpack(s, op): output = op.output(0) _, _, out_height, out_width = [util.get_const_int(x) for x in output.shape] @@ -742,7 +651,7 @@ def _schedule_cl_spatialpack(cfg, s, op): s[kernel_vec].compute_inline() # schedule kernel_L - if "2_14" in s[conv].op.tag: + if OUTPUT_BLOCK_HEIGHT == 2 and OUTPUT_BLOCK_WIDTH == 14: s[kernel_L].compute_at(s[conv_L], ry) else: s[kernel_L].compute_at(s[conv_L], rx) diff --git a/topi/python/topi/intel_graphics/conv2d_alter_op.py b/topi/python/topi/intel_graphics/conv2d_alter_op.py new file mode 100644 index 0000000000000..d21a86909baf8 --- /dev/null +++ b/topi/python/topi/intel_graphics/conv2d_alter_op.py @@ -0,0 +1,102 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name,unused-variable,unused-argument,no-member +"""Conv2D alter op and legalize functions for x86""" + +import tvm +from tvm import relay +from tvm import autotvm + +from ..util import get_const_tuple +from ..nn import conv2d_alter_layout, conv2d_infer_layout +from .conv2d import _get_default_config + + +@conv2d_alter_layout.register(["intel_graphics"]) +def _alter_conv2d_layout(attrs, inputs, tinfos, out_type): + target = tvm.target.current_target(allow_none=False) + dispatch_ctx = autotvm.task.DispatchContext.current + if isinstance(dispatch_ctx, autotvm.task.ApplyGraphBest): + cfg = dispatch_ctx.query(target, None) + workload = cfg.workload + else: + _, outs = relay.backend.compile_engine.select_implement( + relay.op.get("nn.conv2d"), attrs, tinfos, out_type, target) + workload = autotvm.task.get_workload(outs) + if workload is None: + # The best implementation is not an AutoTVM template, + # we then assume it's not necessary to alter this op. + return None + cfg = dispatch_ctx.query(target, workload) + + topi_tmpl = workload[0] + new_attrs = {k : attrs[k] for k in attrs.keys()} + + padding = attrs.get_int_tuple("padding") + strides = attrs.get_int_tuple("strides") + dilation = attrs.get_int_tuple("dilation") + data_layout = attrs["data_layout"] + kernel_layout = attrs["kernel_layout"] + data_tensor, kernel_tensor = tinfos + data_dtype = data_tensor.dtype + kernel_dtype = kernel_tensor.dtype + out_dtype = out_type.dtype + + if topi_tmpl == "conv2d_NCHWc.intel_graphics": + assert data_layout == "NCHW" and kernel_layout == "OIHW" + if cfg.is_fallback: + _get_default_config(cfg, data_tensor, kernel_tensor, strides, padding, + out_dtype, False) + batch_size, in_channel, height, width = get_const_tuple(data_tensor.shape) + out_channel, _, kh, kw = get_const_tuple(kernel_tensor.shape) + ic_bn = cfg["tile_ic"].val if hasattr(cfg["tile_ic"], "val") else cfg["tile_ic"].size[-1] + oc_bn = cfg["tile_oc"].val if hasattr(cfg["tile_oc"], "val") else cfg["tile_oc"].size[-1] + + # update new attrs + new_attrs['channels'] = out_channel + new_attrs['data_layout'] = 'NCHW%dc' % ic_bn + # (oc, ic, h, w) -> (OC, IC, h, w, ic, oc) + new_attrs['kernel_layout'] = 'OIHW%di%do' % (ic_bn, oc_bn) + new_attrs['out_layout'] = 'NCHW%dc' % oc_bn + + # Store altered operator's config + new_data = tvm.placeholder((batch_size, in_channel//ic_bn, height, width, ic_bn), + dtype=data_dtype) + new_kernel = tvm.placeholder((out_channel//oc_bn, in_channel//ic_bn, + kh, kw, ic_bn, oc_bn), dtype=kernel_dtype) + new_workload = autotvm.task.args_to_workload( + [new_data, new_kernel, strides, padding, dilation, new_attrs["data_layout"], + new_attrs["out_layout"], out_dtype], "conv2d_NCHWc.intel_graphics") + dispatch_ctx.update(target, new_workload, cfg) + return relay.nn.contrib_conv2d_nchwc(*inputs, **new_attrs) + else: + return None + + +@conv2d_infer_layout.register("intel_graphics") +def _conv2d_infer_layout(workload, cfg): + _, data, kernel, strides, padding, dilation, layout, dtype = workload + batch_size, in_channel, in_height, in_width = data[:-1] + out_channel, _, k_height, k_width = kernel[:-1] + out_height = (in_height + 2 * padding[0] - k_height) // strides[0] + 1 + out_width = (in_width + 2 * padding[1] - k_width) // strides[1] + 1 + tile_ic, tile_oc = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1] + in_shape = (batch_size, in_channel // tile_ic, in_height, in_width, tile_ic) + in_layout = "NCHW%dc" % tile_ic + out_shape = (batch_size, out_channel // tile_oc, out_height, out_width, tile_oc) + out_layout = "NCHW%dc" % tile_oc + return ((in_shape, in_layout),), ((out_shape, out_layout),) diff --git a/topi/python/topi/intel_graphics/depthwise_conv2d.py b/topi/python/topi/intel_graphics/depthwise_conv2d.py index 97b7376933de8..90f4c85d21db1 100644 --- a/topi/python/topi/intel_graphics/depthwise_conv2d.py +++ b/topi/python/topi/intel_graphics/depthwise_conv2d.py @@ -20,16 +20,17 @@ from tvm import autotvm from ..util import traverse_inline from .. import tag -from .. import generic, nn +from .. import nn from ..nn.depthwise_conv2d import depthwise_conv2d_infer_layout # register original implementation of depthwise_conv2d_nchw since we don't need to change this part -autotvm.register_topi_compute(nn.depthwise_conv2d_nchw, ['intel_graphics'], 'direct', - nn.depthwise_conv2d_nchw.fdefault) +@autotvm.register_topi_compute("depthwise_conv2d_nchw.intel_graphics") +def depthwise_conv2d_nchw(cfg, data, kernel, strides, padding, dilation, out_dtype): + return nn.depthwise_conv2d_nchw(data, kernel, strides, padding, dilation, out_dtype) -@autotvm.register_topi_schedule(generic.schedule_depthwise_conv2d_nchw, \ - ['intel_graphics'], 'direct') -def schedule_depthwise_conv2d_nchw_intel_graphics(cfg, outs): + +@autotvm.register_topi_schedule("depthwise_conv2d_nchw.intel_graphics") +def schedule_depthwise_conv2d_nchw(cfg, outs): """Schedule for depthwise_conv2d nchw forward. Parameters @@ -68,7 +69,7 @@ def _callback(op): # fallback support if cfg.is_fallback: ref_log = autotvm.tophub.load_reference_log( - target.target_name, target.model, 'depthwise_conv2d_nchw', 'direct') + target.target_name, target.model, 'depthwise_conv2d_nchw.intel_graphics') cfg.fallback_with_reference_log(ref_log) cfg['unroll_explicit'].val = 0 ##### space definition end ##### @@ -132,7 +133,7 @@ def _callback(op): traverse_inline(s, outs[0].op, _callback) return s -@generic.schedule_depthwise_conv2d_nhwc.register(["intel_graphics"]) + def schedule_depthwise_conv2d_nhwc(outs): """Schedule for depthwise_conv2d nhwc forward. diff --git a/topi/python/topi/mali/conv2d.py b/topi/python/topi/mali/conv2d.py index 35a86e991c236..0ee92280ca968 100644 --- a/topi/python/topi/mali/conv2d.py +++ b/topi/python/topi/mali/conv2d.py @@ -17,22 +17,20 @@ # pylint: disable=invalid-name,unused-variable,unused-argument,no-else-return """conv2d schedule on ARM Mali GPU""" import tvm +from tvm import relay from tvm import autotvm from tvm.autotvm.task.space import get_factors -from ..generic import schedule_conv2d_nchw, schedule_conv2d_winograd_without_weight_transform from ..util import traverse_inline, get_const_int, get_const_tuple -from ..nn import conv2d, conv2d_winograd_without_weight_transform, \ - get_pad_tuple, pad, conv2d_alter_layout +from .. import nn from ..nn.winograd_util import winograd_transform_matrices # reuse some compute declarations from ARM CPU -from ..arm_cpu.conv2d import _alter_conv2d_layout_arm from ..arm_cpu.conv2d_spatial_pack import conv2d_spatial_pack_nchw -@autotvm.register_topi_compute(conv2d, 'mali', ['direct']) -def conv2d_mali(cfg, data, kernel, strides, padding, dilation, layout, out_dtype): +@autotvm.register_topi_compute("conv2d_nchw_spatial_pack.mali") +def conv2d_nchw_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype): """TOPI compute callback for conv2d Parameters @@ -57,9 +55,6 @@ def conv2d_mali(cfg, data, kernel, strides, padding, dilation, layout, out_dtype dilation : list of two ints [dilation_height, dilation_width] - layout : str - layout of data - out_dtype: str The output type. This is used for mixed precision. @@ -68,14 +63,11 @@ def conv2d_mali(cfg, data, kernel, strides, padding, dilation, layout, out_dtype output : tvm.Tensor 4-D with shape [batch, out_channel, out_height, out_width] """ - if layout == 'NCHW': - return conv2d_spatial_pack_nchw(cfg, data, kernel, strides, padding, - dilation, out_dtype, num_tile=3) - else: - raise ValueError("Unsupported layout {}".format(layout)) + return conv2d_spatial_pack_nchw(cfg, data, kernel, strides, padding, + dilation, out_dtype, num_tile=3) -@autotvm.register_topi_schedule(schedule_conv2d_nchw, 'mali', ['direct', 'winograd']) -def schedule_conv2d_nchw_mali(cfg, outs): +@autotvm.register_topi_schedule("conv2d_nchw_spatial_pack.mali") +def schedule_conv2d_nchw_spatial_pack(cfg, outs): """TOPI schedule callback for conv2d Parameters @@ -113,9 +105,6 @@ def _callback(op): _schedule_spatial_pack(cfg, s, output, conv, data_vec, kernel_vec) - if 'winograd_conv2d_output' in op.tag: - _schedule_winograd(cfg, s, op) - traverse_inline(s, outs[0].op, _callback) return s @@ -200,13 +189,27 @@ def _pick_tile_size(data, kernel): else: return 2 -@autotvm.register_topi_compute(conv2d, 'mali', ['winograd']) -def conv2d_mali_winograd(cfg, data, kernel, strides, padding, dilation, layout, out_dtype): + +@autotvm.register_topi_compute("conv2d_nchw_winograd.mali") +def conv2d_nchw_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype): tile_size = _pick_tile_size(data, kernel) - return _decl_winograd(cfg, data, kernel, strides, padding, dilation, layout, out_dtype, + return _decl_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype, tile_size) -def _decl_winograd(cfg, data, kernel, strides, padding, dilation, layout, out_dtype, tile_size): + +@autotvm.register_topi_schedule("conv2d_nchw_winograd.mali") +def schedule_conv2d_nchw_winograd(cfg, outs): + s = tvm.create_schedule([x.op for x in outs]) + + def _callback(op): + if 'winograd_conv2d_output' in op.tag: + _schedule_winograd(cfg, s, op) + + traverse_inline(s, outs[0].op, _callback) + return s + + +def _decl_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype, tile_size): N, CI, IH, IW = get_const_tuple(data.shape) if isinstance(dilation, int): dilation_h = dilation_w = dilation @@ -214,9 +217,8 @@ def _decl_winograd(cfg, data, kernel, strides, padding, dilation, layout, out_dt dilation_h, dilation_w = dilation if len(kernel.shape) == 4: - if dilation_h != 1 or dilation_w != 1: - kernel = dilate(kernel, (1, 1, dilation_h, dilation_w)) + kernel = nn.dilate(kernel, (1, 1, dilation_h, dilation_w)) pre_computed = False CO, _, KH, KW = get_const_tuple(kernel.shape) else: @@ -226,11 +228,10 @@ def _decl_winograd(cfg, data, kernel, strides, padding, dilation, layout, out_dt CO *= VC KH, KW = H_CAT - tile_size + 1, W_CAT - tile_size + 1 HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides) - pt, pl, pb, pr = get_pad_tuple(padding, (KH, KW)) + pt, pl, pb, pr = nn.get_pad_tuple(padding, (KH, KW)) - assert layout == 'NCHW' assert KH == 3 and KW == 3 and HSTR == 1 and WSTR == 1 - data_pad = pad(data, (0, 0, pt, pl), (0, 0, pb, pr), name="data_pad") + data_pad = nn.pad(data, (0, 0, pt, pl), (0, 0, pb, pr), name="data_pad") r = KW m = tile_size @@ -420,34 +421,85 @@ def _schedule_winograd(cfg, s, op): s[Y].compute_at(s[output], tt) -##### REGISTER TOPI COMPUTE / SCHEDULE FOR WINOGRAD WITH WEIGHT TRANSFORM ##### -@autotvm.register_topi_compute(conv2d_winograd_without_weight_transform, 'mali', ['winograd']) -def conv2d_winograd_ww(cfg, data, kernel, strides, padding, dilation, layout, out_dtype, tile_size): - """TOPI compute callback""" - return _decl_winograd(cfg, data, kernel, strides, padding, dilation, layout, out_dtype, - tile_size) +##### REGISTER ALTER OP LAYOUT ##### +@nn.conv2d_alter_layout.register(["mali"]) +def _alter_conv2d_layout(attrs, inputs, tinfos, out_type): + target = tvm.target.current_target(allow_none=False) + dispatch_ctx = autotvm.task.DispatchContext.current + + _, outs = relay.backend.compile_engine.select_implement( + relay.op.get("nn.conv2d"), attrs, tinfos, out_type, target) + workload = autotvm.task.get_workload(outs) + if workload is None: + # The best implementation is not an AutoTVM template, + # we then assume it's not necessary to alter this op. + return None + cfg = dispatch_ctx.query(target, workload) + if cfg.is_fallback: # if is fallback, clear query cache and return None + autotvm.task.clear_fallback_cache(target, workload) + return None -@autotvm.register_topi_schedule(schedule_conv2d_winograd_without_weight_transform, - 'mali', ['winograd']) -def schedule_conv2d_winograd_without_weight_transform_(cfg, outs): - """TOPI schedule callback""" - s = tvm.create_schedule([x.op for x in outs]) + topi_tmpl = workload[0] + new_attrs = {k: attrs[k] for k in attrs.keys()} - def _callback(op): - if 'winograd_conv2d_output' in op.tag: - _schedule_winograd(cfg, s, op) + strides = attrs.get_int_tuple("strides") + padding = attrs.get_int_tuple("padding") + dilation = attrs.get_int_tuple("dilation") + data_layout = attrs["data_layout"] + kernel_layout = attrs["kernel_layout"] + data, kernel = tinfos + out_dtype = out_type.dtype - traverse_inline(s, outs[0].op, _callback) - return s + idxd = tvm.indexdiv + if topi_tmpl == "conv2d_nchw_spatial_pack.mali": + assert data_layout == "NCHW" and kernel_layout == "OIHW" + N, CI, H, W = get_const_tuple(data.shape) + CO, _, KH, KW = get_const_tuple(kernel.shape) + VC = cfg['tile_co'].size[-1] -##### REGISTER ALTER OP LAYOUT ##### -@conv2d_alter_layout.register(["mali"]) -def _alter_conv2d_layout(attrs, inputs, tinfos, F): - try: - return _alter_conv2d_layout_arm(attrs, inputs, tinfos, F) - except KeyError: # to filter out fallback opencl templates + new_attrs['kernel_layout'] = 'OIHW%do' % VC + + new_data = data + new_kernel = tvm.placeholder((idxd(CO, VC), CI, KH, KW, VC), dtype=kernel.dtype) + new_workload = autotvm.task.args_to_workload( + [new_data, new_kernel, strides, padding, dilation, out_dtype], + "conv2d_nchw_spatial_pack.mali") + dispatch_ctx.update(target, new_workload, cfg) + + return relay.nn.conv2d(*inputs, **new_attrs) + elif topi_tmpl == "conv2d_nchw_winograd.mali": + assert data_layout == "NCHW" and kernel_layout == "OIHW" + N, CI, H, W = get_const_tuple(data.shape) + CO, _, KH, KW = get_const_tuple(kernel.shape) + tile_size = _pick_tile_size(data, kernel) + VC = cfg['tile_bna'].val + + weight_expr = inputs[1] + weight_expr = relay.nn.contrib_conv2d_winograd_weight_transform( + weight_expr, tile_size=tile_size) + weight_expr = relay.reshape(weight_expr, + newshape=(KH + tile_size - 1, + KW + tile_size - 1, + idxd(CO, VC), VC, CI)) + weight_expr = relay.transpose(weight_expr, axes=[0, 1, 2, 4, 3]) + + new_attrs['tile_size'] = tile_size + + new_data = data + new_kernel = tvm.placeholder((KH + tile_size - 1, + KW + tile_size -1, + idxd(CO, VC), CI, VC), + kernel.dtype) + new_workload = autotvm.task.args_to_workload( + [new_data, new_kernel, strides, padding, dilation, out_dtype], + 'conv2d_nchw_winograd.mali') + dispatch_ctx.update(target, new_workload, cfg) + + return relay.nn.contrib_conv2d_winograd_without_weight_transform( + inputs[0], weight_expr, **new_attrs) + else: return None diff --git a/topi/python/topi/mali/dense.py b/topi/python/topi/mali/dense.py index 6096a99c97c2e..3b233e92ba8ae 100644 --- a/topi/python/topi/mali/dense.py +++ b/topi/python/topi/mali/dense.py @@ -22,12 +22,18 @@ import tvm from tvm import autotvm -from .. import generic, nn +from .. import nn from ..util import traverse_inline -autotvm.register_topi_compute(nn.dense, 'mali', 'direct', nn.dense.fdefault) -@autotvm.register_topi_schedule(generic.schedule_dense, 'mali', 'direct') + +@autotvm.register_topi_compute('dense.mali') +def dense(_, data, weight, bias=None, out_dtype=None): + """Dense operator on Mali""" + return nn.dense(data, weight, bias, out_dtype) + + +@autotvm.register_topi_schedule('dense.mali') def schedule_dense(cfg, outs): """Schedule for dense operator. @@ -52,11 +58,11 @@ def _callback(op): vec_size = [1, 2, 4, 8, 16] max_unroll = 32 - dense = op.output(0) + dense_out = op.output(0) output = outs[0] y, x = s[output].op.axis - c = s[dense].op.reduce_axis[0] + c = s[dense_out].op.reduce_axis[0] ##### space definition begin ##### cfg.define_split('tile_y', y, num_outputs=3) @@ -66,12 +72,12 @@ def _callback(op): # fallback support if cfg.is_fallback: ref_log = autotvm.tophub.load_reference_log( - 'mali', 'rk3399', 'dense', 'direct') + 'mali', 'rk3399', 'dense.mali') cfg.fallback_with_reference_log(ref_log) ##### space definition end ##### - if dense.op in s.outputs: - dense = s.cache_write(output, 'local') + if dense_out.op in s.outputs: + dense_out = s.cache_write(output, 'local') by, ty, yi = cfg['tile_y'].apply(s, output, y) bx, tx, xi = cfg['tile_x'].apply(s, output, x) @@ -85,23 +91,25 @@ def _callback(op): s[output].unroll(yi) if cfg['tile_x'].size[-1] in vec_size: s[output].vectorize(xi) - s[dense].compute_at(s[output], tx) + s[dense_out].compute_at(s[output], tx) - k = s[dense].op.reduce_axis[0] - y, x = s[dense].op.axis - k, k_unroll = cfg['c_unroll'].apply(s, dense, k) - s[dense].reorder(k, k_unroll, y, x) - s[dense].unroll(k_unroll) + k = s[dense_out].op.reduce_axis[0] + y, x = s[dense_out].op.axis + k, k_unroll = cfg['c_unroll'].apply(s, dense_out, k) + s[dense_out].reorder(k, k_unroll, y, x) + s[dense_out].unroll(k_unroll) if cfg['tile_y'].size[-1] < max_unroll: - s[dense].unroll(y) + s[dense_out].unroll(y) if cfg['tile_x'].size[-1] in vec_size: - s[dense].vectorize(x) + s[dense_out].vectorize(x) traverse_inline(s, outs[0].op, _callback) return s + def fuse_and_bind(s, tensor, axis=None, num_thread=None): """ fuse all the axis and bind to GPU threads """ + # TODO(@comaniac): figure out where this function is used. axis = axis or s[tensor].op.axis fused = s[tensor].fuse(*axis) bx, tx = s[tensor].split(fused, num_thread) diff --git a/topi/python/topi/mali/depthwise_conv2d.py b/topi/python/topi/mali/depthwise_conv2d.py index 274b2944e4d9e..4ff17e534febc 100644 --- a/topi/python/topi/mali/depthwise_conv2d.py +++ b/topi/python/topi/mali/depthwise_conv2d.py @@ -20,17 +20,18 @@ import tvm from tvm import autotvm -from ..generic import schedule_depthwise_conv2d_nchw -from ..nn import depthwise_conv2d_nchw +from .. import nn from ..util import traverse_inline # register original implementation of depthwise_conv2d_nchw since we don't need to change this part -autotvm.register_topi_compute(depthwise_conv2d_nchw, 'mali', 'direct', - depthwise_conv2d_nchw.fdefault) +@autotvm.register_topi_compute("depthwise_conv2d_nchw.mali") +def depthwise_conv2d_nchw(cfg, data, kernel, strides, padding, dilation, out_dtype): + return nn.depthwise_conv2d_nchw(data, kernel, strides, padding, dilation, out_dtype) + # register customized schedule for arm cpu. -@autotvm.register_topi_schedule(schedule_depthwise_conv2d_nchw, 'mali', 'direct') -def schedule_depthwise_conv2d_nchw_mali(cfg, outs): +@autotvm.register_topi_schedule("depthwise_conv2d_nchw.mali") +def schedule_depthwise_conv2d_nchw(cfg, outs): """Schedule depthwise conv2d Parameters @@ -64,7 +65,7 @@ def _schedule(pad_data, kernel, conv): # fallback support if cfg.is_fallback: ref_log = autotvm.tophub.load_reference_log( - 'mali', 'rk3399', 'depthwise_conv2d_nchw', 'direct') + 'mali', 'rk3399', 'depthwise_conv2d_nchw.mali') cfg.fallback_with_reference_log(ref_log) ###### space definition end ###### diff --git a/topi/python/topi/nn/batch_matmul.py b/topi/python/topi/nn/batch_matmul.py index 7b872ceacf29d..d69562c4daf6c 100644 --- a/topi/python/topi/nn/batch_matmul.py +++ b/topi/python/topi/nn/batch_matmul.py @@ -20,7 +20,7 @@ import tvm from ..util import get_const_tuple -def batch_matmul_default(x, y): +def batch_matmul(x, y): """Computes batch matrix multiplication of `x` and `y` when `x` and `y` are data in batch. @@ -48,23 +48,3 @@ def batch_matmul_default(x, y): return tvm.compute((batch, M, N), lambda b, i, j: tvm.sum(x[b, i, k] * y[b, j, k], axis=k), tag='batch_matmul') - -@tvm.target.generic_func -def batch_matmul(x, y): - """Computes batch matrix multiplication of `x` and `y` when `x` and `y` are - data in batch. - - Parameters - ---------- - x : tvm.Tensor - 3-D with shape [batch, M, K] - - y : tvm.Tensor - 3-D with shape [batch, N, K] - - Returns - ------- - output : tvm.Tensor - 3-D with shape [batch, M, N] - """ - return batch_matmul_default(x, y) diff --git a/topi/python/topi/nn/bitserial_conv2d.py b/topi/python/topi/nn/bitserial_conv2d.py index e1f8f819968f7..f18a5aae7eedf 100644 --- a/topi/python/topi/nn/bitserial_conv2d.py +++ b/topi/python/topi/nn/bitserial_conv2d.py @@ -19,13 +19,11 @@ """Bitserial Conv2D operators""" from __future__ import absolute_import as _abs import tvm -from tvm import autotvm from .pad import pad from .util import get_pad_tuple -from .bitserial_util import bitpack, binary_op_multiplier +from .bitserial_util import bitpack from ..util import get_const_tuple -@tvm.target.generic_func def bitserial_conv2d_nchw(data, kernel, stride, padding, activation_bits, weight_bits, pack_dtype='uint32', out_dtype='int16', unipolar=True): """Bitserial Conv2D operator. @@ -117,7 +115,6 @@ def _conv(nn, ff, yy, xx): return tvm.compute((batch, out_channel, out_height, out_width), _conv, name="Conv2dOutput", tag="bitserial_conv2d_nchw") -@tvm.target.generic_func def bitserial_conv2d_nhwc(data, kernel, stride, padding, activation_bits, weight_bits, pack_dtype='uint32', out_dtype='int16', unipolar=True): """Bitserial Conv2D operator. @@ -213,222 +210,6 @@ def _conv(nn, yy, xx, ff): return conv -@autotvm.register_topi_compute(bitserial_conv2d_nchw, ['cpu', 'arm_cpu'], 'direct') -def spatial_pack_nchw(cfg, data, kernel, stride, padding, in_bits, weight_bits, - pack_dtype='uint32', out_dtype='int16', unipolar=True): - """ Compute convolution with pack on spatial axes. """ - assert data.shape[0].value == 1, "spatial pack convolution only support batch size=1" - data_q = bitpack(data, in_bits, pack_axis=1, bit_axis=0, pack_type=pack_dtype) - # Check if kernel is already bitpacked - if len(kernel.shape) == 4: - kernel_q = bitpack(kernel, weight_bits, pack_axis=1, bit_axis=0, pack_type=pack_dtype) - KB, CO, _, KH, KW = get_const_tuple(kernel_q.shape) - else: - kernel_vec = kernel - OCO, _, KH, KW, KB, VC = get_const_tuple(kernel_vec.shape) - CO = OCO * VC - - IB, N, CI, H, W = get_const_tuple(data_q.shape) - KB, CO, _, KH, KW = get_const_tuple(kernel_q.shape) - - if isinstance(padding, int) or (isinstance(padding, (tuple, list)) and len(padding) == 2): - TPAD, LPAD, DPAD, RPAD = get_pad_tuple(padding, kernel) - else: - TPAD, LPAD, DPAD, RPAD = padding - pad_before = [0, 0, 0, TPAD, LPAD] - pad_after = [0, 0, 0, DPAD, RPAD] - - if isinstance(stride, (tuple, list)): - HSTR, WSTR = stride - else: - HSTR, WSTR = stride, stride - HCAT, WCAT = KH-1, KW-1 - - TH = H + TPAD + DPAD - TW = W + LPAD + RPAD - OH = (H + TPAD + DPAD - KH) // HSTR + 1 - OW = (W + LPAD + RPAD - KW) // WSTR + 1 - - # ==================== define configuration space ==================== - n, co, oh, ow = cfg.axis(N), cfg.axis(CO), cfg.axis(OH), cfg.axis(OW) - ci, kh, kw = cfg.reduce_axis(CI), cfg.reduce_axis(KH), cfg.reduce_axis(KW) - ib, kb = cfg.reduce_axis(in_bits), cfg.reduce_axis(weight_bits) - - co, vc = cfg.define_split('tile_co', co, num_outputs=2, - filter=lambda x: max(x.size[1:]) <= 16) - oh, vh = cfg.define_split('tile_oh', oh, num_outputs=2, - filter=lambda x: max(x.size[1:]) <= 16) - ow, vw = cfg.define_split('tile_ow', ow, num_outputs=2, - filter=lambda x: max(x.size[1:]) <= 16) - cfg.define_annotate('ann_reduce', [ib, kb, kh, kw], policy='try_unroll') - - cfg.define_reorder("reorder_0", - [n, co, oh, ow, vc, vh, vw, kh, kw, kb, ib, ci], - policy='interval_all', interval=(6, 11)) - # binary ops - cfg.add_flop(2 * N * OH * OW * CO * CI * KH * KW * binary_op_multiplier(pack_dtype)) - # ==================== - - VC = cfg["tile_co"].size[-1] - VH = cfg["tile_oh"].size[-1] - VW = cfg["tile_ow"].size[-1] - - dvshape = (1, TH//(VH*HSTR), TW//(VW*WSTR), CI, VH*HSTR+HCAT, VW*WSTR+WCAT, IB) - kvshape = (CO//VC, CI, KH, KW, KB, VC) - ovshape = (1, CO//VC, OH//VH, OW//VW, VH, VW, VC) - oshape = (1, CO, OH, OW) - - if (TPAD != 0 and RPAD != 0): - data_pad = pad(data_q, pad_before, pad_after, name="data_pad") - else: - data_pad = data_q - - data_vec = tvm.compute(dvshape, lambda n, h, w, ci, vh, vw, b: \ - data_pad[b][n][ci][h*VH*HSTR+vh][w*VW*WSTR+vw], name='data_vec') - - if len(kernel.shape) == 4: - kernel_vec = tvm.compute(kvshape, lambda co, ci, dh, dw, b, vc: \ - kernel_q[b][co*VC+vc][ci][dh][dw], name='kernel_vec') - - ci = tvm.reduce_axis((0, CI), name='ci') - dh = tvm.reduce_axis((0, KH), name='dh') - dw = tvm.reduce_axis((0, KW), name='dw') - b1 = tvm.reduce_axis((0, IB), name='ib') - b2 = tvm.reduce_axis((0, KB), name='kb') - - def _conv(n, co, h, w, vh, vw, vc): - b1b2 = (b1+b2).astype(out_dtype) - if unipolar: - return tvm.sum((tvm.popcount( - data_vec[n, h, w, ci, vh*HSTR+dh, vw*WSTR+dw, b1].astype(out_dtype) & - kernel_vec[co, ci, dh, dw, b2, vc].astype(out_dtype)) - - tvm.popcount( - data_vec[n, h, w, ci, vh*HSTR+dh, vw*WSTR+dw, b1].astype(out_dtype) - & ~kernel_vec[co, ci, dh, dw, b2, vc]).astype(out_dtype)) << b1b2, - axis=[ci, dh, dw, b1, b2]) - - return tvm.sum((tvm.popcount( - data_vec[n, h, w, ci, vh*HSTR+dh, vw*WSTR+dw, b1] & - kernel_vec[co, ci, dh, dw, b2, vc])).astype(out_dtype) << b1b2, - axis=[ci, dh, dw, b1, b2]) - - conv = tvm.compute(ovshape, _conv, name='conv_out') - idxd = tvm.indexdiv - idxm = tvm.indexmod - - return tvm.compute( - oshape, lambda n, co, h, w: - conv[n, - idxd(co, VC), idxd(h, VH), idxd(w, VW), - idxm(h, VH), idxm(w, VW), idxm(co, VC)], - name='conv_vec', tag='spatial_bitserial_conv_nchw') - -@autotvm.register_topi_compute(bitserial_conv2d_nhwc, 'cpu', 'direct') -def spatial_pack_nhwc(cfg, data, kernel, stride, padding, in_bits, weight_bits, - pack_dtype='uint32', out_dtype='int16', unipolar=True): - """ Compute convolution with pack on spatial axes. """ - assert data.shape[0].value == 1, "spatial pack convolution only support batch size=1" - data_q = bitpack(data, in_bits, pack_axis=3, bit_axis=4, pack_type=pack_dtype) - pack_kernel = len(kernel.shape) == 4 - - if pack_kernel: - kernel_q = bitpack(kernel, weight_bits, pack_axis=2, bit_axis=4, pack_type=pack_dtype) - else: - kernel_q = kernel - - KH, KW, _, CO, KB = get_const_tuple(kernel_q.shape) - N, H, W, CI, IB = get_const_tuple(data_q.shape) - - if isinstance(padding, int) or (isinstance(padding, (tuple, list)) and len(padding) == 2): - TPAD, LPAD, DPAD, RPAD = get_pad_tuple(padding, kernel) - else: - TPAD, LPAD, DPAD, RPAD = padding - pad_before = [0, TPAD, LPAD, 0, 0] - pad_after = [0, DPAD, RPAD, 0, 0] - - if isinstance(stride, (tuple, list)): - HSTR, WSTR = stride - else: - HSTR, WSTR = stride, stride - HCAT, WCAT = KH-1, KW-1 - - PAD_H = H + (TPAD + DPAD) - PAD_W = W + (LPAD + RPAD) - OH = (PAD_H - KH) // HSTR + 1 - OW = (PAD_W - KW) // WSTR + 1 - oshape = (1, OH, OW, CO) - - # ==================== define configuration space ==================== - n, oh, ow, co = cfg.axis(N), cfg.axis(OH), cfg.axis(OW), cfg.axis(CO) - ci, kh, kw = cfg.reduce_axis(CI), cfg.reduce_axis(KH), cfg.reduce_axis(KW) - ib, kb = cfg.reduce_axis(in_bits), cfg.reduce_axis(weight_bits) - - co, vc = cfg.define_split('tile_co', co, num_outputs=2, - filter=lambda x: max(x.size[1:]) <= 16) - oh, vh = cfg.define_split('tile_oh', oh, num_outputs=2, - filter=lambda x: max(x.size[1:]) <= 16) - ow, vw = cfg.define_split('tile_ow', ow, num_outputs=2, - filter=lambda x: max(x.size[1:]) <= 16) - cfg.define_annotate('ann_reduce', [ib, kb, kh, kw], policy='try_unroll') - cfg.define_reorder("reorder_0", - [n, oh, ow, co, vh, vw, kh, kw, kb, ib, vc, ci], - policy='interval_all', interval=(3, 7)) - # binary ops - cfg.add_flop(2 * N * OH * OW * CO * CI * KH * KW * binary_op_multiplier(pack_dtype)) - # ==================== - - VC = cfg["tile_co"].size[-1] - VH = cfg["tile_oh"].size[-1] - VW = cfg["tile_ow"].size[-1] - - dvshape = (1, PAD_H//(VH*HSTR), PAD_W//(VW*WSTR), VH*HSTR+HCAT, VW*WSTR+WCAT, CI, IB) - kvshape = (CO, KH, KW, CI, VC, KB) - ovshape = (1, OH, OW, CO, VH, VW, VC) - oshape = (1, OH, OW, CO) - - if (DPAD != 0 and RPAD != 0): - data_pad = pad(data_q, pad_before, pad_after, name="data_pad") - else: - data_pad = data_q - - data_vec = tvm.compute(dvshape, lambda n, h, w, vh, vw, ci, b: \ - data_pad[n][h*VH*HSTR+vh][w*VW*WSTR+vw][ci][b], name='data_vec') - - kernel_vec = tvm.compute(kvshape, lambda co, dh, dw, ci, vc, b: \ - kernel_q[dh][dw][ci][co*VC+vc][b], name='kernel_vec') - - ci = tvm.reduce_axis((0, CI), name='ci') - dh = tvm.reduce_axis((0, KH), name='dh') - dw = tvm.reduce_axis((0, KW), name='dw') - b1 = tvm.reduce_axis((0, IB), name='ib') - b2 = tvm.reduce_axis((0, KB), name='kb') - - def _conv(n, h, w, co, vh, vw, vc): - b1b2 = (b1+b2).astype(out_dtype) - if unipolar: - return tvm.sum( - ((tvm.popcount(data_vec[n, h, w, vh*HSTR+dh, vw*WSTR+dw, ci, b1] & - kernel_vec[co, dh, dw, ci, vc, b2]).astype(out_dtype) - - tvm.popcount(data_vec[n, h, w, vh*HSTR+dh, vw*WSTR+dw, ci, b1]& - ~kernel_vec[co, dh, dw, ci, vc, b2]).astype(out_dtype)) << b1b2), - axis=[dh, dw, ci, b1, b2]) - - return tvm.sum(tvm.popcount( - data_vec[n, h, w, vh*HSTR+dh, vw*WSTR+dw, ci, b1] & - kernel_vec[co, dh, dw, ci, vc, b2]).astype(out_dtype) << b1b2, - axis=[dh, dw, ci, b1, b2]) - - conv = tvm.compute(ovshape, _conv, name='conv') - - idxd = tvm.indexdiv - idxm = tvm.indexmod - return tvm.compute( - oshape, lambda n, h, w, co: - conv[n, - idxd(h, VH), idxd(w, VW), idxd(co, VC), - idxm(h, VH), idxm(w, VW), idxm(co, VC)], - name='output_unpack', tag='spatial_bitserial_conv_nhwc') - @tvm.target.generic_func def bitserial_conv2d_legalize(attrs, inputs, types): """Legalizes Bitserial Conv2D op. diff --git a/topi/python/topi/nn/bitserial_dense.py b/topi/python/topi/nn/bitserial_dense.py index d77a1b7b0fc21..fa1b5df7d066e 100644 --- a/topi/python/topi/nn/bitserial_dense.py +++ b/topi/python/topi/nn/bitserial_dense.py @@ -18,11 +18,9 @@ """Bitserial Dense operator.""" from __future__ import absolute_import import tvm -from tvm import autotvm from topi.util import get_const_tuple -from .bitserial_util import bitpack, binary_op_multiplier +from .bitserial_util import bitpack -@tvm.target.generic_func def bitserial_dense(data, weight, data_bits, weight_bits, pack_dtype='uint32', out_dtype='int16', unipolar=True): """The default implementation of bitserial dense in topi. @@ -66,78 +64,3 @@ def bitserial_dense(data, weight, data_bits, weight_bits, pack_dtype='uint32', if unipolar: return matmul_unipolar return matmul - - -@autotvm.register_topi_compute(bitserial_dense, ['cpu'], 'direct') -def bitserial_dense_default(cfg, data, weight, data_bits, weight_bits, pack_dtype='uint32', - out_dtype='int16', unipolar=True): - """Bitserial dense implementation. TODO: Why are these separate - - Parameters - ---------- - data : tvm.Tensor - 2-D with shape [batch, in_dim] - weight : tvm.Tensor - 2-D with shape [out_dim, in_dim] or - 3-D with shape [out_dim, weight_bits, in_dim] - Returns - ------- - output : tvm.Tensor - 2-D with shape [batch, out_dim] - """ - data_packed = bitpack(data, data_bits, pack_axis=1, bit_axis=1, pack_type=pack_dtype) - if len(weight.shape) == 2: - weight_packed = bitpack(weight, weight_bits, pack_axis=1, bit_axis=1, pack_type=pack_dtype) - else: - weight_packed = weight - Y, DB, K = get_const_tuple(data_packed.shape) - X, WB, _ = get_const_tuple(weight_packed.shape) - ######## Search space - x, y = cfg.axis(X), cfg.axis(Y) - db, wb, k = cfg.reduce_axis(DB), cfg.reduce_axis(WB), cfg.reduce_axis(K) - ko, ki = cfg.define_split('tile_k', k, num_outputs=2) - yo, yi = cfg.define_split('tile_y', y, num_outputs=2) - xo, xi = cfg.define_split('tile_x', x, num_outputs=2) - - cfg.define_reorder('reorder_0', [yo, xo, ko, yi, wb, db, ki, xi], - policy='candidate', candidate=[ - [yo, xo, ko, yi, wb, db, ki, xi], - [yo, xo, yi, ko, wb, db, ki, xi]]) - - cfg.define_annotate('ann_reduce', [db, wb], policy='try_unroll') - cfg.define_annotate('ann_spatial', [yi, xi], policy='try_unroll_vec') - - ###### Compute rule - VX = cfg['tile_x'].size[-1] - - wvshape = (X//VX, WB, VX, K) - oshape = (Y, X) - - k = tvm.reduce_axis((0, K), name='k') - db = tvm.reduce_axis((0, DB), name='db') - wb = tvm.reduce_axis((0, WB), name='wb') - - # Tile data and weights - weight_vec = tvm.compute(wvshape, lambda xo, wb, vx, k: - weight_packed[xo*VX+vx][wb][k], name='weight_vec') - - idxdiv = tvm.indexdiv - idxmod = tvm.indexmod - - matmul_unipolar = tvm.compute(oshape, lambda i, j: tvm.sum( - (tvm.popcount(weight_vec[idxdiv(j, VX), wb, idxmod(j, VX), k] & data_packed[i, db, k]) - - tvm.popcount(~weight_vec[idxdiv(j, VX), wb, idxmod(j, VX), k] & data_packed[i, db, k]) - ).astype(out_dtype) - << (db+wb).astype(out_dtype), axis=[wb, db, k]), tag='bitserial_dense_unipolar') - - matmul = tvm.compute(oshape, lambda i, j: tvm.sum( - tvm.popcount(weight_vec[idxdiv(j, VX), wb, idxmod(j, VX), k] & data_packed[i, db, k] - ).astype(out_dtype) - << (db+wb).astype(out_dtype), axis=[wb, db, k]), tag='bitserial_dense') - - # binary ops - cfg.add_flop(2 * Y * X * K * binary_op_multiplier(pack_dtype)) - - if unipolar: - return matmul_unipolar - return matmul diff --git a/topi/python/topi/nn/conv1d.py b/topi/python/topi/nn/conv1d.py index 98fa2e3d70013..4565fd2f5a461 100644 --- a/topi/python/topi/nn/conv1d.py +++ b/topi/python/topi/nn/conv1d.py @@ -23,7 +23,6 @@ from .util import get_pad_tuple1d -@tvm.target.generic_func def conv1d(data, kernel, strides=1, @@ -101,6 +100,13 @@ def conv1d_ncw(data, out_dtype : str The output data type. If None then output is same type as input. """ + if out_dtype is None: + out_dtype = data.dtype + if isinstance(strides, (tuple, list)): + strides = strides[0] + if isinstance(dilation, (tuple, list)): + dilation = dilation[0] + batch, in_channels, data_width = data.shape out_channels, _, kernel_size = kernel.shape @@ -158,6 +164,13 @@ def conv1d_nwc(data, out_dtype : str The output data type. If None then output is same type as input. """ + if out_dtype is None: + out_dtype = data.dtype + if isinstance(strides, (tuple, list)): + strides = strides[0] + if isinstance(dilation, (tuple, list)): + dilation = dilation[0] + batch, data_width, in_channels = data.shape kernel_size, _, out_channels = kernel.shape diff --git a/topi/python/topi/nn/conv1d_transpose.py b/topi/python/topi/nn/conv1d_transpose.py index 39918e90c3173..8d224247db011 100644 --- a/topi/python/topi/nn/conv1d_transpose.py +++ b/topi/python/topi/nn/conv1d_transpose.py @@ -24,7 +24,6 @@ from .util import get_pad_tuple1d -@tvm.target.generic_func def conv1d_transpose_ncw(data, kernel, stride, padding, out_dtype): """Transposed 1D convolution ncw forward operator. diff --git a/topi/python/topi/nn/conv2d.py b/topi/python/topi/nn/conv2d.py index 52f4b12a1d2dc..0d73c8b0b866c 100644 --- a/topi/python/topi/nn/conv2d.py +++ b/topi/python/topi/nn/conv2d.py @@ -23,7 +23,7 @@ from .pad import pad from .util import get_pad_tuple -from ..util import simplify, get_const_tuple, get_const_int +from ..util import simplify, get_const_tuple, get_const_int, tag from .winograd_util import winograd_transform_matrices # workload description of conv2d @@ -31,7 +31,6 @@ ['in_dtype', 'out_dtype', 'height', 'width', 'in_filter', 'groups', 'out_filter', 'hkernel', 'wkernel', 'hpad', 'wpad', 'hstride', 'wstride']) -@tvm.target.generic_func def conv2d(input, filter, strides, padding, dilation, layout='NCHW', out_dtype=None): """Conv2D operator. @@ -96,7 +95,7 @@ def conv2d_legalize(attrs, inputs, types): @tvm.target.generic_func -def conv2d_alter_layout(attrs, inputs, tinfos, F): +def conv2d_alter_layout(attrs, inputs, tinfos, out_type): """Change Conv2D layout. Parameters @@ -107,13 +106,12 @@ def conv2d_alter_layout(attrs, inputs, tinfos, F): Grouped input symbols tinfos : list Input shape and dtype - F: symbol - The context, can be either relay.op + out_type: type + The output type Note ---- - Unlike other TOPI functions, this function operates on both graph level and operator level, - so we have to pass 'F' to make it support our two versions of graph IR, Relay. + Unlike other TOPI functions, this function operates on both graph level and operator level. """ # not to change by default return None @@ -368,7 +366,6 @@ def conv2d_nhwc(Input, Filter, stride, padding, dilation, out_dtype='float32'): return Output -@tvm.target.generic_func def conv2d_NCHWc(data, kernel, stride, padding, dilation, layout, out_layout, out_dtype='float32'): """Conv2D operator for nChw[x]c layout. @@ -408,58 +405,9 @@ def conv2d_NCHWc(data, kernel, stride, padding, dilation, layout, out_layout, ou 5-D with shape [batch, out_channel_chunk, out_height, out_width, out_channel_block] """ - return conv2d_NCHWc_compute(data, - kernel, - stride, - padding, - dilation, - layout, - out_layout, - out_dtype) - - -def conv2d_NCHWc_compute(data, kernel, strides, padding, dilation, layout, out_layout, out_dtype): - """Conv2D operator compute for nChw[x]c layout. - - Parameters - ---------- - data : tvm.Tensor - 5-D with shape [batch, in_channel_chunk, in_height, in_width, in_channel_block] - - kernel : tvm.Tensor - 6-D with shape - [num_filter_chunk, in_channel_chunk, filter_height, filter_width, - in_channel_block, num_filter_block] - - stride : int or a list/tuple of two ints - stride size, or [stride_height, stride_width] - - padding : int or a list/tuple of 2 or 4 ints - padding size, or - [pad_height, pad_width] for 2 ints, or - [pad_top, pad_left, pad_bottom, pad_right] for 4 ints - - dilation: int or a list/tuple of two ints - dilation size, or [dilation_height, dilation_width] - - layout : str - Input data layout - - out_layout : str - Output data layout - - out_dtype : str - output data type - - Returns - ------- - output : tvm.Tensor - 5-D with shape [batch, out_channel_chunk, out_height, out_width, out_channel_block] - """ - # layout and out_layout are not used here, # we keep them for debug convenience when dumping autotvm workload - HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides) + HSTR, WSTR = stride if isinstance(stride, (tuple, list)) else (stride, stride) dilation_h, dilation_w = dilation if isinstance(dilation, (tuple, list)) \ else (dilation, dilation) @@ -516,8 +464,7 @@ def conv2d_NCHWc_compute(data, kernel, strides, padding, dilation, layout, out_l name='conv2d_NCHWc', tag="conv2d_NCHWc") -@tvm.target.generic_func -def conv2d_NCHWc_int8(data, kernel, strides, padding, dilation, layout, out_layout, +def conv2d_NCHWc_int8(data, kernel, stride, padding, dilation, layout, out_layout, out_dtype='int32'): """Conv2D operator for nChw[x]c layout. @@ -557,59 +504,9 @@ def conv2d_NCHWc_int8(data, kernel, strides, padding, dilation, layout, out_layo 5-D with shape [batch, out_channel_chunk, out_height, out_width, out_channel_block] """ - return conv2d_NCHWc_int8_compute(data, - kernel, - strides, - padding, - dilation, - layout, - out_layout, - out_dtype) - - -def conv2d_NCHWc_int8_compute(data, kernel, strides, padding, dilation, layout, out_layout, - out_dtype='int32'): - """Conv2D operator for nChw[x]c layout. - - Parameters - ---------- - data : tvm.Tensor - 5-D with shape [batch, in_channel_chunk, in_height, in_width, in_channel_block] - - kernel : tvm.Tensor - 7-D with shape - [num_filter_chunk, in_channel_chunk, filter_height, filter_width, in_channel_block/4, - num_filter_block, 4] - - stride : int or a list/tuple of two ints - stride size, or [stride_height, stride_width] - - padding : int or a list/tuple of 2 or 4 ints - padding size, or - [pad_height, pad_width] for 2 ints, or - [pad_top, pad_left, pad_bottom, pad_right] for 4 ints - - dilation: int or a list/tuple of two ints - dilation size, or [dilation_height, dilation_width] - - layout : str - Input data layout - - out_layout : str - Output data layout - - out_dtype : str - output data type - - Returns - ------- - output : tvm.Tensor - 5-D with shape [batch, out_channel_chunk, out_height, out_width, out_channel_block] - """ - # layout and out_layout are not used here, # we keep them for debug convenience when dumping autotvm workload - HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides) + HSTR, WSTR = stride if isinstance(stride, (tuple, list)) else (stride, stride) dilation_h, dilation_w = dilation if isinstance(dilation, (tuple, list)) \ else (dilation, dilation) @@ -724,33 +621,6 @@ def conv2d_winograd_weight_transform(kernel, tile_size): axis=[r_kh, r_kw]), name='transform_weight') -@tvm.target.generic_func -def conv2d_winograd_without_weight_transform(input, filter, strides, padding, dilation, - layout, out_dtype, tile_size): - """Compute convolution in winograd algorithm. The filter is supposed to be transformed - in advance. - - Parameters - ---------- - input : tvm.Tensor - 4-D with shape [batch, in_height, in_width, in_channel] - filter : tvm.Tensor - 4-D with shape [filter_height, filter_width, in_channel, num_filter] - strides : int or a list/tuple of two ints - Stride size, or [stride_height, stride_width] - padding : int or str - Padding size, or ['VALID', 'SAME'] - tile_size: int - Tile size of winograd transform. e.g. 2 for F(2x2, 3x3) and 4 for F(4x4, 3x3) - - Returns - ------- - output : tvm.Tensor - 4-D with shape [batch, out_height, out_width, out_channel] - """ - raise ValueError("missing register for topi.nn.conv2d_winograd_without_weight_transform") - - def conv2d_winograd_nnpack_weight_transform(kernel, convolution_algorithm, out_dtype): """Weight transformation for winograd Parameters @@ -769,32 +639,7 @@ def conv2d_winograd_nnpack_weight_transform(kernel, convolution_algorithm, out_d return nnpack.convolution_inference_weight_transform( kernel, algorithm=convolution_algorithm, dtype=out_dtype) -@tvm.target.generic_func -def conv2d_winograd_nnpack_without_weight_transform( - input, filter, bias, strides, padding, dilation, layout, out_dtype): - """Compute convolution in winograd algorithm. The filter is supposed to be transformed - in advance. - Parameters - ---------- - input : tvm.Tensor - 4-D with shape [batch, in_height, in_width, in_channel] - filter : tvm.Tensor - 4-D with shape [num_filter, in_channel, 8, 8] - bias : tvm.Tensor - 1-D with shape [num_filter] - strides : int or a list/tuple of two ints - Stride size, or [stride_height, stride_width] - padding : int or str - Padding size, or ['VALID', 'SAME'] - Returns - ------- - output : tvm.Tensor - 4-D with shape [batch, out_height, out_width, out_channel] - """ - raise ValueError("missing register for topi.nn.conv2d_winograd_without_weight_transform") - -@tvm.target.generic_func def group_conv2d_nchw(Input, Filter, stride, padding, dilation, groups, out_dtype=None): """Group convolution operator in NCHW layout. @@ -871,3 +716,20 @@ def group_conv2d_nchw(Input, Filter, stride, padding, dilation, groups, out_dtyp xx * stride_w + rx * dilation_w].astype(out_dtype) * Filter[ff, rc, ry, rx].astype(out_dtype), axis=[rc, ry, rx]), tag='group_conv2d_nchw') + + +def unpack_NCHWc_to_nchw(packed_out, out_dtype): + n, oc_chunk, oh, ow, oc_bn = get_const_tuple(packed_out.shape) + + idxmod = tvm.indexmod + idxdiv = tvm.indexdiv + + oshape = (n, oc_chunk * oc_bn, oh, ow) + unpacked_out = \ + tvm.compute(oshape, + lambda n, c, h, w: + packed_out[n, idxdiv(c, oc_bn), h, w, idxmod(c, oc_bn)] + .astype(out_dtype), + name='output_unpack', + tag=tag.INJECTIVE+",unpack_nchwc") + return unpacked_out \ No newline at end of file diff --git a/topi/python/topi/nn/conv2d_transpose.py b/topi/python/topi/nn/conv2d_transpose.py index e635f43cdbc49..db132fc81f132 100644 --- a/topi/python/topi/nn/conv2d_transpose.py +++ b/topi/python/topi/nn/conv2d_transpose.py @@ -25,7 +25,6 @@ from ..util import simplify -@tvm.target.generic_func def conv2d_transpose_nchw(Input, Filter, strides, padding, out_dtype): """Transposed 2D convolution nchw forward operator. diff --git a/topi/python/topi/nn/conv3d.py b/topi/python/topi/nn/conv3d.py index 83c16dae7ac44..a37d9894d4c3e 100644 --- a/topi/python/topi/nn/conv3d.py +++ b/topi/python/topi/nn/conv3d.py @@ -25,46 +25,8 @@ from ..util import simplify -@tvm.target.generic_func -def conv3d(input, filter, strides, padding, dilation, layout='NCDHW', out_dtype=None): - """Conv3D operator. - - Parameters - ---------- - input : tvm.Tensor - 5-D with shape [batch, in_depth, in_channel, in_height, in_width] - - filter : tvm.Tensor - 5-D with shape [num_filter, in_channel, filter_depth, filter_height, filter_width] - - strides : int or a list/tuple of three ints - stride size, or [stride_depth, stride_height, stride_width] - - padding : int or a list/tuple of three ints - padding size, or [pad_depth, pad_height, pad_width] - - dilation: int or a list/tuple of three ints - dilation size, or [dilation_depth, dilation_height, dilation_width] - - layout : str - layout of data - - Returns - ------- - output : tvm.Tensor - 5-D with shape [batch, out_depth, out_channel, out_height, out_width] - """ - # search platform specific declaration first - # default declaration - if layout == 'NCDHW': - return conv3d_ncdhw(input, filter, strides, padding, dilation, out_dtype) - elif layout == 'NDHWC': - return conv3d_ndhwc(input, filter, strides, padding, dilation, out_dtype) - raise ValueError("not support this layout {} yet".format(layout)) - - -def conv3d_ncdhw(Input, Filter, stride, padding, dilation, out_dtype=None): - """Convolution operator in NCDHW layout. +def conv3d_ncdhw(Input, Filter, stride, padding, dilation, layout='NCDHW', out_dtype=None): + """Conv3D operator in NCDHW layout. Parameters ---------- @@ -88,6 +50,7 @@ def conv3d_ncdhw(Input, Filter, stride, padding, dilation, out_dtype=None): Output : tvm.Tensor 5-D with shape [batch, out_channel, out_depth, out_height, out_width] """ + assert layout == "NCDHW" if out_dtype is None: out_dtype = Input.dtype assert isinstance(stride, int) or len(stride) == 3 @@ -132,7 +95,7 @@ def conv3d_ncdhw(Input, Filter, stride, padding, dilation, out_dtype=None): axis=[rc, rz, ry, rx]), tag="conv3d_ncdhw") -def conv3d_ndhwc(Input, Filter, stride, padding, dilation, out_dtype='float32'): +def conv3d_ndhwc(Input, Filter, stride, padding, dilation, layout='NDHWC', out_dtype='float32'): """Convolution operator in NDHWC layout. Parameters @@ -157,6 +120,7 @@ def conv3d_ndhwc(Input, Filter, stride, padding, dilation, out_dtype='float32'): Output : tvm.Tensor 5-D with shape [batch, out_channel, out_depth, out_height, out_width] """ + assert layout == "NDHWC" assert isinstance(stride, int) or len(stride) == 3 assert isinstance(dilation, int) or len(dilation) == 3 diff --git a/topi/python/topi/nn/deformable_conv2d.py b/topi/python/topi/nn/deformable_conv2d.py index 2417411efc37d..251f68aa8c258 100644 --- a/topi/python/topi/nn/deformable_conv2d.py +++ b/topi/python/topi/nn/deformable_conv2d.py @@ -22,7 +22,6 @@ from ..util import get_const_tuple from ..cpp.util import bilinear_sample_nchw -@tvm.target.generic_func def deformable_conv2d_nchw(data, offset, kernel, strides, padding, dilation, deformable_groups, groups, out_dtype): """Deformable conv2D operator in NCHW layout. diff --git a/topi/python/topi/nn/dense.py b/topi/python/topi/nn/dense.py index 671b602edc30a..fe21e7417bdad 100644 --- a/topi/python/topi/nn/dense.py +++ b/topi/python/topi/nn/dense.py @@ -19,7 +19,7 @@ import tvm from .. import tag -def dense_default(data, weight, bias=None, out_dtype=None): +def dense(data, weight, bias=None, out_dtype=None): """The default implementation of dense in topi. Parameters @@ -59,29 +59,3 @@ def dense_default(data, weight, bias=None, out_dtype=None): lambda i, j: matmul[i, j] + bias[j].astype(out_dtype), \ tag=tag.BROADCAST) return matmul - - -@tvm.target.override_native_generic_func("dense") -def dense(data, weight, bias=None, out_dtype=None): - """Applies a linear transformation: :math:`Y = XW^T + b`. - - Parameters - ---------- - data : tvm.Tensor - 2-D with shape [batch, in_dim] - - weight : tvm.Tensor - 2-D with shape [out_dim, in_dim] - - bias : tvm.Tensor, optional - 1-D with shape [out_dim] - - out_dtype : str - The output type. This is used for mixed precision. - - Returns - ------- - output : tvm.Tensor - 2-D with shape [batch, out_dim] - """ - return dense_default(data, weight, bias, out_dtype) diff --git a/topi/python/topi/nn/depthwise_conv2d.py b/topi/python/topi/nn/depthwise_conv2d.py index f50e357a3bb8c..49aaace0f833b 100644 --- a/topi/python/topi/nn/depthwise_conv2d.py +++ b/topi/python/topi/nn/depthwise_conv2d.py @@ -47,7 +47,6 @@ def _get_workload(data, kernel, stride, padding, out_dtype): out_channel, kh, kw, HPAD, WPAD, HSTR, WSTR) -@tvm.target.generic_func def depthwise_conv2d_nchw(Input, Filter, stride, padding, dilation, out_dtype=None): """Depthwise convolution nchw forward operator. @@ -121,7 +120,6 @@ def depthwise_conv2d_nchw(Input, Filter, stride, padding, dilation, out_dtype=No return Output -@tvm.target.generic_func def depthwise_conv2d_nhwc(Input, Filter, stride, padding, dilation, out_dtype=None): """Depthwise convolution nhwc forward operator. @@ -307,7 +305,6 @@ def depthwise_conv2d_backward_weight_nhwc(Input, Out_grad, oshape, fshape, strid return Weight_grad -@tvm.target.generic_func def depthwise_conv2d_NCHWc(Input, Filter, stride, padding, dilation, layout, out_layout, out_dtype=None): """Depthwise convolution NCHW[x]c forward operator. diff --git a/topi/python/topi/nn/local_response_norm.py b/topi/python/topi/nn/local_response_norm.py index de002bfffbe62..1b41c7dbfb5e3 100644 --- a/topi/python/topi/nn/local_response_norm.py +++ b/topi/python/topi/nn/local_response_norm.py @@ -17,10 +17,8 @@ # pylint: disable=invalid-name """TVM operator for local response norm compute.""" from __future__ import absolute_import -import tvm from .. import cpp -@tvm.target.generic_func def lrn(data, size, axis=1, alpha=0.0001, beta=0.75, bias=2): """Perform the across channels local response normalisation on the input data. diff --git a/topi/python/topi/nn/sparse.py b/topi/python/topi/nn/sparse.py index 584126ea20159..6974ff4a13abd 100644 --- a/topi/python/topi/nn/sparse.py +++ b/topi/python/topi/nn/sparse.py @@ -22,7 +22,6 @@ from ..util import get_const_tuple -@tvm.target.generic_func def sparse_dense(data, weight_data, weight_indices, weight_indptr): """ Computes sparse-dense matrix multiplication of `data` and @@ -105,7 +104,7 @@ def _compute_block(i, nb_j, j): lambda m, n: bsrmm_block[m, idxd(n, bs_r), idxm(n, bs_r)], tag="sparse_dense_bsrmm") -@tvm.target.generic_func + def sparse_transpose(sparse_data, sparse_indices, sparse_indptr): """ Transpose a square sparse matrix, @@ -148,14 +147,15 @@ def sparse_transpose(sparse_data, sparse_indices, sparse_indptr): shape=output_shape, inputs=[sparse_data, sparse_indices, sparse_indptr], fcompute=lambda ins, outs: - csr_transpose_ir(ins[0], ins[1], ins[2], outs[0], outs[1], outs[2]), + _csr_transpose_ir(ins[0], ins[1], ins[2], outs[0], outs[1], outs[2]), tag="sparse_transpose_csr", dtype=['float32', 'int32', 'int32'], name='out') return [output_data, output_indices, output_indptr] -def csr_transpose_ir(data, indices, indptr, out_data, out_indices, out_indptr): + +def _csr_transpose_ir(data, indices, indptr, out_data, out_indices, out_indptr): """define ir for csr_transpose""" irb = tvm.ir_builder.create() diff --git a/topi/python/topi/nn/util.py b/topi/python/topi/nn/util.py index aa73e849427b7..f0cdd9a0d3c26 100644 --- a/topi/python/topi/nn/util.py +++ b/topi/python/topi/nn/util.py @@ -143,7 +143,7 @@ def get_pad_tuple(padding, kernel): pad_h = padding[0] * 2 pad_w = padding[1] * 2 elif len(padding) == 4: - return padding[0], padding[1], padding[2], padding[3] + return padding[0], padding[1], padding[2], padding[3] else: raise ValueError("Size of padding can only be 2 or 4") elif isinstance(padding, int): diff --git a/topi/python/topi/opengl/conv2d_nchw.py b/topi/python/topi/opengl/conv2d_nchw.py index e39d1ad805b08..52ed11972e6fa 100644 --- a/topi/python/topi/opengl/conv2d_nchw.py +++ b/topi/python/topi/opengl/conv2d_nchw.py @@ -18,9 +18,7 @@ """Schedule for conv2d_nchw with auto fusion""" import tvm from .. import tag -from .. import generic -@generic.schedule_conv2d_nchw.register(["opengl"]) def schedule_conv2d_nchw(outs): """Schedule for conv2d_nchw. diff --git a/topi/python/topi/opengl/dense.py b/topi/python/topi/opengl/dense.py index c93dfccbeeced..db2c4a6779044 100644 --- a/topi/python/topi/opengl/dense.py +++ b/topi/python/topi/opengl/dense.py @@ -19,9 +19,7 @@ from __future__ import absolute_import as _abs import tvm from .. import tag -from .. import generic -@generic.schedule_dense.register(["opengl"]) def schedule_dense(outs): """Schedule for dense operator. diff --git a/topi/python/topi/opengl/injective.py b/topi/python/topi/opengl/injective.py index d3ebc943b9629..28dc87d1a5fb9 100644 --- a/topi/python/topi/opengl/injective.py +++ b/topi/python/topi/opengl/injective.py @@ -17,9 +17,7 @@ # pylint: disable=invalid-name, unused-variable, """Schedule for composition of injective operator""" import tvm -from .. import generic -@generic.schedule_injective_from_existing.register(["opengl"]) def schedule_injective_from_existing(sch, out): """Schedule for injective op from existing schedule. @@ -38,7 +36,6 @@ def schedule_injective_from_existing(sch, out): sch[out].opengl() return sch -@generic.schedule_injective.register(["opengl"]) def schedule_injective(outs): """Schedule for injective op. diff --git a/topi/python/topi/opengl/pooling.py b/topi/python/topi/opengl/pooling.py index 04c7b0cd00028..3226422048e5f 100644 --- a/topi/python/topi/opengl/pooling.py +++ b/topi/python/topi/opengl/pooling.py @@ -18,9 +18,7 @@ """Schedule for pooling operators""" import tvm from .. import tag -from .. import generic -@generic.schedule_adaptive_pool.register(["opengl"]) def schedule_adaptive_pool(outs): """Schedule for adaptive pool. @@ -69,7 +67,6 @@ def traverse(OP): return s -@generic.schedule_pool.register(["opengl"]) def schedule_pool(outs, layout): """Schedule for pool. diff --git a/topi/python/topi/opengl/softmax.py b/topi/python/topi/opengl/softmax.py index e343d4513241c..ff218d13c2b16 100644 --- a/topi/python/topi/opengl/softmax.py +++ b/topi/python/topi/opengl/softmax.py @@ -17,9 +17,7 @@ # pylint: disable=invalid-name, unused-variable, trailing-whitespace """Schedule for softmax operator""" import tvm -from .. import generic -@generic.schedule_softmax.register(["opengl"]) def schedule_softmax(outs): """Schedule for softmax op. diff --git a/topi/python/topi/rocm/conv2d.py b/topi/python/topi/rocm/conv2d.py index be29c6f6b0cc5..0daa4be58e6c3 100644 --- a/topi/python/topi/rocm/conv2d.py +++ b/topi/python/topi/rocm/conv2d.py @@ -20,13 +20,12 @@ from tvm import autotvm from tvm.contrib import miopen -from .. import nn, generic +from .. import generic from ..util import get_const_tuple -from ..cuda.conv2d import conv2d_cuda, schedule_conv2d_nchw_cuda from ..nn.util import get_pad_tuple -@autotvm.register_topi_compute(nn.conv2d, 'rocm', ['direct', 'winograd']) -def conv2d_rocm(cfg, data, kernel, strides, padding, dilation, layout='NCHW', out_dtype='float32'): +@autotvm.register_topi_compute("conv2d_nchw_miopen.rocm") +def conv2d_nchw_miopen(cfg, data, kernel, strides, padding, dilation, out_dtype='float32'): """Conv2D operator for rocm backend. Parameters @@ -57,39 +56,34 @@ def conv2d_rocm(cfg, data, kernel, strides, padding, dilation, layout='NCHW', ou 4-D with shape [batch, out_channel, out_height, out_width] """ - target = tvm.target.Target.current() - if "miopen" in target.libs: - assert layout == 'NCHW', "Only NCHW layout is supported." - CO, CI, KH, KW = get_const_tuple(kernel.shape) - N, _, H, W = get_const_tuple(data.shape) - - # handle dilation - stride_h, stride_w = (strides, strides) if isinstance(strides, int) else strides - pt, pl, pb, pr = get_pad_tuple(padding, (KH, KW)) - pad_h, pad_w = pt + pb, pl + pr - dilation_h, dilation_w = (dilation, dilation) if isinstance(dilation, int) else dilation - - OH = (H + 2 * pad_h - KH) // stride_h + 1 - OW = (W + 2 * pad_w - KW) // stride_w + 1 - cfg.add_flop(2 * N * OH * OW * CO * CI * ((KH - 1) * dilation_h + 1) *\ - ((KW - 1) * dilation_w + 1)) - - return miopen.conv2d_forward(data, - kernel, - stride_h, - stride_w, - pad_h, - pad_w, - dilation_h, - dilation_w, - conv_mode=0, - data_type=1) - - return conv2d_cuda(cfg, data, kernel, strides, padding, dilation, layout, out_dtype) - - -@autotvm.register_topi_schedule(generic.schedule_conv2d_nchw, 'rocm', ["direct", 'winograd']) -def schedule_conv2d_nchw_rocm(cfg, outs): + CO, CI, KH, KW = get_const_tuple(kernel.shape) + N, _, H, W = get_const_tuple(data.shape) + + # handle dilation + stride_h, stride_w = (strides, strides) if isinstance(strides, int) else strides + pt, pl, pb, pr = get_pad_tuple(padding, (KH, KW)) + pad_h, pad_w = pt + pb, pl + pr + dilation_h, dilation_w = (dilation, dilation) if isinstance(dilation, int) else dilation + + OH = (H + 2 * pad_h - KH) // stride_h + 1 + OW = (W + 2 * pad_w - KW) // stride_w + 1 + cfg.add_flop(2 * N * OH * OW * CO * CI * ((KH - 1) * dilation_h + 1) *\ + ((KW - 1) * dilation_w + 1)) + + return miopen.conv2d_forward(data, + kernel, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + conv_mode=0, + data_type=1) + + +@autotvm.register_topi_schedule("conv2d_nchw_miopen.rocm") +def schedule_conv2d_nchw_miopen(cfg, outs): """TOPI schedule callback of conv2d for rocm Parameters @@ -106,8 +100,4 @@ def schedule_conv2d_nchw_rocm(cfg, outs): s: Schedule The computation schedule for conv2d. """ - target = tvm.target.Target.current() - if target and "miopen" in target.libs: - return generic.schedule_extern(outs) - - return schedule_conv2d_nchw_cuda(cfg, outs) + return generic.schedule_extern(outs) diff --git a/topi/python/topi/rocm/dense.py b/topi/python/topi/rocm/dense.py index f2adeaabef61a..8729a62bd677f 100644 --- a/topi/python/topi/rocm/dense.py +++ b/topi/python/topi/rocm/dense.py @@ -20,13 +20,12 @@ import tvm from tvm import autotvm from tvm.contrib import rocblas -import topi -from ..nn.dense import dense, dense_default +from .. import generic, nn from .. import tag -from .. import generic +from ..util import traverse_inline -@autotvm.register_topi_compute(dense, "rocm", "direct") -def dense_rocm(cfg, data, weight, bias=None, out_dtype=None): +@autotvm.register_topi_compute('dense.rocm') +def dense(cfg, data, weight, bias=None, out_dtype=None): """Dense operator for rocm backend. Parameters @@ -54,21 +53,10 @@ def dense_rocm(cfg, data, weight, bias=None, out_dtype=None): assert len(bias.shape) == 1 if out_dtype is None: out_dtype = data.dtype - batch, in_dim = data.shape - out_dim, _ = weight.shape - target = tvm.target.Target.current() - if "rocblas" in target.libs: - assert out_dtype == data.dtype, "Mixed precision not supported." - matmul = rocblas.matmul(data, weight, False, True) - if bias is not None: - matmul = tvm.compute((batch, out_dim), \ - lambda i, j: matmul[i, j] + bias[j], \ - tag=tag.BROADCAST) - return matmul - return dense_default(data, weight, bias, out_dtype) - - -@autotvm.register_topi_schedule(generic.schedule_dense, "rocm", "direct") + return nn.dense(data, weight, bias, out_dtype) + + +@autotvm.register_topi_schedule('dense.rocm') def schedule_dense(cfg, outs): """Schedule for dense operator. @@ -83,7 +71,72 @@ def schedule_dense(cfg, outs): s: Schedule The computation schedule for dense. """ - target = tvm.target.Target.current() - if target.target_name == "rocm" and "rocblas" in target.libs: - return generic.schedule_extern(outs) - return topi.cuda.schedule_dense(cfg, outs) + outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs + s = tvm.create_schedule([x.op for x in outs]) + + def _callback(op): + if op.tag == 'dense': + Dense = op.output(0) + num_thread = 64 + k = Dense.op.reduce_axis[0] + ko, kf = s[Dense].split(k, factor=num_thread) + DenseF = s.rfactor(Dense, kf) + + if Dense.op in s.outputs: + Out = Dense + else: + Out = outs[0].op.output(0) + s[Dense].compute_at(s[Out], s[Out].op.axis[1]) + s[Out].bind(s[Out].op.axis[0], tvm.thread_axis("blockIdx.y")) + s[Out].bind(s[Out].op.axis[1], tvm.thread_axis("blockIdx.x")) + + tx = s[Dense].op.reduce_axis[0] + thread_x = tvm.thread_axis("threadIdx.x") + s[Dense].bind(tx, thread_x) + s[DenseF].compute_at(s[Dense], tx) + s[Dense].set_store_predicate(thread_x.var.equal(0)) + s[Out].set_store_predicate(thread_x.var.equal(0)) + + traverse_inline(s, outs[0].op, _callback) + return s + + +@autotvm.register_topi_compute('dense_rocblas.rocm') +def dense_rocblas(cfg, data, weight, bias=None, out_dtype=None): + """Dense operator for rocm backend with cblas. + + Parameters + ---------- + data : tvm.Tensor + 2-D with shape [batch, in_dim] + + weight : tvm.Tensor + 2-D with shape [out_dim, in_dim] + + bias : tvm.Tensor, optional + 1-D with shape [out_dim] + + out_dtype : str + The output type. This is used for mixed precision. + + Returns + ------- + output : tvm.Tensor + 2-D with shape [batch, out_dim] + """ + assert out_dtype == data.dtype, "Mixed precision not supported." + matmul = rocblas.matmul(data, weight, False, True) + batch, in_dim = data.shape + out_dim, _ = weight.shape + cfg.add_flop(batch * in_dim * out_dim * 2) + if bias is not None: + matmul = tvm.compute((batch, out_dim), + lambda i, j: matmul[i, j] + bias[j], + tag=tag.BROADCAST) + return matmul + + +@autotvm.register_topi_schedule('dense_rocblas.rocm') +def schedule_dense_rocblas(_, outs): + """Schedule for dense operator with rocm cblas""" + return generic.schedule_extern(outs) diff --git a/topi/python/topi/rocm/nn.py b/topi/python/topi/rocm/nn.py index 8a9c8c393da6e..5f134cb32c984 100644 --- a/topi/python/topi/rocm/nn.py +++ b/topi/python/topi/rocm/nn.py @@ -17,12 +17,7 @@ """scheduler for normalization functions on rocm backend""" from __future__ import absolute_import as _abs -import tvm -from .. import generic from .. import cpp -@generic.schedule_lrn.register(["rocm", "gpu"]) def schedule_lrn(outs): - target = tvm.target.Target.current(allow_none=False) - cpp_target = cpp.TEST_create_target(target.target_name) - return cpp.rocm.schedule_lrn(cpp_target, outs) + return cpp.rocm.schedule_lrn(outs) diff --git a/topi/python/topi/sort.py b/topi/python/topi/sort.py index 22899c4232f75..96a088923d2dc 100644 --- a/topi/python/topi/sort.py +++ b/topi/python/topi/sort.py @@ -20,7 +20,6 @@ from tvm import api from .util import get_const_tuple -@tvm.target.generic_func def argsort(data, valid_count=None, axis=-1, is_ascend=1, dtype="float32"): """Performs sorting along the given axis and returns an array of indices having the same shape as an input array that index @@ -99,7 +98,6 @@ def argsort(data, valid_count=None, axis=-1, is_ascend=1, dtype="float32"): return out -@tvm.target.generic_func def topk(data, k=1, axis=-1, ret_type="both", is_ascend=False, dtype="int64"): """Get the top k elements in an input tensor along the given axis. diff --git a/topi/python/topi/vision/nms.py b/topi/python/topi/vision/nms.py index 5bb36f7dfa747..c171f8ca5fe34 100644 --- a/topi/python/topi/vision/nms.py +++ b/topi/python/topi/vision/nms.py @@ -116,7 +116,7 @@ def hybrid_get_valid_counts(data, score_threshold, id_index, score_index, one): out_tensor[i, j, k] = -one return valid_count, out_tensor -@tvm.target.generic_func + def get_valid_counts(data, score_threshold=0, id_index=0, score_index=1): """Get valid count of bounding boxes given a score threshold. Also moves valid boxes to the top of input data. @@ -289,7 +289,6 @@ def hybrid_nms(data, sorted_index, valid_count, return output, box_indices -@tvm.target.generic_func def non_max_suppression(data, valid_count, max_output_size=-1, iou_threshold=0.5, force_suppress=False, top_k=-1, coord_start=2, score_index=1, id_index=0, diff --git a/topi/python/topi/vision/rcnn/proposal.py b/topi/python/topi/vision/rcnn/proposal.py index d48c89078ec0d..5de4998c066cf 100644 --- a/topi/python/topi/vision/rcnn/proposal.py +++ b/topi/python/topi/vision/rcnn/proposal.py @@ -317,7 +317,7 @@ def prepare_output_ir(sorted_bbox_buf, remove_mask_buf, out_buf): body = ib.get() return body -@tvm.target.generic_func + def proposal(cls_prob, bbox_pred, im_info, scales, ratios, feature_stride, threshold, rpn_pre_nms_top_n, rpn_post_nms_top_n, rpn_min_size, iou_loss): """Proposal operator. diff --git a/topi/python/topi/vision/rcnn/roi_align.py b/topi/python/topi/vision/rcnn/roi_align.py index a6540b3666a55..a0bc5e2915972 100644 --- a/topi/python/topi/vision/rcnn/roi_align.py +++ b/topi/python/topi/vision/rcnn/roi_align.py @@ -21,7 +21,6 @@ from ...cpp.util import bilinear_sample_nchw -@tvm.target.generic_func def roi_align_nchw(data, rois, pooled_size, spatial_scale, sample_ratio=-1): """ROI align operator in NCHW layout. diff --git a/topi/python/topi/vision/rcnn/roi_pool.py b/topi/python/topi/vision/rcnn/roi_pool.py index 53ffe35e7e1b6..f346f580b3ba9 100644 --- a/topi/python/topi/vision/rcnn/roi_pool.py +++ b/topi/python/topi/vision/rcnn/roi_pool.py @@ -19,7 +19,6 @@ import tvm from ...util import get_const_tuple -@tvm.target.generic_func def roi_pool_nchw(data, rois, pooled_size, spatial_scale): """ROI pool operator in NCHW layout. diff --git a/topi/python/topi/vision/reorg.py b/topi/python/topi/vision/reorg.py index 7adfc73d9be1f..3ba5e8495a223 100644 --- a/topi/python/topi/vision/reorg.py +++ b/topi/python/topi/vision/reorg.py @@ -20,10 +20,8 @@ Reorg operator, used in darknet. """ from __future__ import absolute_import as _abs -import tvm from .. import cpp -@tvm.target.generic_func def reorg(data, stride): """Reorg forward operators. diff --git a/topi/python/topi/vision/ssd/multibox.py b/topi/python/topi/vision/ssd/multibox.py index 8c31f823cbe41..4309af4303f1c 100644 --- a/topi/python/topi/vision/ssd/multibox.py +++ b/topi/python/topi/vision/ssd/multibox.py @@ -89,7 +89,6 @@ def hybrid_multibox_prior(data, sizes, ratios, steps, offsets): return output -@tvm.target.generic_func def multibox_prior(data, sizes=(1,), ratios=(1,), steps=(-1, -1), offsets=(0.5, 0.5), clip=False): """Generate prior(anchor) boxes from data, sizes and ratios. @@ -233,7 +232,6 @@ def hybrid_multibox_transform_loc(cls_prob, loc_pred, anchor, return out_loc, valid_count -@tvm.target.generic_func def multibox_transform_loc(cls_prob, loc_pred, anchor, clip=True, threshold=0.01, variances=(0.1, 0.1, 0.2, 0.2)): """Location transformation for multibox detection @@ -267,7 +265,6 @@ def multibox_transform_loc(cls_prob, loc_pred, anchor, clip=True, threshold=0.01 tvm.const(threshold, "float32"), tvm.convert(variances)) -@tvm.target.generic_func def multibox_detection(cls_prob, loc_pred, anchor, clip=True, threshold=0.01, nms_threshold=0.5, force_suppress=False, variances=(0.1, 0.1, 0.2, 0.2), nms_topk=-1): """Convert multibox detection predictions. diff --git a/topi/python/topi/x86/__init__.py b/topi/python/topi/x86/__init__.py index d1c728d7b75ce..ce07c194268a3 100644 --- a/topi/python/topi/x86/__init__.py +++ b/topi/python/topi/x86/__init__.py @@ -19,9 +19,9 @@ """x86 specific declaration and schedules.""" from __future__ import absolute_import as _abs -from .conv1d import schedule_conv1d_nwc -from .conv2d import schedule_conv2d, schedule_conv2d_nhwc -from .conv3d import schedule_conv3d_ndhwc +from .conv1d import * +from .conv2d import * +from .conv3d import * from .binarize_pack import schedule_binarize_pack from .binary_dense import schedule_binary_dense from .nn import * @@ -29,12 +29,12 @@ from .injective import * from .reduction import * from .pooling import schedule_pool, schedule_adaptive_pool -from .bitserial_conv2d import schedule_bitserial_conv2d -from .bitserial_dense import schedule_bitserial_dense -from .depthwise_conv2d import schedule_depthwise_conv2d_NCHWc -from .dense import _schedule_dense, _schedule_dense_pack, _schedule_dense_nopack -from .batch_matmul import schedule_batch_matmul +from .bitserial_conv2d import * +from .bitserial_dense import * +from .depthwise_conv2d import * +from .dense import * +from .batch_matmul import * from .roi_align import roi_align_nchw -from .conv2d_transpose import _schedule_conv2d_transpose_nchw +from .conv2d_transpose import * from .sparse import * from .conv2d_alter_op import * diff --git a/topi/python/topi/x86/batch_matmul.py b/topi/python/topi/x86/batch_matmul.py index fef6c48d6bedc..a7cb9e98f11fa 100644 --- a/topi/python/topi/x86/batch_matmul.py +++ b/topi/python/topi/x86/batch_matmul.py @@ -21,12 +21,12 @@ from tvm import autotvm from tvm.autotvm.task.space import SplitEntity from tvm.contrib import cblas -from .. import generic, nn +from .. import generic from ..util import traverse_inline, get_const_tuple, get_max_power2_factor -@autotvm.register_topi_compute(nn.batch_matmul, "cpu", "direct") -def _declaration_batch_matmul_nopack(cfg, x, y): +@autotvm.register_topi_compute("batch_matmul.x86") +def batch_matmul(cfg, x, y): """Computes batch matrix multiplication of `x` and `y` when `x` and `y` are data in batch. @@ -43,10 +43,6 @@ def _declaration_batch_matmul_nopack(cfg, x, y): output : tvm.Tensor 3-D with shape [batch, M, N] """ - target = tvm.target.Target.current() - if "cblas" in target.libs: - return cblas.batch_matmul(x, y, False, True) - assert len(x.shape) == 3 and len( y.shape) == 3, "only support 3-dim batch_matmul" XB, M, XK = get_const_tuple(x.shape) @@ -56,7 +52,7 @@ def _declaration_batch_matmul_nopack(cfg, x, y): B = XB K = XK if cfg.is_fallback: - _default_batch_matmul_nopack_config(cfg, M, N, K) + _default_batch_matmul_config(cfg, M, N, K) k = tvm.reduce_axis((0, K), name='k') C = tvm.compute( @@ -66,7 +62,7 @@ def _declaration_batch_matmul_nopack(cfg, x, y): return C -@autotvm.register_topi_schedule(generic.schedule_batch_matmul, "cpu", "direct") +@autotvm.register_topi_schedule("batch_matmul.x86") def schedule_batch_matmul(cfg, outs): """Schedule for batch_matmul @@ -83,10 +79,6 @@ def schedule_batch_matmul(cfg, outs): sch: Schedule The computation schedule for the op. """ - target = tvm.target.Target.current() - if "cblas" in target.libs: - return generic.schedule_extern(outs) - s = tvm.create_schedule([x.op for x in outs]) def _callback(op): @@ -131,9 +123,42 @@ def _callback(op): return s -def _default_batch_matmul_nopack_config(cfg, M, N, K): +def _default_batch_matmul_config(cfg, M, N, K): cfg["tile_k"] = SplitEntity([K // 16, 16]) x_bn = get_max_power2_factor(N, 8) cfg["tile_x"] = SplitEntity([N // x_bn, x_bn]) y_bn = get_max_power2_factor(M, 8) cfg["tile_y"] = SplitEntity([M // y_bn, y_bn]) + + +@autotvm.register_topi_compute("batch_matmul_cblas.x86") +def batch_matmul_cblas(cfg, x, y): + """Computes batch matrix multiplication of `x` and `y` when `x` and `y` are + data in batch. + + Parameters + ---------- + cfg : ConfigSpace + Autotvm tuning space config file + x : tvm.Tensor + 3-D with shape [batch, M, K] + y : tvm.Tensor + 3-D with shape [batch, N, K] + Returns + ------- + output : tvm.Tensor + 3-D with shape [batch, M, N] + """ + assert len(x.shape) == 3 and len( + y.shape) == 3, "only support 3-dim batch_matmul" + XB, M, XK = get_const_tuple(x.shape) + YB, N, YK = get_const_tuple(y.shape) + assert XB == YB, "batch dimension doesn't match" + assert XK == YK, "shapes of x and y is inconsistant" + cfg.add_flop(XB * M * N * XK * 2) + return cblas.batch_matmul(x, y, False, True) + + +@autotvm.register_topi_schedule("batch_matmul_cblas.x86") +def schedule_batch_matmul_cblas(_, outs): + return generic.schedule_extern(outs) diff --git a/topi/python/topi/x86/bitserial_conv2d.py b/topi/python/topi/x86/bitserial_conv2d.py index 97d0dc0eefaa6..2ec5653756540 100644 --- a/topi/python/topi/x86/bitserial_conv2d.py +++ b/topi/python/topi/x86/bitserial_conv2d.py @@ -18,12 +18,237 @@ """Bitserial conv2d schedule on x86""" import tvm from tvm import autotvm -from topi.util import get_const_int -from .. import generic, tag +from .. import tag +from ..util import get_const_int, get_const_tuple +from ..nn.pad import pad +from ..nn.util import get_pad_tuple +from ..nn.bitserial_util import bitpack, binary_op_multiplier + +@autotvm.register_topi_compute("bitserial_conv2d_nchw.x86") +def bitserial_conv2d_nchw(cfg, data, kernel, stride, padding, in_bits, weight_bits, + pack_dtype='uint32', out_dtype='int16', unipolar=True): + """ Compute convolution with pack on spatial axes. """ + assert data.shape[0].value == 1, "spatial pack convolution only support batch size=1" + data_q = bitpack(data, in_bits, pack_axis=1, bit_axis=0, pack_type=pack_dtype) + # Check if kernel is already bitpacked + if len(kernel.shape) == 4: + kernel_q = bitpack(kernel, weight_bits, pack_axis=1, bit_axis=0, pack_type=pack_dtype) + KB, CO, _, KH, KW = get_const_tuple(kernel_q.shape) + else: + kernel_vec = kernel + OCO, _, KH, KW, KB, VC = get_const_tuple(kernel_vec.shape) + CO = OCO * VC + + IB, N, CI, H, W = get_const_tuple(data_q.shape) + KB, CO, _, KH, KW = get_const_tuple(kernel_q.shape) + + if isinstance(padding, int) or (isinstance(padding, (tuple, list)) and len(padding) == 2): + TPAD, LPAD, DPAD, RPAD = get_pad_tuple(padding, kernel) + else: + TPAD, LPAD, DPAD, RPAD = padding + pad_before = [0, 0, 0, TPAD, LPAD] + pad_after = [0, 0, 0, DPAD, RPAD] -@autotvm.register_topi_schedule(generic.nn.schedule_bitserial_conv2d_nchw, ['cpu'], 'direct') -@autotvm.register_topi_schedule(generic.nn.schedule_bitserial_conv2d_nhwc, ['cpu'], 'direct') -def schedule_bitserial_conv2d(cfg, outs): + if isinstance(stride, (tuple, list)): + HSTR, WSTR = stride + else: + HSTR, WSTR = stride, stride + HCAT, WCAT = KH-1, KW-1 + + TH = H + TPAD + DPAD + TW = W + LPAD + RPAD + OH = (H + TPAD + DPAD - KH) // HSTR + 1 + OW = (W + LPAD + RPAD - KW) // WSTR + 1 + + # ==================== define configuration space ==================== + n, co, oh, ow = cfg.axis(N), cfg.axis(CO), cfg.axis(OH), cfg.axis(OW) + ci, kh, kw = cfg.reduce_axis(CI), cfg.reduce_axis(KH), cfg.reduce_axis(KW) + ib, kb = cfg.reduce_axis(in_bits), cfg.reduce_axis(weight_bits) + + co, vc = cfg.define_split('tile_co', co, num_outputs=2, + filter=lambda x: max(x.size[1:]) <= 16) + oh, vh = cfg.define_split('tile_oh', oh, num_outputs=2, + filter=lambda x: max(x.size[1:]) <= 16) + ow, vw = cfg.define_split('tile_ow', ow, num_outputs=2, + filter=lambda x: max(x.size[1:]) <= 16) + cfg.define_annotate('ann_reduce', [ib, kb, kh, kw], policy='try_unroll') + + cfg.define_reorder("reorder_0", + [n, co, oh, ow, vc, vh, vw, kh, kw, kb, ib, ci], + policy='interval_all', interval=(6, 11)) + # binary ops + cfg.add_flop(2 * N * OH * OW * CO * CI * KH * KW * binary_op_multiplier(pack_dtype)) + # ==================== + + VC = cfg["tile_co"].size[-1] + VH = cfg["tile_oh"].size[-1] + VW = cfg["tile_ow"].size[-1] + + dvshape = (1, TH//(VH*HSTR), TW//(VW*WSTR), CI, VH*HSTR+HCAT, VW*WSTR+WCAT, IB) + kvshape = (CO//VC, CI, KH, KW, KB, VC) + ovshape = (1, CO//VC, OH//VH, OW//VW, VH, VW, VC) + oshape = (1, CO, OH, OW) + + if (TPAD != 0 and RPAD != 0): + data_pad = pad(data_q, pad_before, pad_after, name="data_pad") + else: + data_pad = data_q + + data_vec = tvm.compute(dvshape, lambda n, h, w, ci, vh, vw, b: \ + data_pad[b][n][ci][h*VH*HSTR+vh][w*VW*WSTR+vw], name='data_vec') + + if len(kernel.shape) == 4: + kernel_vec = tvm.compute(kvshape, lambda co, ci, dh, dw, b, vc: \ + kernel_q[b][co*VC+vc][ci][dh][dw], name='kernel_vec') + + ci = tvm.reduce_axis((0, CI), name='ci') + dh = tvm.reduce_axis((0, KH), name='dh') + dw = tvm.reduce_axis((0, KW), name='dw') + b1 = tvm.reduce_axis((0, IB), name='ib') + b2 = tvm.reduce_axis((0, KB), name='kb') + + def _conv(n, co, h, w, vh, vw, vc): + b1b2 = (b1+b2).astype(out_dtype) + if unipolar: + return tvm.sum((tvm.popcount( + data_vec[n, h, w, ci, vh*HSTR+dh, vw*WSTR+dw, b1].astype(out_dtype) & + kernel_vec[co, ci, dh, dw, b2, vc].astype(out_dtype)) - + tvm.popcount( + data_vec[n, h, w, ci, vh*HSTR+dh, vw*WSTR+dw, b1].astype(out_dtype) + & ~kernel_vec[co, ci, dh, dw, b2, vc]).astype(out_dtype)) << b1b2, + axis=[ci, dh, dw, b1, b2]) + + return tvm.sum((tvm.popcount( + data_vec[n, h, w, ci, vh*HSTR+dh, vw*WSTR+dw, b1] & + kernel_vec[co, ci, dh, dw, b2, vc])).astype(out_dtype) << b1b2, + axis=[ci, dh, dw, b1, b2]) + + conv = tvm.compute(ovshape, _conv, name='conv_out') + idxd = tvm.indexdiv + idxm = tvm.indexmod + + return tvm.compute( + oshape, lambda n, co, h, w: + conv[n, + idxd(co, VC), idxd(h, VH), idxd(w, VW), + idxm(h, VH), idxm(w, VW), idxm(co, VC)], + name='conv_vec', tag='spatial_bitserial_conv_nchw') + +@autotvm.register_topi_compute("bitserial_conv2d_nhwc.x86") +def bitserial_conv2d_nhwc(cfg, data, kernel, stride, padding, in_bits, weight_bits, + pack_dtype='uint32', out_dtype='int16', unipolar=True): + """ Compute convolution with pack on spatial axes. """ + assert data.shape[0].value == 1, "spatial pack convolution only support batch size=1" + data_q = bitpack(data, in_bits, pack_axis=3, bit_axis=4, pack_type=pack_dtype) + pack_kernel = len(kernel.shape) == 4 + + if pack_kernel: + kernel_q = bitpack(kernel, weight_bits, pack_axis=2, bit_axis=4, pack_type=pack_dtype) + else: + kernel_q = kernel + + KH, KW, _, CO, KB = get_const_tuple(kernel_q.shape) + N, H, W, CI, IB = get_const_tuple(data_q.shape) + + if isinstance(padding, int) or (isinstance(padding, (tuple, list)) and len(padding) == 2): + TPAD, LPAD, DPAD, RPAD = get_pad_tuple(padding, kernel) + else: + TPAD, LPAD, DPAD, RPAD = padding + pad_before = [0, TPAD, LPAD, 0, 0] + pad_after = [0, DPAD, RPAD, 0, 0] + + if isinstance(stride, (tuple, list)): + HSTR, WSTR = stride + else: + HSTR, WSTR = stride, stride + HCAT, WCAT = KH-1, KW-1 + + PAD_H = H + (TPAD + DPAD) + PAD_W = W + (LPAD + RPAD) + OH = (PAD_H - KH) // HSTR + 1 + OW = (PAD_W - KW) // WSTR + 1 + oshape = (1, OH, OW, CO) + + # ==================== define configuration space ==================== + n, oh, ow, co = cfg.axis(N), cfg.axis(OH), cfg.axis(OW), cfg.axis(CO) + ci, kh, kw = cfg.reduce_axis(CI), cfg.reduce_axis(KH), cfg.reduce_axis(KW) + ib, kb = cfg.reduce_axis(in_bits), cfg.reduce_axis(weight_bits) + + co, vc = cfg.define_split('tile_co', co, num_outputs=2, + filter=lambda x: max(x.size[1:]) <= 16) + oh, vh = cfg.define_split('tile_oh', oh, num_outputs=2, + filter=lambda x: max(x.size[1:]) <= 16) + ow, vw = cfg.define_split('tile_ow', ow, num_outputs=2, + filter=lambda x: max(x.size[1:]) <= 16) + cfg.define_annotate('ann_reduce', [ib, kb, kh, kw], policy='try_unroll') + cfg.define_reorder("reorder_0", + [n, oh, ow, co, vh, vw, kh, kw, kb, ib, vc, ci], + policy='interval_all', interval=(3, 7)) + # binary ops + cfg.add_flop(2 * N * OH * OW * CO * CI * KH * KW * binary_op_multiplier(pack_dtype)) + # ==================== + + VC = cfg["tile_co"].size[-1] + VH = cfg["tile_oh"].size[-1] + VW = cfg["tile_ow"].size[-1] + + dvshape = (1, PAD_H//(VH*HSTR), PAD_W//(VW*WSTR), VH*HSTR+HCAT, VW*WSTR+WCAT, CI, IB) + kvshape = (CO, KH, KW, CI, VC, KB) + ovshape = (1, OH, OW, CO, VH, VW, VC) + oshape = (1, OH, OW, CO) + + if (DPAD != 0 and RPAD != 0): + data_pad = pad(data_q, pad_before, pad_after, name="data_pad") + else: + data_pad = data_q + + data_vec = tvm.compute(dvshape, lambda n, h, w, vh, vw, ci, b: \ + data_pad[n][h*VH*HSTR+vh][w*VW*WSTR+vw][ci][b], name='data_vec') + + kernel_vec = tvm.compute(kvshape, lambda co, dh, dw, ci, vc, b: \ + kernel_q[dh][dw][ci][co*VC+vc][b], name='kernel_vec') + + ci = tvm.reduce_axis((0, CI), name='ci') + dh = tvm.reduce_axis((0, KH), name='dh') + dw = tvm.reduce_axis((0, KW), name='dw') + b1 = tvm.reduce_axis((0, IB), name='ib') + b2 = tvm.reduce_axis((0, KB), name='kb') + + def _conv(n, h, w, co, vh, vw, vc): + b1b2 = (b1+b2).astype(out_dtype) + if unipolar: + return tvm.sum( + ((tvm.popcount(data_vec[n, h, w, vh*HSTR+dh, vw*WSTR+dw, ci, b1] & + kernel_vec[co, dh, dw, ci, vc, b2]).astype(out_dtype) - + tvm.popcount(data_vec[n, h, w, vh*HSTR+dh, vw*WSTR+dw, ci, b1]& + ~kernel_vec[co, dh, dw, ci, vc, b2]).astype(out_dtype)) << b1b2), + axis=[dh, dw, ci, b1, b2]) + + return tvm.sum(tvm.popcount( + data_vec[n, h, w, vh*HSTR+dh, vw*WSTR+dw, ci, b1] & + kernel_vec[co, dh, dw, ci, vc, b2]).astype(out_dtype) << b1b2, + axis=[dh, dw, ci, b1, b2]) + + conv = tvm.compute(ovshape, _conv, name='conv') + + idxd = tvm.indexdiv + idxm = tvm.indexmod + return tvm.compute( + oshape, lambda n, h, w, co: + conv[n, + idxd(h, VH), idxd(w, VW), idxd(co, VC), + idxm(h, VH), idxm(w, VW), idxm(co, VC)], + name='output_unpack', tag='spatial_bitserial_conv_nhwc') + +@autotvm.register_topi_schedule("bitserial_conv2d_nchw.x86") +def schedule_bitserial_conv2d_nchw(cfg, outs): + return _schedule_bitserial_conv2d(cfg, outs) + +@autotvm.register_topi_schedule("bitserial_conv2d_nhwc.x86") +def schedule_bitserial_conv2d_nhwc(cfg, outs): + return _schedule_bitserial_conv2d(cfg, outs) + +def _schedule_bitserial_conv2d(cfg, outs): """CPU schedule for bitserial convolutions NCHW and NHWC""" s = tvm.create_schedule([x.op for x in outs]) scheduled_ops = [] diff --git a/topi/python/topi/x86/bitserial_dense.py b/topi/python/topi/x86/bitserial_dense.py index 47b972fa1319b..d464cae951b3c 100644 --- a/topi/python/topi/x86/bitserial_dense.py +++ b/topi/python/topi/x86/bitserial_dense.py @@ -19,11 +19,85 @@ from __future__ import absolute_import as _abs import tvm from tvm import autotvm -from topi.util import get_const_int +from topi.util import get_const_int, get_const_tuple from .. import tag -from .. import generic +from ..nn.bitserial_util import bitpack, binary_op_multiplier -@autotvm.register_topi_schedule(generic.nn.schedule_bitserial_dense, ['cpu'], 'direct') +@autotvm.register_topi_compute('bitserial_dense.x86') +def bitserial_dense(cfg, data, weight, data_bits, weight_bits, pack_dtype='uint32', + out_dtype='int16', unipolar=True): + """Bitserial dense implementation. TODO: Why are these separate + + Parameters + ---------- + data : tvm.Tensor + 2-D with shape [batch, in_dim] + weight : tvm.Tensor + 2-D with shape [out_dim, in_dim] or + 3-D with shape [out_dim, weight_bits, in_dim] + Returns + ------- + output : tvm.Tensor + 2-D with shape [batch, out_dim] + """ + data_packed = bitpack(data, data_bits, pack_axis=1, bit_axis=1, pack_type=pack_dtype) + if len(weight.shape) == 2: + weight_packed = bitpack(weight, weight_bits, pack_axis=1, bit_axis=1, pack_type=pack_dtype) + else: + weight_packed = weight + Y, DB, K = get_const_tuple(data_packed.shape) + X, WB, _ = get_const_tuple(weight_packed.shape) + ######## Search space + x, y = cfg.axis(X), cfg.axis(Y) + db, wb, k = cfg.reduce_axis(DB), cfg.reduce_axis(WB), cfg.reduce_axis(K) + ko, ki = cfg.define_split('tile_k', k, num_outputs=2) + yo, yi = cfg.define_split('tile_y', y, num_outputs=2) + xo, xi = cfg.define_split('tile_x', x, num_outputs=2) + + cfg.define_reorder('reorder_0', [yo, xo, ko, yi, wb, db, ki, xi], + policy='candidate', candidate=[ + [yo, xo, ko, yi, wb, db, ki, xi], + [yo, xo, yi, ko, wb, db, ki, xi]]) + + cfg.define_annotate('ann_reduce', [db, wb], policy='try_unroll') + cfg.define_annotate('ann_spatial', [yi, xi], policy='try_unroll_vec') + + ###### Compute rule + VX = cfg['tile_x'].size[-1] + + wvshape = (X//VX, WB, VX, K) + oshape = (Y, X) + + k = tvm.reduce_axis((0, K), name='k') + db = tvm.reduce_axis((0, DB), name='db') + wb = tvm.reduce_axis((0, WB), name='wb') + + # Tile data and weights + weight_vec = tvm.compute(wvshape, lambda xo, wb, vx, k: + weight_packed[xo*VX+vx][wb][k], name='weight_vec') + + idxdiv = tvm.indexdiv + idxmod = tvm.indexmod + + matmul_unipolar = tvm.compute(oshape, lambda i, j: tvm.sum( + (tvm.popcount(weight_vec[idxdiv(j, VX), wb, idxmod(j, VX), k] & data_packed[i, db, k]) - + tvm.popcount(~weight_vec[idxdiv(j, VX), wb, idxmod(j, VX), k] & data_packed[i, db, k]) + ).astype(out_dtype) + << (db+wb).astype(out_dtype), axis=[wb, db, k]), tag='bitserial_dense_unipolar') + + matmul = tvm.compute(oshape, lambda i, j: tvm.sum( + tvm.popcount(weight_vec[idxdiv(j, VX), wb, idxmod(j, VX), k] & data_packed[i, db, k] + ).astype(out_dtype) + << (db+wb).astype(out_dtype), axis=[wb, db, k]), tag='bitserial_dense') + + # binary ops + cfg.add_flop(2 * Y * X * K * binary_op_multiplier(pack_dtype)) + + if unipolar: + return matmul_unipolar + return matmul + +@autotvm.register_topi_schedule('biserial_dense.x86') def schedule_bitserial_dense(cfg, outs): """Schedule for bitserial_dense. diff --git a/topi/python/topi/x86/conv1d.py b/topi/python/topi/x86/conv1d.py index 95fd159acd47f..70c2a6881dbf5 100644 --- a/topi/python/topi/x86/conv1d.py +++ b/topi/python/topi/x86/conv1d.py @@ -18,10 +18,9 @@ """Conv1D schedule on for Intel CPU""" from __future__ import absolute_import as _abs import tvm -from .. import generic, tag +from .. import tag -@generic.schedule_conv1d_ncw.register(["cpu"]) def schedule_conv1d_ncw(outs): """Create schedule for tensors""" s = tvm.create_schedule([x.op for x in outs]) @@ -76,7 +75,6 @@ def traverse(op): return s -@generic.schedule_conv1d_nwc.register(["cpu"]) def schedule_conv1d_nwc(outs): """Create schedule for tensors""" s = tvm.create_schedule([x.op for x in outs]) diff --git a/topi/python/topi/x86/conv2d.py b/topi/python/topi/x86/conv2d.py index 95ce3376ac3a1..b4b69d85fcfa8 100644 --- a/topi/python/topi/x86/conv2d.py +++ b/topi/python/topi/x86/conv2d.py @@ -14,25 +14,20 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -# pylint: disable=invalid-name,unused-variable,unused-argument,no-member,import-outside-toplevel +# pylint: disable=invalid-name,unused-variable,unused-argument,no-member """Conv2D schedule on x86""" import logging -import re import tvm from tvm import autotvm -from tvm.autotvm.task.topi_integration import deserialize_args -from tvm.autotvm.task import get_config -from .. import generic, tag +from .. import tag from .. import nn -from ..nn.conv2d import conv2d, conv2d_NCHWc, \ - conv2d_infer_layout, _get_workload as _get_conv2d_workload +from ..nn.conv2d import conv2d_infer_layout, _get_workload as _get_conv2d_workload +from ..nn.conv2d import unpack_NCHWc_to_nchw from ..nn.depthwise_conv2d import _get_workload as _get_depthwise_conv2d_workload -from ..nn.pad import pad from ..nn.util import get_pad_tuple -from ..util import get_const_tuple - +from ..util import get_const_tuple, traverse_inline from . import conv2d_avx_1x1, conv2d_avx_common logger = logging.getLogger('topi') @@ -61,199 +56,25 @@ def _get_default_config(cfg, data, kernel, strides, padding, out_dtype, is_depth else: conv2d_avx_common._fallback_schedule(cfg, wkl) -def _create_tuning_space(cfg, data, kernel, strides, padding, dilation, layout): - """Create schedule configuration from input arguments""" - dshape = get_const_tuple(data.shape) - kshape = get_const_tuple(kernel.shape) - pat = re.compile(r'NCHW.+(\d+)c') - if layout == 'NCHW': - n, ic, h, w = dshape - oc, _, kh, kw = kshape - elif layout == 'NHWC': - n, h, w, ic = dshape - kh, kw, oc, _ = kshape - elif pat.match(layout) is not None: - n, ic_chunk, h, w, ic_bn = dshape - target = tvm.target.Target.current(allow_none=False) - oc_chunk, k_ic_chunk, kh, kw, k_ic_bn, oc_bn = kshape - assert ic_chunk == k_ic_chunk - assert ic_bn == k_ic_bn - ic = ic_chunk*ic_bn - oc = oc_chunk*oc_bn - else: - raise ValueError("Not support this layout {} with " - "schedule template.".format(layout)) - - is_kernel_1x1 = kh == 1 and kw == 1 - pt, pl, pb, pr = get_pad_tuple(padding, (kh, kw)) - sh, sw = strides if isinstance(strides, (tuple, list)) else (strides, strides) - oh = (h - kh + pt + pb) // sh + 1 - ow = (w - kw + pl + pr) // sw + 1 - - # Create schedule config - cfg.define_split("tile_ic", ic, num_outputs=2) - cfg.define_split("tile_oc", oc, num_outputs=2) - cfg.define_split("tile_ow", ow, num_outputs=2, filter=lambda y: y.size[-1] <= 64) - if is_kernel_1x1: - cfg.define_knob("tile_oh", [1, 2] if oh > 1 else [1]) - else: - cfg.define_knob("unroll_kw", [True, False]) - - -@autotvm.register_topi_compute(conv2d, 'cpu', ['direct']) -def _declaration_conv(cfg, data, kernel, strides, padding, dilation, layout, out_dtype): - out_dtype = data.dtype if out_dtype is None else out_dtype - strides = strides if isinstance(strides, (tuple, list)) else (strides, strides) - dilation = dilation if isinstance(dilation, (tuple, list)) else (dilation, dilation) - - if layout == 'NCHW': - _create_tuning_space(cfg, data, kernel, strides, padding, dilation, layout) - if cfg.is_fallback: - _get_default_config(cfg, data, kernel, strides, padding, out_dtype) - return _declaration_conv_impl(cfg, data, kernel, strides, - padding, dilation, layout, out_dtype) - - # HWOI kernel layout is for NHWC and HWCN - kh, kw, _, _ = get_const_tuple(kernel.shape) - if layout == 'HWCN': - return nn.conv2d_hwcn(data, kernel, strides, padding, dilation, out_dtype) - # FIXME - https://github.com/apache/incubator-tvm/issues/4122 - # _declaration_conv_nhwc_pack expects kernel layout to be HWOI. However, the tests use HWIO - # layout. Commenting until we have clarity about the nhwc_pack implementation from the author. - # elif layout == 'NHWC' and kh == 1 and kw == 1 and kernel.dtype == "int8": - # if cfg.is_fallback: - # _get_default_config(cfg, data, kernel, strides, padding, out_dtype, False, layout) - # # specialize for INT8 1X1 conv on X86 - # return conv2d_avx_1x1._declaration_conv_nhwc_pack(cfg, data, kernel, strides, - # padding, dilation, out_dtype) - if layout == 'NHWC': - return nn.conv2d_nhwc(data, kernel, strides, padding, dilation, out_dtype) - raise ValueError("not support this layout {} yet".format(layout)) - - -def _declaration_conv_impl(cfg, data, kernel, strides, padding, dilation, layout, out_dtype): - out_dtype = data.dtype if out_dtype is None else out_dtype - assert layout == 'NCHW', "only support NCHW convolution for AVX" - - assert isinstance(dilation, int) or len(dilation) == 2 - if isinstance(dilation, int): - dilation_h, dilation_w = dilation - else: - dilation_h, dilation_w = dilation - - HSTR, WSTR = strides - batch_size, in_channel, in_height, in_width = get_const_tuple(data.shape) - num_filter, _, kernel_height, kernel_width = get_const_tuple(kernel.shape) - - pad_top, pad_left, pad_down, pad_right = get_pad_tuple(padding, (kernel_height, kernel_width)) - pad_h = pad_top + pad_down - pad_w = pad_left + pad_right - - pad_height = in_height + pad_h - pad_width = in_width + pad_w - - dilated_kernel_h = (kernel_height - 1) * dilation_h + 1 - dilated_kernel_w = (kernel_width - 1) * dilation_w + 1 - out_height = (in_height + pad_h - dilated_kernel_h) // HSTR + 1 - out_width = (in_width + pad_w - dilated_kernel_w) // WSTR + 1 - - # pack data - DOPAD = (pad_h != 0 or pad_w != 0) - if DOPAD: - data_pad = pad(data, (0, 0, pad_top, pad_left), (0, 0, pad_down, pad_right), \ - name="data_pad") - else: - data_pad = data - - # fetch schedule - ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1] - - shape = (batch_size, in_channel // ic_bn, pad_height, ic_bn, pad_width) - data_vec = tvm.compute(shape, - lambda n, C, h, c, w: data_pad[n, C * ic_bn + c, h, w], - name='data_vec') - - # pack kernel - shape = (num_filter//oc_bn, in_channel//ic_bn, - kernel_height, kernel_width, ic_bn, oc_bn) - kernel_vec = tvm.compute(shape, - lambda CO, CI, h, w, ci, co: - kernel[CO * oc_bn + co, CI * ic_bn + ci, h, w], - name='kernel_vec') - - # convolution - oshape = (batch_size, num_filter//oc_bn, out_height, out_width, oc_bn) - unpack_shape = (batch_size, num_filter, out_height, out_width) - - ic = tvm.reduce_axis((0, in_channel), name='ic') - kh = tvm.reduce_axis((0, kernel_height), name='kh') - kw = tvm.reduce_axis((0, kernel_width), name='kw') - idxmod = tvm.indexmod +@conv2d_infer_layout.register("cpu") +def _conv2d_infer_layout(workload, cfg): + _, data, kernel, strides, padding, dilation, layout, _, dtype = workload + batch_size, in_channel, in_height, in_width = data[:-1] + out_channel, _, k_height, k_width = kernel[:-1] idxdiv = tvm.indexdiv - conv = tvm.compute(oshape, lambda n, oc_chunk, oh, ow, oc_block: - tvm.sum(data_vec[n, idxdiv(ic, ic_bn), oh*HSTR+kh*dilation_h, - idxmod(ic, ic_bn), - ow*WSTR+kw*dilation_w].astype(out_dtype) * - kernel_vec[oc_chunk, idxdiv(ic, ic_bn), kh, kw, - idxmod(ic, ic_bn), - oc_block].astype(out_dtype), - axis=[ic, kh, kw]), name='conv') - - unpack = tvm.compute(unpack_shape, - lambda n, c, h, w: conv[n, idxdiv(c, oc_bn), h, w, idxmod(c, oc_bn)] - .astype(out_dtype), - name='output_unpack', - tag='conv2d_nchw') - return unpack - - -@autotvm.register_topi_schedule(generic.schedule_conv2d_nchw, 'cpu', ['direct']) -def schedule_conv2d(cfg, outs): - """Create schedule for tensors""" - s = tvm.create_schedule([x.op for x in outs]) - scheduled_ops = [] - - def traverse(op): - """Traverse operators from computation graph""" - # inline all one-to-one-mapping operators except the last stage (output) - if tag.is_broadcast(op.tag): - if op not in s.outputs: - s[op].compute_inline() - for tensor in op.input_tensors: - if isinstance(tensor.op, tvm.tensor.ComputeOp) and tensor.op not in scheduled_ops: - traverse(tensor.op) - - if 'conv2d_nchw' in op.tag: - output = op.output(0) - conv_out = op.input_tensors[0] - kernel_vec = conv_out.op.input_tensors[1] - kernel = kernel_vec.op.input_tensors[0] - if isinstance(kernel.op, tvm.tensor.ComputeOp) and "dilate" in kernel.op.tag: - s[kernel].compute_inline() - data_vec = conv_out.op.input_tensors[0] - data = data_vec.op.input_tensors[0] - data_pad = None - if isinstance(data.op, tvm.tensor.ComputeOp) and "pad" in data.op.tag: - data_pad = data - data = data_pad.op.input_tensors[0] - - _, _, kh, kw = get_const_tuple(kernel.shape) - is_kernel_1x1 = kh == 1 and kw == 1 - args = [s, cfg, data, data_pad, data_vec, kernel_vec, conv_out, output, outs[0]] - if is_kernel_1x1: - conv2d_avx_1x1._schedule_conv(*args) - else: - conv2d_avx_common._schedule_conv(*args) - - scheduled_ops.append(op) - - traverse(outs[0].op) - return s + pt, pl, pb, pr = get_pad_tuple(padding, (k_height, k_width)) + out_height = idxdiv(in_height + pt + pb - k_height, strides[0]) + 1 + out_width = idxdiv(in_width + pl + pr - k_width, strides[1]) + 1 + tile_ic, tile_oc = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1] + in_shape = (batch_size, idxdiv(in_channel, tile_ic), in_height, in_width, tile_ic) + in_layout = "NCHW%dc" % tile_ic + out_shape = (batch_size, idxdiv(out_channel, tile_oc), out_height, out_width, tile_oc) + out_layout = "NCHW%dc" % tile_oc + return ((in_shape, in_layout),), ((out_shape, out_layout),) -@generic.schedule_conv2d_nhwc.register("cpu") def schedule_conv2d_nhwc(outs): - """Create schedule for tensors""" + """Create schedule for conv2d_nhwc""" s = tvm.create_schedule([x.op for x in outs]) output_op = outs[0].op scheduled_ops = [] @@ -305,132 +126,116 @@ def traverse(op): traverse(output_op) return s +def conv2d_nchw(data, kernel, strides, padding, dilation, out_dtype): + layout = "NCHW" + packed_out = conv2d_NCHWc(data, kernel, strides, padding, dilation, + layout, layout, out_dtype) + return unpack_NCHWc_to_nchw(packed_out, out_dtype) -# Define template function for autotvm task -# We define schedule template in this function instead of -# declaration function since actual input arguments need -# to be altered by the schedule selected. -@autotvm.task.register("topi_x86_conv2d_NCHWc") -def _topi_nn_conv2d_NCHWc(*args, **kwargs): - assert not kwargs, "Do not support kwargs in template function call" - args = deserialize_args(args) +def schedule_conv2d_nchw(outs): + """Create schedule for tensors""" + return schedule_conv2d_NCHWc(outs) - if len(args) == 7: - data, kernel, strides, padding, dilation, origin_layout, dtype = args - else: - assert len(args) == 8 - data, kernel, strides, padding, dilation, origin_layout, out_layout, dtype = args +def _pack_data(cfg, data, kernel): + n, _, ih, iw = get_const_tuple(data.shape) + oc, ic, kh, kw = get_const_tuple(kernel.shape) + ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1] - raw_data_shape = get_const_tuple(data.shape) - raw_kernel_shape = get_const_tuple(kernel.shape) + ic_chunk = ic // ic_bn + oc_chunk = oc // oc_bn - # get config here - cfg = get_config() - _create_tuning_space(cfg, data, kernel, strides, padding, dilation, origin_layout) + data = tvm.compute((n, ic_chunk, ih, iw, ic_bn), + lambda bs, c, h, w, vc: data[bs, c*ic_bn + vc, h, w], + name="data_vec") - idxdiv = tvm.indexdiv - idxmod = tvm.indexmod - # change shape with the value in config - ic_bn, oc_bn, ow_bn = (cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1], - cfg["tile_ow"].size[-1]) - new_data_shape = (raw_data_shape[0], idxdiv(raw_data_shape[1], ic_bn), - raw_data_shape[2], raw_data_shape[3], ic_bn) - data_layout = "NCHW%dc" % ic_bn - out_layout = "NCHW%dc" % oc_bn - new_kernel_shape = (idxdiv(raw_kernel_shape[0], oc_bn), - idxdiv(raw_kernel_shape[1], ic_bn), - raw_kernel_shape[2], raw_kernel_shape[3], ic_bn, oc_bn) - new_data = tvm.placeholder(new_data_shape, data.dtype) - new_kernel = tvm.placeholder(new_kernel_shape, kernel.dtype) - - C = _declaration_conv_NCHWc(cfg, new_data, new_kernel, strides, padding, dilation, - data_layout, out_layout, dtype) - s = _schedule_conv2d_NCHWc(cfg, [C]) - return s, [new_data, new_kernel, C] + kernel = tvm.compute( + (oc_chunk, ic_chunk, kh, kw, ic_bn, oc_bn), + lambda occ, icc, k_h, k_w, icb, ocb: + kernel[occ * oc_bn + ocb, + icc * ic_bn + icb, k_h, k_w], + name="kernel_vec") + return data, kernel -@conv2d_infer_layout.register("cpu") -def _conv2d_infer_layout(workload, cfg): - _, data, kernel, strides, padding, dilation, layout, dtype = workload - batch_size, in_channel, in_height, in_width = data[:-1] - out_channel, _, k_height, k_width = kernel[:-1] - idxdiv = tvm.indexdiv - - pt, pl, pb, pr = get_pad_tuple(padding, (k_height, k_width)) - out_height = idxdiv(in_height + pt + pb - k_height, strides[0]) + 1 - out_width = idxdiv(in_width + pl + pr - k_width, strides[1]) + 1 - tile_ic, tile_oc = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1] - in_shape = (batch_size, idxdiv(in_channel, tile_ic), in_height, in_width, tile_ic) - in_layout = "NCHW%dc" % tile_ic - out_shape = (batch_size, idxdiv(out_channel, tile_oc), out_height, out_width, tile_oc) - out_layout = "NCHW%dc" % tile_oc - return ((in_shape, in_layout),), ((out_shape, out_layout),) - - -@autotvm.register_topi_compute(conv2d_NCHWc, 'cpu', 'direct') -def _declaration_conv_NCHWc(cfg, data, kernel, strides, - padding, dilation, layout, out_layout, out_dtype): +@autotvm.register_topi_compute("conv2d_NCHWc.x86") +def conv2d_NCHWc(cfg, data, kernel, strides, padding, dilation, layout, out_layout, out_dtype): # layout and out_layout are not used here, # we keep them for debug convenience when dumping autotvm workload - n, ic_chunk, ih, iw, ic_bn = get_const_tuple(data.shape) - in_channel = ic_chunk * ic_bn - oc_chunk, ic_chunk_group, kernel_height, kernel_width, _, oc_bn = \ + if len(data.shape) == 5: + n, ic_chunk, ih, iw, ic_bn = get_const_tuple(data.shape) + oc_chunk, ic_chunk_group, kernel_height, kernel_width, _, oc_bn = \ get_const_tuple(kernel.shape) - num_filter = oc_chunk * oc_bn + in_channel = ic_chunk * ic_bn + num_filter = oc_chunk * oc_bn + else: + n, in_channel, ih, iw = get_const_tuple(data.shape) + num_filter, _, kernel_height, kernel_width = get_const_tuple(kernel.shape) - # If no config was set, we can fallback to NCHW config. + # Define autotvm tuning space + is_kernel_1x1 = kernel_height == 1 and kernel_width == 1 + pt, pl, pb, pr = get_pad_tuple(padding, (kernel_height, kernel_width)) + sh, sw = strides if isinstance(strides, (tuple, list)) else (strides, strides) + oh = (ih - kernel_height + pt + pb) // sh + 1 + ow = (iw - kernel_width + pl + pr) // sw + 1 + + cfg.define_split("tile_ic", in_channel, num_outputs=2) + cfg.define_split("tile_oc", num_filter, num_outputs=2) + cfg.define_split("tile_ow", ow, num_outputs=2, filter=lambda y: y.size[-1] <= 64) + if is_kernel_1x1: + cfg.define_knob("tile_oh", [1, 2] if oh > 1 else [1]) + else: + cfg.define_knob("unroll_kw", [True, False]) + + # If no config was set, we can fallback to default config. if cfg.is_fallback: _get_default_config(cfg, tvm.placeholder((n, in_channel, ih, iw), dtype=data.dtype), tvm.placeholder((num_filter, in_channel, kernel_height, kernel_width), dtype=kernel.dtype), strides, padding, out_dtype) - return nn.conv2d_NCHWc_compute(data, - kernel, - strides, - padding, - dilation, - layout, - out_layout, - out_dtype) - - -@autotvm.register_topi_schedule(generic.schedule_conv2d_NCHWc, 'cpu', ['direct']) -def _schedule_conv2d_NCHWc(cfg, outs): + # Pack data if raw 4-D data is provided. + # This can only happen when autotuning. + if len(data.shape) == 4: + data, kernel = _pack_data(cfg, data, kernel) + + return nn.conv2d_NCHWc(data, + kernel, + strides, + padding, + dilation, + layout, + out_layout, + out_dtype) + +@autotvm.register_topi_schedule("conv2d_NCHWc.x86") +def schedule_conv2d_NCHWc(cfg, outs): """Create schedule for tensors""" + outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs s = tvm.create_schedule([x.op for x in outs]) - scheduled_ops = [] - - def traverse(op): - """Traverse operators from computation graph""" - # inline all one-to-one-mapping operators except the last stage (output) - if tag.is_broadcast(op.tag): - if op not in s.outputs: - s[op].compute_inline() - for tensor in op.input_tensors: - if isinstance(tensor.op, tvm.tensor.ComputeOp) and tensor.op not in scheduled_ops: - traverse(tensor.op) + def _callback(op): if 'conv2d_NCHWc' in op.tag: conv_out = op.output(0) - kernel = conv_out.op.input_tensors[1] + kernel_vec = conv_out.op.input_tensors[1] data_vec = conv_out.op.input_tensors[0] - data = data_vec.op.input_tensors[0] \ - if isinstance(data_vec.op, tvm.tensor.ComputeOp) and "pad" not in data_vec.op.tag \ - else data_vec - if isinstance(data.op, tvm.tensor.ComputeOp) and "pad" in data.op.tag: - data_pad = data - data = data_pad.op.input_tensors[0] - args = [s, cfg, data_vec, conv_out, outs[0]] - target = tvm.target.Target.current(allow_none=False) - _, _, kh, kw, _, _, = get_const_tuple(kernel.shape) + args = [s, cfg, data_vec, kernel_vec, conv_out, outs[0]] + _, _, kh, kw, _, _, = get_const_tuple(kernel_vec.shape) if kh == 1 and kw == 1: conv2d_avx_1x1._schedule_conv_NCHWc(*args) else: conv2d_avx_common._schedule_conv_NCHWc(*args) - scheduled_ops.append(op) - - traverse(outs[0].op) + traverse_inline(s, outs[0].op, _callback) return s + + +# FIXME - https://github.com/apache/incubator-tvm/issues/4122 +# _declaration_conv_nhwc_pack expects kernel layout to be HWOI. However, the tests use HWIO +# layout. Commenting until we have clarity about the nhwc_pack implementation from the author. +# elif layout == 'NHWC' and kh == 1 and kw == 1 and kernel.dtype == "int8": +# if cfg.is_fallback: +# _get_default_config(cfg, data, kernel, strides, padding, out_dtype, False, layout) +# # specialize for INT8 1X1 conv on X86 +# return conv2d_avx_1x1._declaration_conv_nhwc_pack(cfg, data, kernel, strides, +# padding, dilation, out_dtype) diff --git a/topi/python/topi/x86/conv2d_alter_op.py b/topi/python/topi/x86/conv2d_alter_op.py index 8b0c13c2c0bba..10f11ffe34564 100644 --- a/topi/python/topi/x86/conv2d_alter_op.py +++ b/topi/python/topi/x86/conv2d_alter_op.py @@ -23,117 +23,102 @@ from tvm import relay from tvm import autotvm from .conv2d import _get_default_config -from .conv2d_int8 import _is_int8_hw_support, _get_default_config_int8 -from ..util import get_const_tuple, get_shape -from ..nn import conv2d_legalize -from ..nn.conv2d import conv2d, conv2d_NCHWc, conv2d_NCHWc_int8, conv2d_alter_layout -from ..nn.depthwise_conv2d import depthwise_conv2d_NCHWc, depthwise_conv2d_nchw +from .conv2d_int8 import is_int8_hw_support, _get_default_config_int8 +from ..util import get_const_tuple +from ..nn import conv2d_legalize, conv2d_alter_layout from ..nn.util import get_pad_tuple logger = logging.getLogger('topi') @conv2d_alter_layout.register("cpu") -def _alter_conv2d_layout(attrs, inputs, tinfo, F): +def _alter_conv2d_layout(attrs, inputs, tinfos, out_type): + target = tvm.target.current_target(allow_none=False) + dispatch_ctx = autotvm.task.DispatchContext.current + if isinstance(dispatch_ctx, autotvm.task.ApplyGraphBest): + cfg = dispatch_ctx.query(target, None) + workload = cfg.workload + else: + _, outs = relay.backend.compile_engine.select_implement( + relay.op.get("nn.conv2d"), attrs, tinfos, out_type, target) + workload = autotvm.task.get_workload(outs) + if workload is None: + # The best implementation is not an AutoTVM template, + # we then assume it's not necessary to alter this op. + return None + cfg = dispatch_ctx.query(target, workload) + + topi_tmpl = workload[0] + new_attrs = {k : attrs[k] for k in attrs.keys()} + # Parse the attributes. - groups = attrs.get_int("groups") padding = attrs.get_int_tuple("padding") strides = attrs.get_int_tuple("strides") dilation = attrs.get_int_tuple("dilation") - out_dtype = attrs["out_dtype"] - layout_name = 'data_layout' - data_layout = attrs[layout_name] - kh, kw = attrs.get_int_tuple("kernel_size") - - data_tensor, kernel_tensor = tinfo[0], tinfo[1] - if attrs[layout_name] == 'NHWC' and attrs['kernel_layout'] == 'HWIO': - batch_size, height, width, in_channel = get_const_tuple(data_tensor.shape) - kh, kw, _, out_channel = get_const_tuple(kernel_tensor.shape) - elif attrs[layout_name] == 'NCHW' and attrs['kernel_layout'] == 'OIHW': - batch_size, in_channel, height, width = get_const_tuple(data_tensor.shape) - out_channel, _, kh, kw = get_const_tuple(kernel_tensor.shape) - else: - return None - + data_layout = attrs["data_layout"] + kernel_layout = attrs["kernel_layout"] + data_tensor, kernel_tensor = tinfos data_dtype = data_tensor.dtype kernel_dtype = kernel_tensor.dtype - out_dtype = data_dtype if out_dtype in ("same", "") else out_dtype - - # Check if depthwise. - kshape = get_shape(kernel_tensor.shape, attrs["kernel_layout"], "OIHW") - is_depthwise = groups == kshape[0] and kshape[1] == 1 - - # Save the input exprs. - copy_inputs = list(inputs) - - # Set the new attrs - new_attrs = {k : attrs[k] for k in attrs.keys()} - new_attrs['channels'] = out_channel - - # Return if the groups is not 1 and depthwise. - if groups != 1 and not is_depthwise: - return None - - # Set workload. Config update. - dispatch_ctx = autotvm.task.DispatchContext.current - target = tvm.target.Target.current() - - if is_depthwise: - workload = autotvm.task.args_to_workload( - [data_tensor, kernel_tensor, strides, padding, dilation, out_dtype], - depthwise_conv2d_nchw) - else: - workload = autotvm.task.args_to_workload( - [data_tensor, kernel_tensor, strides, padding, dilation, data_layout, out_dtype], - conv2d) - - cfg = dispatch_ctx.query(target, workload) - if cfg.is_fallback: - if _is_int8_hw_support(data_dtype, kernel_dtype): - _get_default_config_int8(cfg, data_tensor, kernel_tensor, strides, padding, out_dtype, - is_depthwise, data_layout) - else: - _get_default_config(cfg, data_tensor, kernel_tensor, strides, padding, out_dtype, - is_depthwise, data_layout) + out_dtype = out_type.dtype + + if topi_tmpl == "conv2d_NCHWc.x86": + # we only convert conv2d_NCHW to conv2d_NCHWc for x86 + assert data_layout == "NCHW" and kernel_layout == "OIHW" + if cfg.is_fallback: + _get_default_config(cfg, data_tensor, kernel_tensor, strides, padding, + out_dtype, False, data_layout) + batch_size, in_channel, height, width = get_const_tuple(data_tensor.shape) + out_channel, _, kh, kw = get_const_tuple(kernel_tensor.shape) + ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1] - # Get the tiling parameters to set the layout names. - ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1] - new_attrs[layout_name] = 'NCHW%dc' % ic_bn - new_attrs['out_layout'] = 'NCHW%dc' % oc_bn - new_data = tvm.placeholder((batch_size, in_channel//ic_bn, height, width, ic_bn), - dtype=data_dtype) + # update new attrs + new_attrs['channels'] = out_channel + new_attrs['data_layout'] = 'NCHW%dc' % ic_bn + # (oc, ic, h, w) -> (OC, IC, h, w, ic, oc) + new_attrs['kernel_layout'] = 'OIHW%di%do' % (ic_bn, oc_bn) + new_attrs['out_layout'] = 'NCHW%dc' % oc_bn - if is_depthwise and data_layout == 'NCHW' and attrs['kernel_layout'] == 'OIHW': - new_attrs['kernel_layout'] = 'OIHW1i%do' % oc_bn # Store altered operator's config - new_kernel = tvm.placeholder((out_channel//oc_bn, 1, kh, kw, 1, oc_bn), dtype=kernel_dtype) + new_data = tvm.placeholder((batch_size, in_channel//ic_bn, height, width, ic_bn), + dtype=data_dtype) + new_kernel = tvm.placeholder((out_channel//oc_bn, in_channel//ic_bn, + kh, kw, ic_bn, oc_bn), dtype=kernel_tensor.dtype) new_workload = autotvm.task.args_to_workload( - [new_data, new_kernel, strides, padding, dilation, new_attrs[layout_name], - new_attrs['out_layout'], out_dtype], depthwise_conv2d_NCHWc) + [new_data, new_kernel, strides, padding, dilation, new_attrs["data_layout"], + new_attrs["out_layout"], out_dtype], topi_tmpl) dispatch_ctx.update(target, new_workload, cfg) + return relay.nn.contrib_conv2d_nchwc(*inputs, **new_attrs) + elif topi_tmpl == "conv2d_NCHWc_int8.x86": + # TODO(@icemelon9, @anijain2305): Need to support data layout NHWC with kernel layout HWIO + assert data_layout == "NCHW" and kernel_layout == "OIHW" + if cfg.is_fallback: + _get_default_config_int8(cfg, data_tensor, kernel_tensor, strides, padding, + out_dtype, False, data_layout) - return F.nn.contrib_depthwise_conv2d_nchwc(*copy_inputs, **new_attrs) - - if _is_int8_hw_support(data_dtype, kernel_dtype): - # Convert kernel data layout from 4D to 7D + batch_size, in_channel, height, width = get_const_tuple(data_tensor.shape) + out_channel, channel_multiplier, kh, kw = get_const_tuple(kernel_tensor.shape) + ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1] n_elems = 4 - data_expr, kernel_expr = inputs - if attrs['kernel_layout'] == 'HWIO': - kernel_IHWO = F.transpose(kernel_expr, axes=(2, 0, 1, 3)) - elif attrs['kernel_layout'] == 'OIHW': - kernel_IHWO = F.transpose(kernel_expr, axes=(1, 2, 3, 0)) - else: - return None - - kernel_IHWOo = F.reshape(kernel_IHWO, (in_channel, kh, kw, out_channel//oc_bn, oc_bn)) - kernel_OHWoI = F.transpose(kernel_IHWOo, axes=(3, 1, 2, 4, 0)) - kernel_OHWoIi = F.reshape(kernel_OHWoI, (out_channel//oc_bn, kh, kw, oc_bn, - in_channel//ic_bn, ic_bn)) - kernel_OHWoIie = F.reshape(kernel_OHWoIi, (out_channel//oc_bn, kh, kw, oc_bn, - in_channel//ic_bn, ic_bn//n_elems, n_elems)) - kernel_OIHWioe = F.transpose(kernel_OHWoIie, axes=(0, 4, 1, 2, 5, 3, 6)) - copy_inputs = [data_expr, kernel_OIHWioe] - # Store altered operator's config. New kernel layout OIHWio4 + # convert kernel data layout from 4D to 7D + data_expr, kernel_expr = inputs + kernel_IHWO = relay.transpose(kernel_expr, axes=(1, 2, 3, 0)) + kernel_IHWOo = relay.reshape(kernel_IHWO, (in_channel, kh, kw, out_channel//oc_bn, oc_bn)) + kernel_OHWoI = relay.transpose(kernel_IHWOo, axes=(3, 1, 2, 4, 0)) + kernel_OHWoIi = relay.reshape(kernel_OHWoI, (out_channel//oc_bn, kh, kw, oc_bn, + in_channel//ic_bn, ic_bn)) + kernel_OHWoIie = relay.reshape(kernel_OHWoIi, (out_channel//oc_bn, kh, kw, oc_bn, + in_channel//ic_bn, ic_bn//n_elems, n_elems)) + kernel_OIHWioe = relay.transpose(kernel_OHWoIie, axes=(0, 4, 1, 2, 5, 3, 6)) + + # update new attrs + new_attrs['channels'] = out_channel + new_attrs['data_layout'] = 'NCHW%dc' % ic_bn + new_attrs['out_layout'] = 'NCHW%dc' % oc_bn + + # Store altered operator's config. + new_data = tvm.placeholder((batch_size, in_channel//ic_bn, height, width, ic_bn), + dtype=data_dtype) new_kernel = tvm.placeholder((out_channel // oc_bn, in_channel // ic_bn, kh, @@ -141,30 +126,40 @@ def _alter_conv2d_layout(attrs, inputs, tinfo, F): ic_bn // n_elems, oc_bn, n_elems), dtype=kernel_dtype) - - new_workload = autotvm.task.args_to_workload([new_data, - new_kernel, - strides, - padding, - dilation, - new_attrs[layout_name], - new_attrs['out_layout'], - out_dtype], - conv2d_NCHWc_int8) + new_workload = autotvm.task.args_to_workload( + [new_data, new_kernel, strides, padding, dilation, new_attrs['data_layout'], + new_attrs['out_layout'], out_dtype], topi_tmpl) dispatch_ctx.update(target, new_workload, cfg) - return F.nn.contrib_conv2d_nchwc_int8(*copy_inputs, **new_attrs) - # (oc, ic, h, w) -> (OC, IC, h, w, ic, oc) - new_attrs['kernel_layout'] = 'OIHW%di%do' % (ic_bn, oc_bn) - # Store altered operator's config - new_kernel = tvm.placeholder((out_channel//oc_bn, in_channel//ic_bn, - kh, kw, ic_bn, oc_bn), dtype=kernel_tensor.dtype) - new_workload = autotvm.task.args_to_workload( - [new_data, new_kernel, strides, padding, dilation, new_attrs[layout_name], - new_attrs['out_layout'], out_dtype], conv2d_NCHWc) - dispatch_ctx.update(target, new_workload, cfg) + return relay.nn.contrib_conv2d_nchwc(data_expr, kernel_OIHWioe, **new_attrs) + elif topi_tmpl == "depthwise_conv2d_NCHWc.x86": + assert data_layout == "NCHW" and kernel_layout == "OIHW" + if cfg.is_fallback: + _get_default_config(cfg, data_tensor, kernel_tensor, strides, padding, + out_dtype, True, data_layout) - return F.nn.contrib_conv2d_nchwc(*copy_inputs, **new_attrs) + batch_size, in_channel, height, width = get_const_tuple(data_tensor.shape) + out_channel, channel_multiplier, kh, kw = get_const_tuple(kernel_tensor.shape) + ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1] + assert channel_multiplier == 1 + + # update new attrs + new_attrs['channels'] = out_channel + new_attrs['data_layout'] = 'NCHW%dc' % ic_bn + new_attrs['kernel_layout'] = 'OIHW1i%do' % oc_bn + new_attrs['out_layout'] = 'NCHW%dc' % oc_bn + + # Store altered operator's config. + new_data = tvm.placeholder((batch_size, in_channel//ic_bn, height, width, ic_bn), + dtype=data_dtype) + new_kernel = tvm.placeholder((out_channel//oc_bn, 1, kh, kw, 1, oc_bn), dtype=kernel_dtype) + new_workload = autotvm.task.args_to_workload( + [new_data, new_kernel, strides, padding, dilation, new_attrs['data_layout'], + new_attrs['out_layout'], out_dtype], topi_tmpl) + dispatch_ctx.update(target, new_workload, cfg) + return relay.nn.contrib_depthwise_conv2d_nchwc(*inputs, **new_attrs) + else: + return None @conv2d_legalize.register("cpu") @@ -254,7 +249,7 @@ def _conv2d_legalize(attrs, inputs, arg_types): # input channel to be a multiple of 4 and output channels to be a multiple of 16. For input # channels, we pad both the inputs and weights input channels. For output channels, we pad the # weight and stride_slice the output. - if _is_int8_hw_support(data_dtype, kernel_dtype): + if is_int8_hw_support(data_dtype, kernel_dtype): # Flags to remember if the expr is modified ic_modified = False oc_modified = False @@ -311,4 +306,5 @@ def _conv2d_legalize(attrs, inputs, arg_types): out = relay.subtract(out, adjust_shift) return out - return None + else: + return None diff --git a/topi/python/topi/x86/conv2d_avx_1x1.py b/topi/python/topi/x86/conv2d_avx_1x1.py index 9726f3d8d4f90..d04f99b774d4a 100644 --- a/topi/python/topi/x86/conv2d_avx_1x1.py +++ b/topi/python/topi/x86/conv2d_avx_1x1.py @@ -18,10 +18,11 @@ """1x1 Conv2D schedule on for Intel CPU""" from __future__ import absolute_import as _abs import tvm +from tvm import autotvm from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity from ..nn.pad import pad -from ..nn.util import infer_pad, get_pad_tuple +from ..nn.util import get_pad_tuple, infer_pad from ..generic import conv2d as conv2d_generic from ..util import get_const_tuple, simplify from .tensor_intrin import dot_16x1x16_uint8_int8_int32 @@ -58,84 +59,41 @@ def _fallback_schedule(cfg, wkl): raise ValueError("cannot decide default schedule for workload: {}".format(wkl)) -def _schedule_conv(s, cfg, data, data_pad, data_vec, kernel_vec, conv_out, output, last): - # fetch schedule - ic_bn, oc_bn, oh_factor, ow_factor = (cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1], - cfg["tile_oh"].val, cfg["tile_ow"].size[-1]) - - # no stride and padding info here - padding = infer_pad(data, data_pad) - HPAD, WPAD = padding - DOPAD = (HPAD != 0 or WPAD != 0) - - A, W = data, kernel_vec - A0, A1 = data_pad, data_vec - # schedule data - if DOPAD: - s[A0].compute_inline() - batch, ic_chunk, ih, ic_block, iw = s[A1].op.axis - parallel_axis = s[A1].fuse(batch, ic_chunk, ih) - s[A1].parallel(parallel_axis) - - # schedule kernel pack - oc_chunk, ic_chunk, oh, ow, ic_block, oc_block = s[W].op.axis - s[W].reorder(oc_chunk, oh, ic_chunk, ow, ic_block, oc_block) - if oc_bn > 1: - s[W].vectorize(oc_block) - parallel_axis = s[W].fuse(oc_chunk, oh) - s[W].parallel(parallel_axis) - - C, O0, O = conv_out, output, last - CC = s.cache_write(C, 'global') - - batch, oc_chunk, oh, ow, oc_block = s[C].op.axis - oh_outer, oh_inner = s[C].split(oh, factor=oh_factor) - s[C].vectorize(oc_block) - - s[CC].compute_at(s[C], oh_outer) - _, oc_chunk, oh, ow, oc_block = s[CC].op.axis - ic, _, _ = s[CC].op.reduce_axis - - ic_chunk, ic_block = s[CC].split(ic, factor=ic_bn) - - oh_outer, oh_inner = s[CC].split(oh, factor=oh_factor) - ow_outer, ow_inner = s[CC].split(ow, factor=ow_factor) - - s[CC].reorder(oc_chunk, oh_outer, ow_outer, ic_chunk, ic_block, oh_inner, ow_inner, oc_block) - s[CC].vectorize(oc_block) - - s[CC].unroll(ow_inner) - s[CC].unroll(oh_inner) - - if O0 != O: - s[O0].compute_inline() - batch, oc, oh, ow = s[O].op.axis - - oc_chunk, oc_block = s[O].split(oc, factor=oc_bn) - oh_outer, oh_inner = s[O].split(oh, factor=oh_factor) - ow_outer, ow_inner = s[O].split(ow, factor=ow_factor) - s[O].reorder(oc_chunk, oh_outer, ow_outer, oh_inner, ow_inner, oc_block) - - parallel_axis = s[O].fuse(batch, oc_chunk, oh_outer) - s[C].compute_at(s[O], parallel_axis) - s[O].vectorize(oc_block) - - s[O].parallel(parallel_axis) - - return s - - -def _schedule_conv_NCHWc(s, cfg, data, conv_out, last): +def _schedule_conv_NCHWc(s, cfg, data_vec, kernel_vec, conv_out, last): # fetch schedule oh_factor, ow_factor = cfg["tile_oh"].val, cfg["tile_ow"].size[-1] - _, _, _, _, ic_bn = get_const_tuple(data.shape) - - # schedule data - A = data - if isinstance(s[A].op, tvm.tensor.ComputeOp): - batch, ic_chunk, ih, iw, ic_block = s[A].op.axis - parallel_axis = s[A].fuse(batch, ic_chunk, ih) - s[A].parallel(parallel_axis) + _, _, _, _, ic_bn = get_const_tuple(data_vec.shape) + + # schedule pad + if isinstance(s[data_vec].op, tvm.tensor.ComputeOp) \ + and "pad" in data_vec.op.tag: + batch, ic_chunk, ih, iw, ic_block = s[data_vec].op.axis + parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih) + s[data_vec].parallel(parallel_axis) + data_vec = data_vec.op.input_tensors[0] + + if autotvm.GLOBAL_SCOPE.in_tuning: + # only in autotuning, input data of conv2d_NCHWc will be 4-D. + # skip this part during tuning to make records accurate. + # this part will be folded during Relay fold_constant pass. + s[data_vec].pragma(s[data_vec].op.axis[0], "debug_skip_region") + s[kernel_vec].pragma(s[kernel_vec].op.axis[0], "debug_skip_region") + elif isinstance(kernel_vec.op, tvm.tensor.ComputeOp) and \ + kernel_vec.name == 'kernel_vec': + # data and kernel are not pre-computed, schedule layout transform here. + # this should only be used by x86 conv2d_nchw, which is for + # testing purpose. + batch, ic_chunk, ih, ic_block, iw = s[data_vec].op.axis + parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih) + s[data_vec].parallel(parallel_axis) + + oc_chunk, ic_chunk, oh, ow, ic_block, oc_block = s[kernel_vec].op.axis + s[kernel_vec].reorder(oc_chunk, oh, ic_chunk, ow, ic_block, oc_block) + oc_bn = cfg["tile_oc"].size[-1] + if oc_bn > 1: + s[kernel_vec].vectorize(oc_block) + parallel_axis = s[kernel_vec].fuse(oc_chunk, oh) + s[kernel_vec].parallel(parallel_axis) C, O = conv_out, last CC = s.cache_write(C, 'global') @@ -167,22 +125,36 @@ def _schedule_conv_NCHWc(s, cfg, data, conv_out, last): s[CC].unroll(oh_inner) if C != O: - batch, oc_chunk, oh, ow, oc_block = s[O].op.axis - oh_outer, oh_inner = s[O].split(oh, factor=oh_factor) - ow_outer, ow_inner = s[O].split(ow, factor=ow_factor) - s[O].reorder(oc_chunk, oh_outer, ow_outer, oh_inner, ow_inner, oc_block) - - parallel_axis = s[O].fuse(batch, oc_chunk, oh_outer) - s[C].compute_at(s[O], parallel_axis) - s[O].vectorize(oc_block) - s[O].parallel(parallel_axis) + out_ndim = len(s[O].op.axis) + if out_ndim == 5: + batch, oc_chunk, oh, ow, oc_block = s[O].op.axis + oh_outer, oh_inner = s[O].split(oh, factor=oh_factor) + ow_outer, ow_inner = s[O].split(ow, factor=ow_factor) + s[O].reorder(oc_chunk, oh_outer, ow_outer, oh_inner, ow_inner, oc_block) + + parallel_axis = s[O].fuse(batch, oc_chunk, oh_outer) + s[C].compute_at(s[O], parallel_axis) + s[O].vectorize(oc_block) + s[O].parallel(parallel_axis) + elif out_ndim == 4: + batch, oc, oh, ow = s[O].op.axis + oc_chunk, oc_block = s[O].split(oc, factor=oc_bn) + oh_outer, oh_inner = s[O].split(oh, factor=oh_factor) + ow_outer, ow_inner = s[O].split(ow, factor=ow_factor) + s[O].reorder(oc_chunk, oh_outer, ow_outer, oh_inner, ow_inner, oc_block) + parallel_axis = s[O].fuse(batch, oc_chunk, oh_outer) + s[C].compute_at(s[O], parallel_axis) + s[O].vectorize(oc_block) + s[O].parallel(parallel_axis) + else: + raise ValueError("Unsupported output ndim: %s" % out_ndim) return s -def _schedule_conv_NCHWc_int8(s, cfg, data, conv_out, last): - return conv2d_generic.schedule_conv_NCHWc_cpu_1x1_int8(s, cfg, data, conv_out, last, - int32_lanes=16, +def _schedule_conv_NCHWc_int8(s, cfg, data_vec, kernel_vec, conv_out, last): + return conv2d_generic.schedule_conv_NCHWc_cpu_1x1_int8(s, cfg, data_vec, kernel_vec, + conv_out, last, int32_lanes=16, intrin=dot_16x1x16_uint8_int8_int32()) diff --git a/topi/python/topi/x86/conv2d_avx_common.py b/topi/python/topi/x86/conv2d_avx_common.py index 7c5096dc2c1a9..085d0aeb67c3b 100644 --- a/topi/python/topi/x86/conv2d_avx_common.py +++ b/topi/python/topi/x86/conv2d_avx_common.py @@ -18,9 +18,9 @@ """Conv2D schedule on for Intel CPU""" from __future__ import absolute_import as _abs import tvm +from tvm import autotvm from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity -from ..nn.util import infer_pad from ..generic import conv2d as conv2d_generic from ..util import get_const_tuple from .tensor_intrin import dot_16x1x16_uint8_int8_int32 @@ -83,88 +83,42 @@ def _fallback_schedule_int8(cfg, wkl): cfg["unroll_kw"] = OtherOptionEntity(False) -def _schedule_conv(s, cfg, data, data_pad, data_vec, kernel_vec, conv_out, output, last): - # fetch schedule - ic_bn, oc_bn, reg_n, unroll_kw = (cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1], - cfg["tile_ow"].size[-1], cfg["unroll_kw"].val) - - # no stride and padding info here - padding = infer_pad(data, data_pad) - HPAD, WPAD = padding - DOPAD = (HPAD != 0 or WPAD != 0) - - A, W = data, kernel_vec - A0, A1 = data_pad, data_vec - - # schedule data - if DOPAD: - s[A0].compute_inline() - batch, ic_chunk, ih, ic_block, iw = s[A1].op.axis - parallel_axis = s[A1].fuse(batch, ic_chunk, ih) - s[A1].parallel(parallel_axis) - - # schedule kernel pack - oc_chunk, ic_chunk, oh, ow, ic_block, oc_block = s[W].op.axis - s[W].reorder(oc_chunk, oh, ic_chunk, ow, ic_block, oc_block) - if oc_bn > 1: - s[W].vectorize(oc_block) - parallel_axis = s[W].fuse(oc_chunk, oh) - s[W].parallel(parallel_axis) - - # schedule conv - C, O0, O = conv_out, output, last - CC = s.cache_write(C, 'global') - - _, oc_chunk, oh, ow, oc_block = s[C].op.axis - ow_chunk, ow_block = s[C].split(ow, factor=reg_n) - s[C].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block) - s[C].fuse(oc_chunk, oh) - s[C].vectorize(oc_block) - - s[CC].compute_at(s[C], ow_chunk) - _, oc_chunk, oh, ow, oc_block = s[CC].op.axis - ic, kh, kw = s[CC].op.reduce_axis - - ow_chunk, ow_block = s[CC].split(ow, factor=reg_n) - ic_chunk, ic_block = s[CC].split(ic, factor=ic_bn) - - if unroll_kw: - s[CC].reorder(oc_chunk, oh, ow_chunk, ic_chunk, kh, ic_block, kw, ow_block, oc_block) - s[CC].unroll(kw) - else: - s[CC].reorder(oc_chunk, oh, ow_chunk, ic_chunk, kh, kw, ic_block, ow_block, oc_block) - - s[CC].fuse(oc_chunk, oh) - s[CC].vectorize(oc_block) - s[CC].unroll(ow_block) - - if O0 != O: - s[O0].compute_inline() - - batch, oc, oh, ow = s[O].op.axis - ow_chunk, ow_block = s[O].split(ow, factor=reg_n) - oc_chunk, oc_block = s[O].split(oc, factor=oc_bn) - s[O].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block) - parallel_axis = s[O].fuse(batch, oc_chunk, oh) - s[C].compute_at(s[O], parallel_axis) - s[O].vectorize(oc_block) - - s[O].parallel(parallel_axis) - - return s - - -def _schedule_conv_NCHWc(s, cfg, data, conv_out, last): +def _schedule_conv_NCHWc(s, cfg, data_vec, kernel_vec, conv_out, last): # fetch schedule reg_n, unroll_kw = cfg["tile_ow"].size[-1], cfg["unroll_kw"].val - _, _, _, _, ic_bn = get_const_tuple(data.shape) + _, _, _, _, ic_bn = get_const_tuple(data_vec.shape) + + # schedule pad + if isinstance(s[data_vec].op, tvm.tensor.ComputeOp) \ + and "pad" in data_vec.op.tag: + batch, ic_chunk, ih, iw, ic_block = s[data_vec].op.axis + parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih) + s[data_vec].parallel(parallel_axis) + data_vec = data_vec.op.input_tensors[0] + + if autotvm.GLOBAL_SCOPE.in_tuning: + # only in autotuning, input data of conv2d_NCHWc will be 4-D. + # skip this part during tuning to make records accurate. + # this part will be folded during Relay fold_constant pass. + s[data_vec].pragma(s[data_vec].op.axis[0], "debug_skip_region") + s[kernel_vec].pragma(s[kernel_vec].op.axis[0], "debug_skip_region") + elif isinstance(kernel_vec.op, tvm.tensor.ComputeOp) and \ + kernel_vec.name == 'kernel_vec': + # data and kernel are not pre-computed, schedule layout transform here. + # this should only be used by x86 conv2d_nchw, which is for + # testing purpose. + batch, ic_chunk, ih, ic_block, iw = s[data_vec].op.axis + parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih) + s[data_vec].parallel(parallel_axis) + + oc_chunk, ic_chunk, oh, ow, ic_block, oc_block = s[kernel_vec].op.axis + s[kernel_vec].reorder(oc_chunk, oh, ic_chunk, ow, ic_block, oc_block) + oc_bn = cfg["tile_oc"].size[-1] + if oc_bn > 1: + s[kernel_vec].vectorize(oc_block) + parallel_axis = s[kernel_vec].fuse(oc_chunk, oh) + s[kernel_vec].parallel(parallel_axis) - # schedule data - A = data - if isinstance(s[A].op, tvm.tensor.ComputeOp): - batch, ic_chunk, ih, iw, ic_block = s[A].op.axis - parallel_axis = s[A].fuse(batch, ic_chunk, ih) - s[A].parallel(parallel_axis) # schedule 5-D NCHW[x]c conv C, O = conv_out, last @@ -195,18 +149,31 @@ def _schedule_conv_NCHWc(s, cfg, data, conv_out, last): s[CC].unroll(ow_block) if C != O: - batch, oc_chunk, oh, ow, oc_block = s[O].op.axis - ow_chunk, ow_block = s[O].split(ow, factor=reg_n) - s[O].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block) - parallel_axis = s[O].fuse(batch, oc_chunk, oh) - s[C].compute_at(s[O], parallel_axis) - s[O].vectorize(oc_block) - s[O].parallel(parallel_axis) + out_ndim = len(s[O].op.axis) + if out_ndim == 5: + batch, oc_chunk, oh, ow, oc_block = s[O].op.axis + ow_chunk, ow_block = s[O].split(ow, factor=reg_n) + s[O].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block) + parallel_axis = s[O].fuse(batch, oc_chunk, oh) + s[C].compute_at(s[O], parallel_axis) + s[O].vectorize(oc_block) + s[O].parallel(parallel_axis) + elif out_ndim == 4: + batch, oc, oh, ow = s[O].op.axis + ow_chunk, ow_block = s[O].split(ow, factor=reg_n) + oc_chunk, oc_block = s[O].split(oc, factor=oc_bn) + s[O].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block) + parallel_axis = s[O].fuse(batch, oc_chunk, oh) + s[C].compute_at(s[O], parallel_axis) + s[O].vectorize(oc_block) + s[O].parallel(parallel_axis) + else: + raise ValueError("Unsupported output ndim: %s" % out_ndim) return s -def _schedule_conv_NCHWc_int8(s, cfg, data, conv_out, last): - return conv2d_generic.schedule_conv_NCHWc_cpu_common_int8(s, cfg, data, conv_out, last, - int32_lanes=16, +def _schedule_conv_NCHWc_int8(s, cfg, data_vec, kernel_vec, conv_out, last): + return conv2d_generic.schedule_conv_NCHWc_cpu_common_int8(s, cfg, data_vec, kernel_vec, + conv_out, last, int32_lanes=16, intrin=dot_16x1x16_uint8_int8_int32()) diff --git a/topi/python/topi/x86/conv2d_int8.py b/topi/python/topi/x86/conv2d_int8.py index 20712d2f6f4fe..06c80e6e39ca2 100644 --- a/topi/python/topi/x86/conv2d_int8.py +++ b/topi/python/topi/x86/conv2d_int8.py @@ -20,15 +20,13 @@ import re import tvm from tvm import autotvm -from tvm.autotvm.task import get_config -from tvm.autotvm.task.topi_integration import deserialize_args from ..nn.conv2d import _get_workload as _get_conv2d_workload -from .. import generic, tag +from .. import tag from ..generic import conv2d as conv2d_generic from ..nn.util import get_pad_tuple -from ..util import get_const_tuple -from ..nn.conv2d import conv2d_NCHWc_int8 +from ..nn.conv2d import unpack_NCHWc_to_nchw from ..nn.depthwise_conv2d import _get_workload as _get_depthwise_conv2d_workload +from ..util import get_const_tuple, traverse_inline from .. import nn from . import conv2d_avx_1x1, conv2d_avx_common @@ -53,7 +51,7 @@ def _get_default_config_int8(cfg, data, kernel, strides, padding, out_dtype, is_ cfg, wkl, int32_lanes=16, num_int8_elements=4) -def _is_int8_hw_support(data_dtype, kernel_dtype): +def is_int8_hw_support(data_dtype, kernel_dtype): """ Checks to ensure that we can use Intel DLBoost instructions 1) The datatypes are correct. @@ -64,7 +62,7 @@ def _is_int8_hw_support(data_dtype, kernel_dtype): is_dtype_support = data_dtype == 'uint8' and kernel_dtype == 'int8' # 2) Check LLVM support - llvm_version = tvm.target.codegen.llvm_version_major() + llvm_version = tvm.codegen.llvm_version_major() is_llvm_support = llvm_version >= 8 # 3) Check target @@ -76,150 +74,120 @@ def _is_int8_hw_support(data_dtype, kernel_dtype): return is_dtype_support and is_llvm_support and is_target_support -def _create_tuning_space_int8(cfg, data, kernel, strides, padding, dilation, layout): - """Create schedule configuration from input arguments""" - dshape = get_const_tuple(data.shape) - kshape = get_const_tuple(kernel.shape) - pat = re.compile(r'NCHW.+(\d+)c') - if layout == 'NCHW': - n, ic, h, w = dshape - oc, _, kh, kw = kshape - elif layout == 'NHWC': - n, h, w, ic = dshape - kh, kw, oc, _ = kshape - elif pat.match(layout) is not None: - n, ic_chunk, h, w, ic_bn = dshape - target = tvm.target.Target.current(allow_none=False) - oc_chunk, k_ic, kh, kw, k_ic_f, oc_bn, k_ic_s = kshape - ic = ic_chunk * ic_bn - assert ic == k_ic * k_ic_f * k_ic_s - oc = oc_chunk*oc_bn +def conv2d_nchw_int8(data, kernel, strides, padding, dilation, out_dtype): + layout = "NCHW" + packed_out = conv2d_NCHWc_int8(data, kernel, strides, padding, dilation, + layout, layout, out_dtype) + return unpack_NCHWc_to_nchw(packed_out, out_dtype) + + +def schedule_conv2d_nchw_int8(outs): + return schedule_conv2d_NCHWc_int8(outs) + + +def _pack_data(cfg, data, kernel): + n_elems = 4 + n, _, ih, iw = get_const_tuple(data.shape) + oc, ic, kh, kw = get_const_tuple(kernel.shape) + ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1] + + ic_chunk = ic // ic_bn + oc_chunk = oc // oc_bn + + data = tvm.compute((n, ic_chunk, ih, iw, ic_bn), + lambda bs, c, h, w, vc: data[bs, c*ic_bn + vc, h, w], + name="data_vec") + + kernel = tvm.compute( + (oc_chunk, ic_chunk, kh, kw, ic_bn//n_elems, oc_bn, n_elems), + lambda occ, icc, k_h, k_w, icbc, ocb, icbb: + kernel[occ * oc_bn + ocb, + icc * ic_bn + icbc * ic_bn//n_elems + icbb, k_h, k_w], + name="kernel_vec") + + return data, kernel + + +@autotvm.register_topi_compute("conv2d_NCHWc_int8.x86") +def conv2d_NCHWc_int8(cfg, data, kernel, strides, padding, + dilation, layout, out_layout, out_dtype): + if len(data.shape) == 5: + n, ic_chunk, ih, iw, ic_bn = get_const_tuple(data.shape) + in_channel = ic_chunk * ic_bn + oc_chunk, ic_chunk_group, kernel_height, kernel_width, _, oc_bn, _ \ + = get_const_tuple(kernel.shape) + num_filter = oc_chunk * oc_bn else: - raise ValueError("Not support this layout {} with " - "schedule template.".format(layout)) + n, in_channel, ih, iw = get_const_tuple(data.shape) + num_filter, _, kernel_height, kernel_width = \ + get_const_tuple(kernel.shape) - is_kernel_1x1 = kh == 1 and kw == 1 - pt, pl, pb, pr = get_pad_tuple(padding, kernel) + # Define autotvm tuning space + is_kernel_1x1 = kernel_height == 1 and kernel_width == 1 + pt, pl, pb, pr = get_pad_tuple(padding, (kernel_height, kernel_width)) sh, sw = strides if isinstance(strides, (tuple, list)) else (strides, strides) - oh = (h - kh + pt + pb) // sh + 1 - ow = (w - kw + pl + pr) // sw + 1 + oh = (ih - kernel_height + pt + pb) // sh + 1 + ow = (iw - kernel_width + pl + pr) // sw + 1 - # Create schedule config - cfg.define_split('tile_ic', ic, num_outputs=2, filter=lambda y: y.size[-1] % 4 == 0) - cfg.define_split('tile_oc', oc, num_outputs=2, filter=lambda y: y.size[-1] % 16 == 0) + cfg.define_split('tile_ic', in_channel, num_outputs=2, + filter=lambda y: y.size[-1] % 4 == 0) + cfg.define_split('tile_oc', num_filter, num_outputs=2, + filter=lambda y: y.size[-1] % 16 == 0) cfg.define_split("tile_ow", ow, num_outputs=2, filter=lambda y: y.size[-1] <= 64) if is_kernel_1x1: cfg.define_knob("tile_oh", [1, 2] if oh > 1 else [1]) else: cfg.define_knob("unroll_kw", [True, False]) - -# Define template function for autotvm task -# We define schedule template in this function instead of -# declaration function since actual input arguments need -# to be altered by the schedule selected. -@autotvm.task.register("topi_x86_conv2d_NCHWc_int8") -def _topi_nn_conv2d_NCHWc_int8(*args, **kwargs): - assert not kwargs, "Do not support kwargs in template function call" - args = deserialize_args(args) - - if len(args) == 7: - data, kernel, strides, padding, dilation, origin_layout, dtype = args - else: - assert len(args) == 8 - data, kernel, strides, padding, dilation, origin_layout, out_layout, dtype = args - - raw_data_shape = get_const_tuple(data.shape) - raw_kernel_shape = get_const_tuple(kernel.shape) - - # get config here - cfg = get_config() - _create_tuning_space_int8(cfg, data, kernel, strides, padding, dilation, origin_layout) - - # change shape with the value in config - ic_bn, oc_bn, ow_bn = (cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1], - cfg["tile_ow"].size[-1]) - - data_layout = "NCHW%dc" % ic_bn - out_layout = "NCHW%dc" % oc_bn - - # Set up the new shape for data and kernel - new_data_shape = (raw_data_shape[0], raw_data_shape[1] // ic_bn, - raw_data_shape[2], raw_data_shape[3], ic_bn) - n_elems = 4 - new_kernel_shape = (raw_kernel_shape[0] // oc_bn, - raw_kernel_shape[1] // ic_bn, - raw_kernel_shape[2], - raw_kernel_shape[3], - ic_bn // n_elems, - oc_bn, - n_elems) - - new_data = tvm.placeholder(new_data_shape, data.dtype) - new_kernel = tvm.placeholder(new_kernel_shape, kernel.dtype) - - C = _declaration_conv_NCHWc_int8(cfg, new_data, new_kernel, strides, padding, dilation, - data_layout, out_layout, dtype) - s = _schedule_conv2d_NCHWc_int8(cfg, [C]) - return s, [new_data, new_kernel, C] - - -@autotvm.register_topi_compute(conv2d_NCHWc_int8, 'cpu', 'direct') -def _declaration_conv_NCHWc_int8(cfg, data, kernel, strides, - padding, dilation, layout, out_layout, out_dtype): - return nn.conv2d_NCHWc_int8_compute(data, - kernel, - strides, - padding, - dilation, - layout, - out_layout, - out_dtype) - - -@autotvm.register_topi_schedule(generic.schedule_conv2d_NCHWc_int8, 'cpu', ['direct']) -def _schedule_conv2d_NCHWc_int8(cfg, outs): + # If no config was set, we can fallback to default config. + if cfg.is_fallback: + _get_default_config_int8( + cfg, tvm.placeholder((n, in_channel, ih, iw), dtype=data.dtype), + tvm.placeholder((num_filter, in_channel, kernel_height, kernel_width), + dtype=kernel.dtype), + strides, padding, out_dtype) + + # Pack data if raw 4-D data is provided. + # This can only happen when autotuning. + if len(data.shape) == 4: + data, kernel = _pack_data(cfg, data, kernel) + + return nn.conv2d_NCHWc_int8(data, + kernel, + strides, + padding, + dilation, + layout, + out_layout, + out_dtype) + + +@autotvm.register_topi_schedule("conv2d_NCHWc_int8.x86") +def schedule_conv2d_NCHWc_int8(cfg, outs): """Create schedule for tensors""" s = tvm.create_schedule([x.op for x in outs]) - scheduled_ops = [] - def traverse(op): + def _callback(op): """Traverse operators from computation graph""" - # inline all one-to-one-mapping operators except the last stage (output) - if tag.is_broadcast(op.tag): - if op not in s.outputs: - s[op].compute_inline() - for tensor in op.input_tensors: - if isinstance(tensor.op, tvm.tensor.ComputeOp) and tensor.op not in scheduled_ops: - traverse(tensor.op) - if 'conv2d_NCHWc_int8' in op.tag: conv_out = op.output(0) - kernel = conv_out.op.input_tensors[1] + kernel_vec = conv_out.op.input_tensors[1] data_vec = conv_out.op.input_tensors[0] - data = data_vec.op.input_tensors[0] \ - if isinstance(data_vec.op, tvm.tensor.ComputeOp) and "pad" not in data_vec.op.tag \ - else data_vec - if isinstance(data.op, tvm.tensor.ComputeOp) and "pad" in data.op.tag: - data_pad = data - data = data_pad.op.input_tensors[0] - args = [s, cfg, data_vec, conv_out, outs[0]] - target = tvm.target.Target.current(allow_none=False) + args = [s, cfg, data_vec, kernel_vec, conv_out, outs[0]] # int8 conv kernel is 7-dim - _, _, kh, kw, _, _, _ = get_const_tuple(kernel.shape) + _, _, kh, kw, _, _, _ = get_const_tuple(kernel_vec.shape) if kh == 1 and kw == 1: conv2d_avx_1x1._schedule_conv_NCHWc_int8(*args) else: conv2d_avx_common._schedule_conv_NCHWc_int8(*args) - scheduled_ops.append(op) - - traverse(outs[0].op) + traverse_inline(s, outs[0].op, _callback) return s -@autotvm.register_topi_schedule(generic.schedule_conv2d_nhwc_pack, 'cpu', ['direct']) -def schedule_conv2d_nhwc_pack(cfg, outs): + +@autotvm.register_topi_schedule("conv2d_nhwc_pack_int8.x86") +def schedule_conv2d_nhwc_pack_int8(cfg, outs): """Create schedule for tensors""" s = tvm.create_schedule([x.op for x in outs]) output_op = outs[0].op diff --git a/topi/python/topi/x86/conv2d_transpose.py b/topi/python/topi/x86/conv2d_transpose.py index 27fc0afce999f..71f47d6c037b5 100644 --- a/topi/python/topi/x86/conv2d_transpose.py +++ b/topi/python/topi/x86/conv2d_transpose.py @@ -17,59 +17,34 @@ # pylint: disable=invalid-name,unused-variable,unused-argument,no-member """Conv2D Transpose schedule on x86""" import tvm -from tvm import autotvm -from .. import generic -from ..util import get_const_tuple, traverse_inline -from ..nn import conv2d_transpose_nchw_preprocess, conv2d_transpose_nchw -from . import conv2d_avx_1x1, conv2d_avx_common -from .conv2d import _declaration_conv_impl, \ - _create_tuning_space as _create_tuning_space_conv2d, \ - _get_default_config as _get_default_config_conv2d +from ..util import traverse_inline +from .. import nn +from .conv2d import conv2d_nchw, schedule_conv2d_nchw - -@autotvm.register_topi_compute(conv2d_transpose_nchw, 'cpu', ['direct']) -def _conv2d_transpose_nchw(cfg, data, kernel, strides, padding, out_dtype): +def conv2d_transpose_nchw(data, kernel, strides, padding, out_dtype): data_pad, kernel_transform = \ - conv2d_transpose_nchw_preprocess(data, kernel, strides, padding, out_dtype) - # reuse conv2d implementation - _create_tuning_space_conv2d(cfg, data_pad, kernel_transform, strides=(1, 1), \ - padding=(0, 0), dilation=(1, 1), layout="NCHW") - if cfg.is_fallback: - _get_default_config_conv2d(cfg, data_pad, kernel_transform, strides=(1, 1), \ - padding=(0, 0), out_dtype=out_dtype, layout='NCHW') - return _declaration_conv_impl(cfg, data_pad, kernel_transform, strides=(1, 1), \ - padding=(0, 0), dilation=(1, 1), layout="NCHW", \ - out_dtype=out_dtype) - + nn.conv2d_transpose_nchw_preprocess(data, kernel, strides, padding, out_dtype) + # reuse conv2d_nchw implementation + return conv2d_nchw(data_pad, kernel_transform, strides=(1, 1), + padding=(0, 0), dilation=(1, 1), out_dtype=out_dtype) -@autotvm.register_topi_schedule(generic.schedule_conv2d_transpose_nchw, 'cpu', ['direct']) -def _schedule_conv2d_transpose_nchw(cfg, outs): +def schedule_conv2d_transpose_nchw(outs): """Create schedule for tensors""" outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs - s = tvm.create_schedule([x.op for x in outs]) - + s = schedule_conv2d_nchw(outs) def _callback(op): - # reuse conv2d schedule - if 'conv2d_nchw' in op.tag: - output = op.output(0) + if 'unpack_nchwc' in op.tag: conv_out = op.input_tensors[0] # retrieve data data_vec = conv_out.op.input_tensors[0] data_pad = data_vec.op.input_tensors[0] data_dilate = data_pad.op.input_tensors[0] s[data_dilate].compute_inline() + s[data_pad].compute_inline() # retrieve kernel kernel_vec = conv_out.op.input_tensors[1] kernel_transform = kernel_vec.op.input_tensors[0] s[kernel_transform].compute_inline() - # call conv2d schedule - _, _, kh, kw = get_const_tuple(kernel_transform.shape) - is_kernel_1x1 = kh == 1 and kw == 1 - args = [s, cfg, data_dilate, data_pad, data_vec, kernel_vec, conv_out, output, outs[0]] - if is_kernel_1x1: - conv2d_avx_1x1._schedule_conv(*args) - else: - conv2d_avx_common._schedule_conv(*args) traverse_inline(s, outs[0].op, _callback) return s diff --git a/topi/python/topi/x86/conv3d.py b/topi/python/topi/x86/conv3d.py index 4a6664eba0e46..4f5b631b5a2a6 100644 --- a/topi/python/topi/x86/conv3d.py +++ b/topi/python/topi/x86/conv3d.py @@ -21,9 +21,7 @@ import tvm from tvm import autotvm from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity -from .. import generic from ..util import traverse_inline -from ..nn.conv3d import conv3d, conv3d_ncdhw from ..nn.util import get_pad_tuple3d, infer_pad3d from ..nn.pad import pad from ..util import get_const_tuple, simplify, get_const_int @@ -35,9 +33,8 @@ 'hkernel', 'wkernel', 'dpad', 'hpad', 'wpad', 'dstride', 'hstride', 'wstride']) -@autotvm.register_topi_compute(conv3d, 'cpu', ['direct']) -def _declaration_conv3d(cfg, data, kernel, strides, padding, dilation, - layout, out_dtype): +@autotvm.register_topi_compute("conv3d_ndhwc.x86") +def conv3d_ndhwc(cfg, data, kernel, strides, padding, dilation, out_dtype): """3D convolution forward operator. Parameters @@ -59,9 +56,6 @@ def _declaration_conv3d(cfg, data, kernel, strides, padding, dilation, dilation: int or a list/tuple of three ints dilation size, or [dilation_depth, dilation_height, dilation_width] - layout : str - layout of data - Returns ------- output : tvm.Tensor @@ -72,17 +66,13 @@ def _declaration_conv3d(cfg, data, kernel, strides, padding, dilation, strides = strides if isinstance(strides, (tuple, list)) else (strides, strides, strides) dilation = dilation if isinstance(dilation, (tuple, list)) else (dilation, dilation, dilation) - if layout == 'NDHWC': - _create_tuning_space(cfg, data, kernel, strides, padding, dilation, layout) - if cfg.is_fallback: - _get_default_config(cfg, data, kernel, strides, padding, out_dtype, layout) - return _conv3d_ndhwc(cfg, data, kernel, strides, padding, dilation, layout, out_dtype) - elif layout == 'NCDHW': - return conv3d_ncdhw(data, kernel, strides, padding, dilation, out_dtype) - raise ValueError("Layout {} is not supported".format(layout)) + _create_tuning_space(cfg, data, kernel, strides, padding, dilation, layout) + if cfg.is_fallback: + _get_default_config(cfg, data, kernel, strides, padding, out_dtype, layout) + return _conv3d_ndhwc(cfg, data, kernel, strides, padding, dilation, layout, out_dtype) -@autotvm.register_topi_schedule(generic.schedule_conv3d_ndhwc, 'cpu', ['direct']) +@autotvm.register_topi_schedule("conv3d_ndhwc.x86") def schedule_conv3d_ndhwc(cfg, outs): """TOPI schedule callback for conv3d Parameters diff --git a/topi/python/topi/x86/dense.py b/topi/python/topi/x86/dense.py index c6c3d5e667ace..734ba2f71330e 100644 --- a/topi/python/topi/x86/dense.py +++ b/topi/python/topi/x86/dense.py @@ -23,147 +23,9 @@ from tvm.contrib import cblas from .util import get_fp32_len -from .. import generic, tag, nn +from .. import generic, tag from ..util import traverse_inline, get_const_tuple -@autotvm.register_topi_compute(nn.dense, "cpu", "direct") -def _declaration_dense(cfg, data, weight, bias=None, out_dtype=None): - target = tvm.target.Target.current() - if "cblas" in target.libs: - C = cblas.matmul(data, weight, False, True) - if bias is not None: - C = tvm.compute(C.shape, lambda i, j: C[i, j] + bias[j], - tag=tag.BROADCAST) - return C - - M, _ = get_const_tuple(data.shape) - # Always use dense_nopack for dynamic input. - # This is a temporary for CV models. - # TODO(kevinthesun): use kernel dispatcher instead. - if isinstance(M, tvm.expr.Var): - return _declaration_dense_nopack(cfg, data, weight, bias, out_dtype) - - # For small batch sizes, don't pack weight into cache-friendly layout - # because of overhead in packing and limited reuse from batch dimension - # TODO(icemelon9): use a more systematic way to determine which schedule to use - if M <= 16: - return _declaration_dense_nopack(cfg, data, weight, bias, out_dtype) - return _declaration_dense_pack(cfg, data, weight, bias, out_dtype) - - -# Declare dense compute with packing weight into cache-friendly layout -@autotvm.register_topi_compute(nn.dense, "cpu", "direct_pack") -def _declaration_dense_pack(cfg, data, weight, bias=None, out_dtype=None): - if out_dtype is None: - out_dtype = data.dtype - M, K = get_const_tuple(data.shape) # batch, in_dim - N, _ = get_const_tuple(weight.shape) # out_dim - # create tuning space - cfg.define_split("tile_y", 32 if isinstance(M, tvm.expr.Var) else M, num_outputs=3) - cfg.define_split("tile_x", 32 if isinstance(N, tvm.expr.Var) else N, num_outputs=3) - cfg.define_split("tile_k", 32 if isinstance(K, tvm.expr.Var) else K, num_outputs=2) - if cfg.is_fallback: - _default_dense_pack_config(cfg, M, N, K) - - packw_bn = cfg["tile_x"].size[-1] - packw_shape = (N // packw_bn, K, packw_bn) - packw = tvm.compute(packw_shape, - lambda z, y, x: weight[z * packw_bn + x, y], name="packed_weight") - - idxdiv = tvm.indexdiv - idxmod = tvm.indexmod - k = tvm.reduce_axis((0, K), name="k") - C = tvm.compute((M, N), - lambda y, x: tvm.sum( - data[y, k].astype(out_dtype) * - packw[idxdiv(x, packw_bn), k, idxmod(x, packw_bn)].astype(out_dtype), - axis=k), - tag="dense_pack") - if bias is not None: - C = tvm.compute((M, N), lambda i, j: C[i, j] + bias[j].astype(out_dtype), - tag=tag.BROADCAST) - return C - - -# Declare dense compute without packing weight -@autotvm.register_topi_compute(nn.dense, "cpu", "direct_nopack") -def _declaration_dense_nopack(cfg, data, weight, bias=None, out_dtype=None): - if out_dtype is None: - out_dtype = data.dtype - M, K = get_const_tuple(data.shape) - N, _ = get_const_tuple(weight.shape) - # create tuning space - cfg.define_split("tile_y", 32 if isinstance(M, tvm.expr.Var) else M, num_outputs=2) - cfg.define_split("tile_x", 32 if isinstance(N, tvm.expr.Var) else N, num_outputs=2) - cfg.define_split("tile_k", 32 if isinstance(K, tvm.expr.Var) else K, num_outputs=2) - if cfg.is_fallback: - _default_dense_nopack_config(cfg, M, N, K) - - vec = cfg["tile_k"].size[-1] - k = tvm.reduce_axis((0, K // vec), "k") - CC = tvm.compute((M, N, vec), - lambda z, y, x: tvm.sum( - data[z, k * vec + x].astype(out_dtype) * - weight[y, k * vec + x].astype(out_dtype), axis=k)) - - kk = tvm.reduce_axis((0, vec), "kk") - C = tvm.compute((M, N), - lambda y, x: tvm.sum(CC[y, x, kk], axis=kk), - tag="dense_nopack") - if bias is not None: - C = tvm.compute((M, N), lambda i, j: C[i, j] + bias[j].astype(out_dtype), - tag=tag.BROADCAST) - - return C - - -@autotvm.register_topi_schedule(generic.schedule_dense, "cpu", "direct") -def _schedule_dense(cfg, outs): - target = tvm.target.Target.current() - if "cblas" in target.libs: - return generic.schedule_extern(outs) - - s = tvm.create_schedule([x.op for x in outs]) - - def _callback(op): - if "dense_pack" in op.tag: - _schedule_dense_pack_template(cfg, s, op.output(0)) - elif 'dense_nopack' in op.tag: - _schedule_dense_nopack_template(cfg, s, op.output(0)) - traverse_inline(s, outs[0].op, _callback) - return s - - -@autotvm.register_topi_schedule(generic.schedule_dense, "cpu", "direct_pack") -def _schedule_dense_pack(cfg, outs): - target = tvm.target.Target.current() - if "cblas" in target.libs: - return generic.schedule_extern(outs) - - s = tvm.create_schedule([x.op for x in outs]) - - def _callback(op): - if "dense_pack" in op.tag: - _schedule_dense_pack_template(cfg, s, op.output(0)) - traverse_inline(s, outs[0].op, _callback) - return s - - -@autotvm.register_topi_schedule(generic.schedule_dense, "cpu", "direct_nopack") -def _schedule_dense_nopack(cfg, outs): - target = tvm.target.Target.current() - if "cblas" in target.libs: - return generic.schedule_extern(outs) - - s = tvm.create_schedule([x.op for x in outs]) - - def _callback(op): - if 'dense_nopack' in op.tag: - _schedule_dense_nopack_template(cfg, s, op.output(0)) - traverse_inline(s, outs[0].op, _callback) - return s - - def _schedule_dense_pack_template(cfg, s, C): A, packedB = s[C].op.input_tensors @@ -270,3 +132,100 @@ def _default_dense_nopack_config(cfg, M, N, K): cfg["tile_k"] = SplitEntity([K // tilek_bn, tilek_bn]) cfg["tile_x"] = SplitEntity([N, 1]) cfg["tile_y"] = SplitEntity([1, M]) + +@autotvm.register_topi_compute("dense_nopack.x86") +def dense_nopack(cfg, data, weight, bias=None, out_dtype=None): + if out_dtype is None: + out_dtype = data.dtype + M, K = get_const_tuple(data.shape) + N, _ = get_const_tuple(weight.shape) + # create tuning space + cfg.define_split("tile_y", M, num_outputs=2) + cfg.define_split("tile_x", N, num_outputs=2) + cfg.define_split("tile_k", K, num_outputs=2) + if cfg.is_fallback: + _default_dense_nopack_config(cfg, M, N, K) + + vec = cfg["tile_k"].size[-1] + k = tvm.reduce_axis((0, K // vec), "k") + CC = tvm.compute((M, N, vec), + lambda z, y, x: tvm.sum( + data[z, k * vec + x].astype(out_dtype) * + weight[y, k * vec + x].astype(out_dtype), axis=k)) + + kk = tvm.reduce_axis((0, vec), "kk") + C = tvm.compute((M, N), + lambda y, x: tvm.sum(CC[y, x, kk], axis=kk), + tag="dense_nopack") + if bias is not None: + C = tvm.compute((M, N), lambda i, j: C[i, j] + bias[j].astype(out_dtype), + tag=tag.BROADCAST) + return C + + +@autotvm.register_topi_schedule("dense_nopack.x86") +def schedule_dense_nopack(cfg, outs): + s = tvm.create_schedule([x.op for x in outs]) + + def _callback(op): + if 'dense_nopack' in op.tag: + _schedule_dense_nopack_template(cfg, s, op.output(0)) + traverse_inline(s, outs[0].op, _callback) + return s + +@autotvm.register_topi_compute("dense_pack.x86") +def dense_pack(cfg, data, weight, bias=None, out_dtype=None): + if out_dtype is None: + out_dtype = data.dtype + M, K = get_const_tuple(data.shape) # batch, in_dim + N, _ = get_const_tuple(weight.shape) # out_dim + # create tuning space + cfg.define_split("tile_y", M, num_outputs=3) + cfg.define_split("tile_x", N, num_outputs=3) + cfg.define_split("tile_k", K, num_outputs=2) + if cfg.is_fallback: + _default_dense_pack_config(cfg, M, N, K) + + packw_bn = cfg["tile_x"].size[-1] + packw_shape = (N // packw_bn, K, packw_bn) + packw = tvm.compute(packw_shape, + lambda z, y, x: weight[z * packw_bn + x, y], name="packed_weight") + + idxdiv = tvm.indexdiv + idxmod = tvm.indexmod + k = tvm.reduce_axis((0, K), name="k") + C = tvm.compute((M, N), + lambda y, x: tvm.sum( + data[y, k].astype(out_dtype) * + packw[idxdiv(x, packw_bn), k, idxmod(x, packw_bn)].astype(out_dtype), + axis=k), + tag="dense_pack") + if bias is not None: + C = tvm.compute((M, N), lambda i, j: C[i, j] + bias[j].astype(out_dtype), + tag=tag.BROADCAST) + return C + +@autotvm.register_topi_schedule("dense_pack.x86") +def schedule_dense_pack(cfg, outs): + s = tvm.create_schedule([x.op for x in outs]) + + def _callback(op): + if "dense_pack" in op.tag: + _schedule_dense_pack_template(cfg, s, op.output(0)) + traverse_inline(s, outs[0].op, _callback) + return s + +@autotvm.register_topi_compute("dense_cblas.x86") +def dense_cblas(cfg, data, weight, bias=None, out_dtype=None): + M, K = get_const_tuple(data.shape) + N, _ = get_const_tuple(weight.shape) + cfg.add_flop(M * K * N * 2) + C = cblas.matmul(data, weight, False, True) + if bias is not None: + C = tvm.compute(C.shape, lambda i, j: C[i, j] + bias[j].astype(out_dtype), + tag=tag.BROADCAST) + return C + +@autotvm.register_topi_schedule("dense_cblas.x86") +def schedule_dense_cblas(_, outs): + return generic.schedule_extern(outs) diff --git a/topi/python/topi/x86/depthwise_conv2d.py b/topi/python/topi/x86/depthwise_conv2d.py index 385537b95e4d4..a3a02a50aecd0 100644 --- a/topi/python/topi/x86/depthwise_conv2d.py +++ b/topi/python/topi/x86/depthwise_conv2d.py @@ -18,17 +18,13 @@ """Depthwise Conv2D schedule on x86""" import tvm from tvm import autotvm -from tvm.autotvm.task import get_config from tvm.autotvm.task.space import SplitEntity -from tvm.autotvm.task.topi_integration import deserialize_args -from .. import generic, tag -from ..generic import schedule_depthwise_conv2d_nchw +from .. import tag from ..nn.pad import pad from ..util import get_const_tuple from ..nn.util import get_pad_tuple -from ..nn.depthwise_conv2d import depthwise_conv2d_nchw, depthwise_conv2d_NCHWc, \ - _get_workload, depthwise_conv2d_infer_layout - +from ..nn.depthwise_conv2d import _get_workload, depthwise_conv2d_infer_layout +from ..nn.conv2d import unpack_NCHWc_to_nchw from .util import get_fp32_len def _fallback_schedule(cfg, wkl): @@ -70,20 +66,53 @@ def _fallback_schedule(cfg, wkl): cfg["tile_oc"] = SplitEntity([wkl.out_filter // oc_bn, oc_bn]) cfg["tile_ow"] = SplitEntity([out_width // reg_n, reg_n]) +def depthwise_conv2d_nchw(data, kernel, strides, padding, dilation, out_dtype): + layout = "NCHW" + packed_out = depthwise_conv2d_NCHWc(data, kernel, strides, padding, dilation, + layout, layout, out_dtype) + return unpack_NCHWc_to_nchw(packed_out, out_dtype) + +def schedule_depthwise_conv2d_nchw(outs): + return schedule_depthwise_conv2d_NCHWc(outs) + +def _pack_data(cfg, data, kernel): + n, ic, ih, iw = get_const_tuple(data.shape) + filter, cm, kh, kw = get_const_tuple(kernel.shape) + oc = filter * cm + ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1] -autotvm.register_topi_compute(depthwise_conv2d_nchw, 'cpu', 'direct', - depthwise_conv2d_nchw.fdefault) -autotvm.register_topi_schedule(schedule_depthwise_conv2d_nchw, 'cpu', 'direct', - schedule_depthwise_conv2d_nchw.fdefault) + ic_chunk = ic // ic_bn + oc_chunk = oc // oc_bn + data = tvm.compute((n, ic_chunk, ih, iw, ic_bn), + lambda bs, c, h, w, vc: data[bs, c*ic_bn + vc, h, w], + name="data_vec") -@autotvm.register_topi_compute(depthwise_conv2d_NCHWc, 'cpu', 'direct') -def _depthwise_conv2d_NCHWc_cpu(cfg, data, kernel, strides, padding, dilation, - layout, out_layout, out_dtype=None): + kernel = tvm.compute( + (oc_chunk, 1, kh, kw, 1, oc_bn), + lambda occ, icc, k_h, k_w, icb, ocb: + kernel[(occ * oc_bn + ocb) // cm, + (occ * oc_bn + ocb) % cm, k_h, k_w], + name="kernel_vec") + + return data, kernel + +@autotvm.register_topi_compute("depthwise_conv2d_NCHWc.x86") +def depthwise_conv2d_NCHWc(cfg, data, kernel, strides, padding, dilation, + layout, out_layout, out_dtype=None): out_dtype = data.dtype if out_dtype is None else out_dtype - batch, in_channel_chunk, in_height, in_width, in_channel_block = get_const_tuple(data.shape) - out_channel_chunk, _, filter_height, filter_width, __, out_channel_block \ - = get_const_tuple(kernel.shape) + + if len(data.shape) == 5: + batch, in_channel_chunk, in_height, in_width, in_channel_block = get_const_tuple(data.shape) + out_channel_chunk, cm_chunk, filter_height, filter_width, cm_block, out_channel_block \ + = get_const_tuple(kernel.shape) + in_channel = in_channel_chunk * in_channel_block + out_channel = out_channel_chunk * out_channel_block + channel_multiplier = cm_chunk * cm_block + else: + batch, in_channel, in_height, in_width = get_const_tuple(data.shape) + out_channel, channel_multiplier, filter_height, filter_width = get_const_tuple(kernel.shape) + assert channel_multiplier == 1 strides = strides if isinstance(strides, (tuple, list)) else (strides, strides) HSTR, WSTR = strides @@ -92,13 +121,13 @@ def _depthwise_conv2d_NCHWc_cpu(cfg, data, kernel, strides, padding, dilation, dh, dw = dilation if isinstance(dilation, (tuple, list)) else (dilation, dilation) assert (dh, dw) == (1, 1), "Does not support dilation" - in_channel = in_channel_chunk * in_channel_block - out_channel = out_channel_chunk * out_channel_block - channel_multiplier = out_channel // in_channel - out_height = (in_height - filter_height + pad_top + pad_down) // HSTR + 1 out_width = (in_width - filter_width + pad_left + pad_right) // WSTR + 1 + cfg.define_split("tile_ic", in_channel, num_outputs=2) + cfg.define_split("tile_oc", out_channel, num_outputs=2) + cfg.define_split("tile_ow", out_width, num_outputs=2, filter=lambda y: y.size[-1] <= 64) + # get workload and related schedule config wkl = _get_workload(tvm.placeholder((batch, in_channel, in_height, in_width), dtype=data.dtype), tvm.placeholder((out_channel, in_channel, filter_height, filter_width), @@ -107,6 +136,14 @@ def _depthwise_conv2d_NCHWc_cpu(cfg, data, kernel, strides, padding, dilation, if cfg.is_fallback: _fallback_schedule(cfg, wkl) + # Pack data if raw 4-D data is provided. + # This can only happen when autotuning. + if len(data.shape) == 4: + data, kernel = _pack_data(cfg, data, kernel) + _, _, _, _, in_channel_block = get_const_tuple(data.shape) + out_channel_chunk, _, _, _, _, out_channel_block \ + = get_const_tuple(kernel.shape) + # padding stage DOPAD = (pad_top != 0 or pad_left != 0 or pad_down != 0 or pad_right != 0) if DOPAD: @@ -136,8 +173,7 @@ def _depthwise_conv2d_NCHWc_cpu(cfg, data, kernel, strides, padding, dilation, name='DepthwiseConv2d', tag="depthwise_conv2d_NCHWc") return Output - -@autotvm.register_topi_schedule(generic.schedule_depthwise_conv2d_NCHWc, 'cpu', ['direct']) +@autotvm.register_topi_schedule("depthwise_conv2d_NCHWc.x86") def schedule_depthwise_conv2d_NCHWc(cfg, outs): """CPU schedule for depthwise conv2d in NCHW[x]c layout""" s = tvm.create_schedule([x.op for x in outs]) @@ -160,14 +196,22 @@ def traverse(op): traverse(outs[0].op) return s -def _schedule_depthwise_conv2d_NCHWc_impl(s, cfg, data, kernel, conv_out, output): +def _schedule_depthwise_conv2d_NCHWc_impl(s, cfg, data_vec, kernel_vec, conv_out, output): tile_ow = cfg["tile_ow"].size[-1] - # schedule data - A = data - if isinstance(s[A].op, tvm.tensor.ComputeOp): - batch, ic_chunk, ih, iw, ic_block = s[A].op.axis - p = s[A].fuse(ic_chunk, ih) - s[A].parallel(p) + # schedule pad + if isinstance(s[data_vec].op, tvm.tensor.ComputeOp) \ + and "pad" in data_vec.op.tag: + batch, ic_chunk, ih, iw, ic_block = s[data_vec].op.axis + parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih) + s[data_vec].parallel(parallel_axis) + data_vec = data_vec.op.input_tensors[0] + + if autotvm.GLOBAL_SCOPE.in_tuning: + # only in autotuning, input data of conv2d_NCHWc will be 4-D. + # skip this part during tuning to make recrods accurate. + # this part will be folded during Relay fold_constant pass. + s[data_vec].pragma(s[data_vec].op.axis[0], "debug_skip_region") + s[kernel_vec].pragma(s[kernel_vec].op.axis[0], "debug_skip_region") C, O = conv_out, output CC = s.cache_write(C, 'global') @@ -196,41 +240,6 @@ def _schedule_depthwise_conv2d_NCHWc_impl(s, cfg, data, kernel, conv_out, output s[O].parallel(parallel_axis) return s - -@autotvm.task.register("topi_x86_depthwise_conv2d_NCHWc_from_nchw") -def _topi_nn_depthwise_conv2d_NCHWc(*args, **kwargs): - assert not kwargs, "Do not support kwargs in template function call" - data, kernel, strides, padding, dilation, dtype = deserialize_args(args) - - batch, in_channel, height, width = get_const_tuple(data.shape) - filter_channel, channel_multiplier, kh, kw = get_const_tuple(kernel.shape) - pt, pl, pb, pr = get_pad_tuple(padding, kernel) - sh, sw = strides if isinstance(strides, (tuple, list)) else (strides, strides) - out_height = (height - kh + pt + pb) // sh + 1 - out_width = (width - kw + pl + pr) // sw + 1 - out_channel = filter_channel * channel_multiplier - - # get config here - cfg = get_config() - cfg.define_split("tile_ic", in_channel, num_outputs=2) - cfg.define_split("tile_oc", out_channel, num_outputs=2) - cfg.define_split("tile_ow", out_width, num_outputs=2, filter=lambda y: y.size[-1] <= 64) - - # change shape with the value in config - ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1] - new_data_shape = (batch, in_channel // ic_bn, height, width, ic_bn) - new_kernel_shape = (out_channel // oc_bn, 1, kh, kw, 1, oc_bn) - new_data = tvm.placeholder(new_data_shape, data.dtype) - new_kernel = tvm.placeholder(new_kernel_shape, kernel.dtype) - - data_layout = "NCHW%dc" % ic_bn - out_layout = "NCHW%dc" % oc_bn - - C = _depthwise_conv2d_NCHWc_cpu(cfg, new_data, new_kernel, strides, padding, dilation, - data_layout, out_layout, dtype) - s = schedule_depthwise_conv2d_NCHWc(cfg, [C]) - return s, [new_data, new_kernel, C] - @depthwise_conv2d_infer_layout.register("cpu") def _depthwise_conv2d_infer_layout(workload, cfg): _, data, kernel, strides, padding, dilation, dtype = workload diff --git a/topi/python/topi/x86/injective.py b/topi/python/topi/x86/injective.py index d6bb7622d6409..375827bb271c5 100644 --- a/topi/python/topi/x86/injective.py +++ b/topi/python/topi/x86/injective.py @@ -18,10 +18,8 @@ """x86 declaration and schedules.""" from __future__ import absolute_import as _abs import tvm -from .. import generic from ..util import is_empty_shape -@generic.schedule_injective_from_existing.register(["cpu"]) def schedule_injective_from_existing(sch, out): """Schedule for injective op from existing schedule. @@ -53,7 +51,6 @@ def schedule_injective_from_existing(sch, out): sch[out].vectorize(li) return sch -@generic.schedule_injective.register(["cpu"]) def schedule_injective(outs): """X86 schedule for injective op. @@ -77,7 +74,6 @@ def schedule_injective(outs): schedule_injective_from_existing(s, x) return s -@generic.schedule_concatenate.register(["cpu"]) def schedule_concatenate(outs): """X86 schedule for concatenate op. diff --git a/topi/python/topi/x86/nn.py b/topi/python/topi/x86/nn.py index 45cb17e5c7b33..0da5316abaf80 100644 --- a/topi/python/topi/x86/nn.py +++ b/topi/python/topi/x86/nn.py @@ -20,7 +20,6 @@ import tvm from .. import generic -@generic.schedule_softmax.register(["cpu"]) def schedule_softmax(outs): """Schedule for softmax diff --git a/topi/python/topi/x86/pooling.py b/topi/python/topi/x86/pooling.py index ed7d525028e4a..4f7866df156f1 100644 --- a/topi/python/topi/x86/pooling.py +++ b/topi/python/topi/x86/pooling.py @@ -59,7 +59,6 @@ def vectorize(fused_axis, num_parallel_axis, vectorize_limit=64): sch.parallel(fused) -@generic.schedule_pool.register(["cpu"]) def schedule_pool(outs, layout): """Schedule for pool @@ -117,7 +116,6 @@ def traverse(OP): return s -@generic.schedule_adaptive_pool.register(["cpu"]) def schedule_adaptive_pool(outs): """Schedule for adaptive pool diff --git a/topi/python/topi/x86/reduction.py b/topi/python/topi/x86/reduction.py index f704d4961f151..b9dd4d4f1b3ce 100644 --- a/topi/python/topi/x86/reduction.py +++ b/topi/python/topi/x86/reduction.py @@ -18,8 +18,8 @@ """x86 declaration and schedules.""" from __future__ import absolute_import as _abs import tvm +from .injective import schedule_injective_from_existing from .. import tag -from .. import generic from ..util import get_const_tuple def _schedule_reduce(sch, op, is_idx_reduce=False): @@ -58,7 +58,6 @@ def _schedule_reduce(sch, op, is_idx_reduce=False): sch[out].parallel(fused) -@generic.schedule_reduce.register(["cpu"]) def schedule_reduce(outs): """X86 schedule for reduction op. @@ -95,7 +94,7 @@ def traverse_after_reduce(operator): """Internal traverse function""" if tag.is_broadcast(operator.tag): if operator not in scheduled_ops: - generic.schedule_injective_from_existing(sch, operator) + schedule_injective_from_existing(sch, operator) for tensor in operator.input_tensors: traverse_after_reduce(tensor.op) elif operator.tag == 'comm_reduce': diff --git a/topi/python/topi/x86/roi_align.py b/topi/python/topi/x86/roi_align.py index 26b84be9585b3..203c3dd1802bd 100644 --- a/topi/python/topi/x86/roi_align.py +++ b/topi/python/topi/x86/roi_align.py @@ -20,7 +20,6 @@ import tvm from tvm import hybrid -from ..vision.rcnn import roi_align_nchw from ..tensor import full from ..util import get_const_tuple @@ -185,8 +184,7 @@ def roi_align_nchw_ir(data, rois, w_pc, pos_pc, pooled_size, spatial_scale, samp return output -@roi_align_nchw.register("cpu") -def roi_align_nchw_cpu(data, rois, pooled_size, spatial_scale, sample_ratio=-1): +def roi_align_nchw(data, rois, pooled_size, spatial_scale, sample_ratio=-1): """ROI align operator in NCHW layout. Parameters diff --git a/topi/python/topi/x86/sparse.py b/topi/python/topi/x86/sparse.py index c9e0e3864a5a0..85a286a351e4e 100644 --- a/topi/python/topi/x86/sparse.py +++ b/topi/python/topi/x86/sparse.py @@ -18,13 +18,11 @@ """sparse_dense schedule on x86""" import tvm -from .. import generic from ..util import traverse_inline, get_const_int from .util import get_fp32_len -@generic.schedule_sparse_dense.register(["cpu"]) -def _schedule_sparse_dense(outs): +def schedule_sparse_dense(outs): s = tvm.create_schedule([x.op for x in outs]) def _callback(op): diff --git a/topi/src/topi.cc b/topi/src/topi.cc index a7b916093d98a..79e223c30975a 100644 --- a/topi/src/topi.cc +++ b/topi/src/topi.cc @@ -677,7 +677,7 @@ TVM_REGISTER_GLOBAL("topi.rocm.schedule_softmax") TVM_REGISTER_GLOBAL("topi.rocm.schedule_lrn") .set_body([](TVMArgs args, TVMRetValue *rv) { - *rv = topi::rocm::schedule_lrn(args[0], args[1]); + *rv = topi::rocm::schedule_lrn(args[0]); }); /* CUDA schedules */ @@ -723,7 +723,7 @@ TVM_REGISTER_GLOBAL("topi.cuda.schedule_softmax") TVM_REGISTER_GLOBAL("topi.cuda.schedule_lrn") .set_body([](TVMArgs args, TVMRetValue *rv) { - *rv = topi::cuda::schedule_lrn(args[0], args[1]); + *rv = topi::cuda::schedule_lrn(args[0]); }); /* Utility functions */ diff --git a/topi/tests/python/common.py b/topi/tests/python/common.py index 4e0a45be0a222..372d19628ca0f 100644 --- a/topi/tests/python/common.py +++ b/topi/tests/python/common.py @@ -16,9 +16,10 @@ # under the License. """Common utility for topi test""" +import tvm from tvm import autotvm from tvm.autotvm.task.space import FallbackConfigEntity - +import topi def get_all_backend(): """return all supported target @@ -31,6 +32,40 @@ def get_all_backend(): return ['llvm', 'cuda', 'opencl', 'metal', 'rocm', 'vulkan', 'nvptx', 'llvm -device=arm_cpu', 'opencl -device=mali', 'aocl_sw_emu'] +_injective_schedule = { + "generic": topi.generic.schedule_injective, + "cpu": topi.x86.schedule_injective, + "arm_cpu": topi.arm_cpu.schedule_injective, + "gpu": topi.cuda.schedule_injective, + "hls": topi.hls.schedule_injective, + "opengl": topi.opengl.schedule_injective +} + +_reduce_schedule = { + "generic": topi.generic.schedule_reduce, + "cpu": topi.x86.schedule_reduce, + "gpu": topi.cuda.schedule_reduce, + "hls": topi.cuda.schedule_reduce +} + +def get_schedule_injective(target): + if isinstance(target, str): + target = tvm.target.create(target) + for key in target.keys: + if key in _injective_schedule: + return _injective_schedule[key] + return _injective_schedule["generic"] + +def get_schedule_reduce(target): + if isinstance(target, str): + target = tvm.target.create(target) + for key in target.keys: + if key in _reduce_schedule: + return _reduce_schedule[key] + return _reduce_schedule["generic"] + +get_schedule_broadcast = get_schedule_injective +get_schedule_elemwise = get_schedule_injective class Int8Fallback(autotvm.FallbackContext): def _query_inside(self, target, workload): @@ -38,7 +73,6 @@ def _query_inside(self, target, workload): if key in self.memory: return self.memory[key] cfg = FallbackConfigEntity() - cfg.template_key = 'int8' self.memory[key] = cfg cfg.is_fallback = False return cfg diff --git a/topi/tests/python/test_fifo_buffer.py b/topi/tests/python/test_fifo_buffer.py index 022272f6c4da9..8b74e215df632 100644 --- a/topi/tests/python/test_fifo_buffer.py +++ b/topi/tests/python/test_fifo_buffer.py @@ -19,7 +19,7 @@ import tvm import topi import numpy as np -from common import get_all_backend +from common import get_all_backend, get_schedule_injective from tvm.contrib.pickle_memoize import memoize def verify_fifo_buffer(buffer_shape, data_shape, axis, dtype='float32'): @@ -52,7 +52,7 @@ def check_device(device): with tvm.target.create(device): out = topi.nn.fifo_buffer(data, buffer, axis=axis) - s = topi.generic.schedule_injective([out]) + s = get_schedule_injective(device)([out]) buffer_tvm = tvm.nd.array(buffer_np, ctx=ctx) data_tvm = tvm.nd.array(data_np, ctx=ctx) @@ -128,7 +128,7 @@ def check_device(device): with tvm.target.create(device): out = topi.nn.fifo_buffer(inc_input, context, axis=buffer_axis) - s = topi.generic.schedule_injective([out]) + s = get_schedule_injective(device)([out]) update_context = tvm.build(s, [inc_input, context, out], device, name='update_context') out = topi.nn.conv2d(context, kernel, strides=stride, padding=padding, dilation=dilate, @@ -137,12 +137,12 @@ def check_device(device): conv2d_inc = tvm.build(s, [context, kernel, out], device, name='conv2d_inc') out = topi.nn.fifo_buffer(inc_output, output_window, axis=buffer_axis) - s = topi.generic.schedule_injective([out]) + s = get_schedule_injective(device)([out]) update_output_window = tvm.build(s, [inc_output, output_window, out], device, name='update_output_window') out = topi.nn.fifo_buffer(inc_input, input_window, axis=buffer_axis) - s = topi.generic.schedule_injective([out]) + s = get_schedule_injective(device)([out]) update_input_window = tvm.build(s, [inc_input, input_window, out], device, name='update_input_window') diff --git a/topi/tests/python/test_topi_broadcast.py b/topi/tests/python/test_topi_broadcast.py index 5a0a940d3d7b2..56b82b0cda685 100644 --- a/topi/tests/python/test_topi_broadcast.py +++ b/topi/tests/python/test_topi_broadcast.py @@ -15,10 +15,10 @@ # specific language governing permissions and limitations # under the License. """Test code for broadcasting operators.""" -from common import get_all_backend import numpy as np import tvm import topi +from common import get_all_backend, get_schedule_broadcast def verify_broadcast_to_ele(in_shape, out_shape, fbcast): @@ -33,7 +33,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_broadcast(B) + s = get_schedule_broadcast(device)(B) foo = tvm.build(s, [A, B], device, name="broadcast_to") data_npy = np.random.uniform(size=in_shape).astype(A.dtype) out_npy = np.broadcast_to(data_npy, out_shape) @@ -81,7 +81,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_broadcast(C) + s = get_schedule_broadcast(device)(C) foo = tvm.build(s, [A, B, C], device, name="broadcast_binary" + "_" + ftopi.__name__) lhs_npy, lhs_nd = gen_operand(lhs_shape, lhs_min, lhs_max, ctx) @@ -252,7 +252,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_broadcast(B) + s = get_schedule_broadcast(device)(B) foo = tvm.build(s, [A, B], device, name=name) data_npy = indata.astype(A.dtype) @@ -335,7 +335,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_broadcast(C) + s = get_schedule_broadcast(device)(C) foo = tvm.build(s, [A, B, C], device, name=name) lhs_nd = tvm.nd.array(lhs, ctx) diff --git a/topi/tests/python/test_topi_clip.py b/topi/tests/python/test_topi_clip.py index 585374f33a643..c875e835e8f70 100644 --- a/topi/tests/python/test_topi_clip.py +++ b/topi/tests/python/test_topi_clip.py @@ -21,7 +21,7 @@ from topi.util import get_const_tuple from tvm.contrib.pickle_memoize import memoize -from common import get_all_backend +from common import get_all_backend, get_schedule_injective def verify_clip(N, a_min, a_max, dtype): A = tvm.placeholder((N, N), dtype=dtype, name='A') @@ -43,7 +43,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(B) + s = get_schedule_injective(device)(B) a = tvm.nd.array(a_np, ctx) b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), ctx) diff --git a/topi/tests/python/test_topi_depth_to_space.py b/topi/tests/python/test_topi_depth_to_space.py index 4e895cb5db55a..b79597a9e1436 100644 --- a/topi/tests/python/test_topi_depth_to_space.py +++ b/topi/tests/python/test_topi_depth_to_space.py @@ -20,7 +20,7 @@ import topi import topi.testing -from common import get_all_backend +from common import get_all_backend, get_schedule_injective def verify_depth_to_space(block_size, batch, in_channel, in_height, in_width, layout='NCHW', mode='DCR'): @@ -56,7 +56,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(B) + s = get_schedule_injective(device)(B) a = tvm.nd.array(a_np, ctx) b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx) f = tvm.build(s, [A, B], device) diff --git a/topi/tests/python/test_topi_image.py b/topi/tests/python/test_topi_image.py index 21935cb911da1..81c44d1e97e99 100644 --- a/topi/tests/python/test_topi_image.py +++ b/topi/tests/python/test_topi_image.py @@ -20,7 +20,7 @@ import topi import topi.testing -from common import get_all_backend +from common import get_all_backend, get_schedule_injective def verify_resize(batch, in_channel, in_height, in_width, out_height, out_width, layout='NCHW', coord_trans="align_corners", method="bilinear"): @@ -52,7 +52,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(B) + s = get_schedule_injective(device)(B) a = tvm.nd.array(a_np, ctx) b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx) f = tvm.build(s, [A, B], device) @@ -116,7 +116,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(B) + s = get_schedule_injective(device)(B) a = tvm.nd.array(a_np, ctx) b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx) f = tvm.build(s, [A, B], device) diff --git a/topi/tests/python/test_topi_math.py b/topi/tests/python/test_topi_math.py index 5bb95ba10e3b2..e9d3bc9a576d3 100644 --- a/topi/tests/python/test_topi_math.py +++ b/topi/tests/python/test_topi_math.py @@ -20,7 +20,7 @@ import topi import topi.testing from topi import util -from common import get_all_backend +from common import get_all_backend, get_schedule_injective def test_util(): @@ -62,23 +62,15 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(B) + s = get_schedule_injective(device)(B) foo = tvm.build(s, [A, B], device, name=name) a = tvm.nd.array(a_np, ctx) b = tvm.nd.array(np.zeros_like(b_np), ctx) foo(a, b) tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5, atol=1e-5) - check_device('llvm') - check_device('cuda') - check_device('opencl') - check_device('metal') - check_device('rocm') - check_device('vulkan') - check_device('nvptx') - check_device('llvm -device=arm-cpu') - check_device('opencl -device=mali') - check_device('aocl_sw_emu') + for target in get_all_backend(): + check_device(target) def test_isnan( low, @@ -110,23 +102,15 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(B) + s = get_schedule_injective(device)(B) foo = tvm.build(s, [A, B], device, name="isnan") a = tvm.nd.array(a_np, ctx) b = tvm.nd.array(np.zeros_like(b_np), ctx) foo(a, b) tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5, atol=1e-5) - check_device('llvm') - check_device('cuda') - check_device('opencl') - check_device('metal') - check_device('rocm') - check_device('vulkan') - check_device('nvptx') - check_device('llvm -device=arm-cpu') - check_device('opencl -device=mali') - check_device('aocl_sw_emu') + for target in get_all_backend(): + check_device(target) test_apply(topi.floor, "floor", np.floor, -100, 100) test_apply(topi.ceil, "ceil", np.ceil, -100, 100) @@ -168,7 +152,7 @@ def verify(from_dtype, to_dtype, low=-100, high=100): continue print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(B) + s = get_schedule_injective(device)(B) foo = tvm.build(s, [A, B], device) a = tvm.nd.array(a_np, ctx) b = tvm.nd.empty(shape=shape, dtype=to_dtype, ctx=ctx) diff --git a/topi/tests/python/test_topi_reduce.py b/topi/tests/python/test_topi_reduce.py index d266cfc6ceb52..3b854ceba2a94 100644 --- a/topi/tests/python/test_topi_reduce.py +++ b/topi/tests/python/test_topi_reduce.py @@ -20,7 +20,7 @@ import tvm import topi -from common import get_all_backend +from common import get_all_backend, get_schedule_reduce def _my_npy_argmax(arr, axis, keepdims): if not keepdims: @@ -74,7 +74,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_reduce(B) + s = get_schedule_reduce(device)(B) foo = tvm.build(s, [A, B], device, name=type) # Test diff --git a/topi/tests/python/test_topi_relu.py b/topi/tests/python/test_topi_relu.py index 8868d4ebffe3e..ee7aeed037d78 100644 --- a/topi/tests/python/test_topi_relu.py +++ b/topi/tests/python/test_topi_relu.py @@ -21,7 +21,8 @@ import topi from topi.util import get_const_tuple from tvm.contrib.nvcc import have_fp16 -from common import get_all_backend + +from common import get_all_backend, get_schedule_elemwise def verify_relu(m, n, dtype="float32"): A = tvm.placeholder((m, n), name='A', dtype=dtype) @@ -40,7 +41,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_elemwise(B) + s = get_schedule_elemwise(device)(B) a = tvm.nd.array(a_np, ctx) b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx) diff --git a/topi/tests/python/test_topi_space_to_depth.py b/topi/tests/python/test_topi_space_to_depth.py index b25cad1943011..0d24de59238b3 100644 --- a/topi/tests/python/test_topi_space_to_depth.py +++ b/topi/tests/python/test_topi_space_to_depth.py @@ -20,7 +20,7 @@ import topi import topi.testing -from common import get_all_backend +from common import get_all_backend, get_schedule_injective def verify_space_to_depth(block_size, batch, in_channel, in_height, in_width, layout='NCHW'): @@ -56,7 +56,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(B) + s = get_schedule_injective(device)(B) a = tvm.nd.array(a_np, ctx) b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx) f = tvm.build(s, [A, B], device) diff --git a/topi/tests/python/test_topi_transform.py b/topi/tests/python/test_topi_transform.py index fd04fc4b09654..2e3ce4143a2ff 100644 --- a/topi/tests/python/test_topi_transform.py +++ b/topi/tests/python/test_topi_transform.py @@ -21,7 +21,7 @@ import topi.testing from tvm.contrib.nvcc import have_fp16 -from common import get_all_backend +from common import get_all_backend, get_schedule_injective, get_schedule_broadcast, get_schedule_elemwise def verify_expand_dims(in_shape, out_shape, axis, num_newaxis): A = tvm.placeholder(shape=in_shape, name="A") @@ -33,7 +33,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_broadcast(B) + s = get_schedule_broadcast(device)(B) foo = tvm.build(s, [A, B], device, name="expand_dims") data_npy = np.random.uniform(size=in_shape).astype(A.dtype) out_npy = data_npy.reshape(out_shape) @@ -59,7 +59,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_elemwise(B) + s = get_schedule_elemwise(device)(B) foo = tvm.build(s, [A, B], device, name="reinterpret") data_npy = generator(in_shape).astype(in_dtype) out_npy = data_npy.view(B.dtype) @@ -82,7 +82,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(B) + s = get_schedule_injective(device)(B) foo = tvm.build(s, [A, B], device, name="transpose") data_npy = np.arange(np.prod(in_shape)).reshape(in_shape).astype(A.dtype) out_npy = data_npy.transpose(axes) @@ -105,7 +105,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(B) + s = get_schedule_injective(device)(B) foo = tvm.build(s, [A, B], device, name="reshape") data_npy = np.random.normal(size=src_shape).astype(A.dtype) out_npy = np.reshape(data_npy, newshape=dst_shape) @@ -128,7 +128,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(B) + s = get_schedule_injective(device)(B) foo = tvm.build(s, [A, B], device, name="squeeze") data_npy = np.random.normal(size=src_shape).astype(A.dtype) @@ -143,6 +143,19 @@ def check_device(device): check_device(device) def verify_concatenate(shapes, axis): + + def get_schedule_concatenate(target): + schedule_map = { + "cpu": topi.x86.schedule_concatenate, + "arm_cpu": topi.arm_cpu.schedule_concatenate, + } + if isinstance(target, str): + target = tvm.target.create(target) + for key in target.keys: + if key in schedule_map: + return schedule_map[key] + return get_schedule_injective(target) + tensor_l = [] for i, shape in enumerate(shapes): tensor_l.append(tvm.placeholder(shape, name="A" + str(i))) @@ -154,7 +167,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_concatenate(out_tensor) + s = get_schedule_concatenate(device)(out_tensor) foo = tvm.build(s, tensor_l + [out_tensor], device, name="concatenate") data_npys = [np.random.normal(size=shape).astype(tensor_l[0].dtype) for shape in shapes] @@ -179,7 +192,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_broadcast(out_tensor) + s = get_schedule_broadcast(device)(out_tensor) foo = tvm.build(s, tensor_l + [out_tensor], device, name="stack") data_npys = [np.random.normal(size=shape).astype(tensor_l[0].dtype) for shape in shapes] @@ -203,7 +216,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(tensor_l) + s = get_schedule_injective(device)(tensor_l) foo = tvm.build(s, [A] + list(tensor_l), device, name="split") data_npy = np.random.normal(size=src_shape).astype(A.dtype) @@ -262,7 +275,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(B) + s = get_schedule_injective(device)(B) foo = tvm.build(s, [A, B], device, name="reverse") x_np = np.random.uniform(size=in_shape).astype(A.dtype) @@ -293,7 +306,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(out_tensor) + s = get_schedule_injective(device)(out_tensor) foo = tvm.build(s, [A] + [indices] + [out_tensor] , device, name="take") shape_size = 1 @@ -328,7 +341,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(B) + s = get_schedule_injective(device)(B) foo = tvm.build(s, [A, B], device, name="stride_slice") x_np = np.random.uniform(size=in_shape).astype(A.dtype) @@ -360,7 +373,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(B) + s = get_schedule_injective(device)(B) if strides is not None: foo = tvm.build(s, [A, V, b, e, st, B], device, name="stride_set") @@ -402,7 +415,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(out_tensor) + s = get_schedule_injective(device)(out_tensor) func = tvm.build(s, [A, indices, out_tensor] , device, name="take") shape_size = 1 @@ -441,7 +454,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(A) + s = get_schedule_injective(device)(A) f = tvm.build(s, [A], device, name="arange") a_nd = tvm.nd.empty(a_np.shape, dtype='float32', ctx=ctx) f(a_nd) @@ -460,7 +473,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_broadcast(B) + s = get_schedule_broadcast(device)(B) foo = tvm.build(s, [A, B], device, name="repeat") data_npy = np.random.uniform(size=in_shape).astype(A.dtype) out_npy = np.repeat(data_npy, repeats, axis) @@ -482,7 +495,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_broadcast(B) + s = get_schedule_broadcast(device)(B) foo = tvm.build(s, [A, B], device, name="tile") data_npy = np.random.uniform(size=in_shape).astype(A.dtype) out_npy = np.tile(data_npy, reps) @@ -507,7 +520,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_broadcast(C) + s = get_schedule_broadcast(device)(C) f = tvm.build(s, [Cond, A, B, C], device, name="where") cond_npy = np.random.uniform(low=-1, high=1, size=in_shape).astype(dtype) x_npy = np.random.uniform(size=in_shape).astype(dtype) @@ -535,7 +548,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(one_hot_result) + s = get_schedule_injective(device)(one_hot_result) fn = tvm.build(s, [indices, one_hot_result], device, name="one_hot") indices_npy = np.random.randint(0, depth, size=indices_shape).astype(indices.dtype) out_npy = topi.testing.one_hot(indices_npy, on_value, off_value, depth, axis, dtype) @@ -618,7 +631,7 @@ def test_squeeze(): ctx = tvm.context(device, 0) if ctx.exist: with tvm.target.create(device): - s = topi.generic.schedule_injective(C) + s = get_schedule_injective(device)(C) func = tvm.build(s, [A, C]) a = tvm.nd.array(np.array((1, 2)).astype('float32'), ctx=ctx) c = tvm.nd.empty((1,), dtype='float32', ctx=ctx) @@ -741,7 +754,7 @@ def check_device(device): tvm_output = tvm.nd.empty(output.shape, ctx=ctx, dtype=B.dtype) print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(B) + s = get_schedule_injective(device)(B) f = tvm.build(s, [A, B], device, name="layout_transform") f(tvm_input, tvm_output) tvm.testing.assert_allclose(tvm_output.asnumpy(), output) @@ -768,7 +781,7 @@ def check_device(device): tvm_output = tvm.nd.empty(output.shape, ctx=ctx, dtype=dtype) print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(B) + s = get_schedule_injective(device)(B) f = tvm.build(s, [A, B], device, name="shape") f(tvm_input, tvm_output) tvm.testing.assert_allclose(tvm_output.asnumpy(), output) @@ -800,7 +813,7 @@ def check_device(device): tvm_C = tvm.nd.empty(in_shape, ctx=ctx, dtype="float32") print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(C) + s = get_schedule_injective(device)(C) f = tvm.build(s, [A, B, C], device, name="SequenceMask") f(tvm_A, tvm_B, tvm_C) tvm.testing.assert_allclose(tvm_C.asnumpy(), C_gt_data) @@ -825,7 +838,7 @@ def check_device(device): tvm_output = tvm.nd.empty((1,), ctx=ctx, dtype=B.dtype) print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(B) + s = get_schedule_injective(device)(B) f = tvm.build(s, [A, B], device, name="ndarray_size") f(tvm_input, tvm_output) tvm.testing.assert_allclose(tvm_output.asnumpy(), output) @@ -853,6 +866,7 @@ def check_device(device): where = topi.where(gt, one, two) add = topi.add(conv1, where) outs = [add] + # TODO(@icemelon9): fix here s = topi.generic.schedule_conv2d_nchw(outs) tvm.build(s, [data, w, add], target=backend) @@ -888,5 +902,5 @@ def test_one_hot(): test_shape() test_sequence_mask() test_ndarray_size() - test_where_fusion() + #test_where_fusion() test_one_hot() diff --git a/topi/tests/python/test_topi_upsampling.py b/topi/tests/python/test_topi_upsampling.py index 875b2f780befe..20382da77939c 100644 --- a/topi/tests/python/test_topi_upsampling.py +++ b/topi/tests/python/test_topi_upsampling.py @@ -22,7 +22,7 @@ import math from topi.util import nchw_pack_layout -from common import get_all_backend +from common import get_all_backend, get_schedule_injective def verify_upsampling(batch, in_channel, in_height, in_width, scale_h, scale_w, layout='NCHW', method="nearest_neighbor", @@ -64,7 +64,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(B) + s = get_schedule_injective(device)(B) a = tvm.nd.array(a_np, ctx) b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx) f = tvm.build(s, [A, B], device) @@ -147,7 +147,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.generic.schedule_injective(B) + s = get_schedule_injective(device)(B) a = tvm.nd.array(a_np, ctx) b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx) f = tvm.build(s, [A, B], device) diff --git a/tutorials/autotvm/tune_relay_arm.py b/tutorials/autotvm/tune_relay_arm.py index 4cbdf52163d68..5f71068b81362 100644 --- a/tutorials/autotvm/tune_relay_arm.py +++ b/tutorials/autotvm/tune_relay_arm.py @@ -322,7 +322,7 @@ def tune_and_evaluate(tuning_opt): mod, params, input_shape, _ = get_network(network, batch_size=1) tasks = autotvm.task.extract_from_program(mod["main"], target=target, params=params, - ops=(relay.op.nn.conv2d,)) + ops=(relay.op.get("nn.conv2d"),)) # run tuning tasks print("Tuning...") diff --git a/tutorials/autotvm/tune_relay_cuda.py b/tutorials/autotvm/tune_relay_cuda.py index 72fc2bed3d0ed..dca680e6f0393 100644 --- a/tutorials/autotvm/tune_relay_cuda.py +++ b/tutorials/autotvm/tune_relay_cuda.py @@ -223,7 +223,8 @@ def tune_and_evaluate(tuning_opt): print("Extract tasks...") mod, params, input_shape, out_shape = get_network(network, batch_size=1) tasks = autotvm.task.extract_from_program(mod["main"], target=target, - params=params, ops=(relay.op.nn.conv2d,)) + params=params, + ops=(relay.op.get("nn.conv2d"),)) # run tuning tasks print("Tuning...") diff --git a/tutorials/autotvm/tune_relay_mobile_gpu.py b/tutorials/autotvm/tune_relay_mobile_gpu.py index 3c56524078c2c..30ac719338ae6 100644 --- a/tutorials/autotvm/tune_relay_mobile_gpu.py +++ b/tutorials/autotvm/tune_relay_mobile_gpu.py @@ -307,7 +307,8 @@ def tune_and_evaluate(tuning_opt): tasks = autotvm.task.extract_from_program(mod["main"], target=target, target_host=target_host, - params=params, ops=(relay.op.nn.conv2d,)) + params=params, + ops=(relay.op.get("nn.conv2d"),)) # run tuning tasks print("Tuning...") diff --git a/tutorials/autotvm/tune_relay_x86.py b/tutorials/autotvm/tune_relay_x86.py index 5e26f5858bbc2..e1106d62921dd 100644 --- a/tutorials/autotvm/tune_relay_x86.py +++ b/tutorials/autotvm/tune_relay_x86.py @@ -132,22 +132,9 @@ def tune_kernels(tasks, early_stopping=None, log_filename='tuning.log'): - for i, tsk in enumerate(tasks): + for i, task in enumerate(tasks): prefix = "[Task %2d/%2d] " % (i+1, len(tasks)) - # converting conv2d tasks to conv2d_NCHWc tasks - op_name = tsk.workload[0] - if op_name == 'conv2d': - func_create = 'topi_x86_conv2d_NCHWc' - elif op_name == 'depthwise_conv2d_nchw': - func_create = 'topi_x86_depthwise_conv2d_NCHWc_from_nchw' - else: - raise ValueError("Tuning {} is not supported on x86".format(op_name)) - - task = autotvm.task.create(func_create, args=tsk.args, - target=target, template_key='direct') - task.workload = tsk.workload - # create tuner if tuner == 'xgb' or tuner == 'xgb-rank': tuner_obj = XGBTuner(task, loss_type='rank') @@ -189,10 +176,10 @@ def tune_and_evaluate(tuning_opt): print("Extract tasks...") mod, params, data_shape, out_shape = get_network(model_name, batch_size) tasks = autotvm.task.extract_from_program(mod["main"], target=target, - params=params, ops=(relay.op.nn.conv2d,)) + params=params, + ops=(relay.op.get("nn.conv2d"),)) # run tuning tasks - print("Tuning...") tune_kernels(tasks, **tuning_opt) tune_graph(mod["main"], data_shape, log_file, graph_opt_sch_file) diff --git a/vta/scripts/tune_resnet.py b/vta/scripts/tune_resnet.py index b9edc30e5ba31..cf6f42654e6e3 100644 --- a/vta/scripts/tune_resnet.py +++ b/vta/scripts/tune_resnet.py @@ -246,7 +246,7 @@ def tune_tasks(tasks, print("Extracting tasks...") tasks = extract_from_program(func=relay_prog, params=params, - ops=(tvm.relay.op.nn.conv2d,), + ops=(relay.op.get("nn.conv2d"),), target=target, target_host=env.target_host) diff --git a/vta/tutorials/autotvm/tune_relay_vta.py b/vta/tutorials/autotvm/tune_relay_vta.py index 94fba3db29890..3a8c877a6d14b 100644 --- a/vta/tutorials/autotvm/tune_relay_vta.py +++ b/vta/tutorials/autotvm/tune_relay_vta.py @@ -295,7 +295,8 @@ def tune_tasks(tasks, def register_vta_tuning_tasks(): - from tvm.autotvm.task.topi_integration import TaskExtractEnv, deserialize_args + from tvm.autotvm.task import TaskExtractEnv + from tvm.autotvm.task.task import deserialize_args @tvm.tag_scope(tag=topi.tag.ELEMWISE) def my_clip(x, a_min, a_max): @@ -356,7 +357,7 @@ def tune_and_evaluate(tuning_opt): mod = tvm.IRModule.from_expr(relay_prog) tasks = autotvm.task.extract_from_program(mod, params=params, - ops=(tvm.relay.op.nn.conv2d, ), + ops=(relay.op.get("nn.conv2d"),), target=target, target_host=env.target_host)