Comparing changes

This is a direct comparison between two commits made in this repository or its related repositories.

base repository: apache/tvm
base: 4b639b590cb7d96040d0840e29351d358e5fe4c1
head repository: apache/tvm
compare: 686da01fe7789dc5c4bcbd25371e6ad7f50fb38b

207 changes: 207 additions & 0 deletions include/tvm/relay/pass.h
@@ -2,18 +2,225 @@
* Copyright (c) 2018 by Contributors
* \file tvm/relay/pass.h
* \brief The set of Relay passes written in C++.
*
* This file also implements a pass manager. The pass manager manages a sequence
* of Relay-to-Relay transformation passes over a particular unit of AST. The
* design is largely inspired by LLVM's pass manager and modern deep learning
* frameworks that perform tensor->tensor transformations.
*
* The responsibilities of a traditional compiler pass manager usually involve:
* - Organizing the execution order of optimization passes, though not
* necessarily in the optimal sequence.
* - Collecting the required analysis information and keeping it up-to-date.
* - Reducing the effort required of compiler developers to implement new
* passes, etc.
*
* Similar to LLVM's pass manager, we designed the Relay pass manager to work
* at different granularities, i.e. module level, function level, and even
* sequential passes that contain a host of passes.
*
* However, we also extend the functionality of the traditional pass manager
* with requirements/conventions common in deep learning
* frameworks, such as PyTorch and Gluon. Each pass in the Relay pass
* manager performs the Relay.Module -> Relay.Module transformation. All
* different types of passes, including the sequential-level pass object, are
* essentially pass objects. This design therefore provides users with a
* consistent and convenient interface, i.e. Pass, to work with. It offers a
* means to ease the development and testing of Relay passes. For example, with
* the pass manager, external users can have custom passes correctly
* scheduled without having to modify a single handcrafted pass order.
*
* In the future we need to describe constraints between passes. For example,
* we may want to preserve dependencies between different passes and validate
* them on the completion of a certain pass.
*
* We also need to store side information and incorporate the error reporting system.
*/
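
To make the Module -> Module contract concrete, here is a minimal Python sketch of a user-defined module-level pass, assuming the decorator form that this change set exports as relay.module_pass (the pass body and the empty-module construction are placeholders):

    from tvm import relay

    @relay.module_pass(opt_level=2)
    def transform(mod, ctx):
        # mod is a relay.Module and ctx a PassContext, mirroring the C++
        # signature Module(Module, PassContext); a real pass would return
        # a rewritten module here.
        return mod

    # A pass object is callable on a module and yields the updated module.
    mod = relay.Module({})
    updated_mod = transform(mod)
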
#ifndef TVM_RELAY_PASS_H_
#define TVM_RELAY_PASS_H_

#include <tvm/ir.h>
#include <tvm/packed_func_ext.h>
#include <tvm/relay/error.h>
#include <tvm/relay/expr.h>
#include <tvm/relay/module.h>
#include <tvm/relay/op_attr_types.h>
#include <tvm/relay/type.h>

#include <string>
#include <vector>

namespace tvm {
namespace relay {

namespace pass {

/*!
* \brief The context of a pass.
*/
class PassContext;

/*!
* \brief PassContextNode contains the information that a pass can rely on, such as
* analysis results.
*/
class PassContextNode : public RelayNode {
public:
/*!
* \brief The error reporter used to notify users why an optimization fails.
*/
ErrorReporter err_reporter;

PassContextNode() = default;

void VisitAttrs(tvm::AttrVisitor* v) final {
}

TVM_DLL static PassContext make();

static constexpr const char* _type_key = "relay.PassContext";
TVM_DECLARE_NODE_TYPE_INFO(PassContextNode, RelayNode);
};

TVM_DEFINE_NODE_REF(PassContext, PassContextNode)

/*!
* \brief The meta data of a pass.
*
* PassInfo can be extended conveniently in the future if more meta information
* is needed.
*/
class PassInfo;

/*!
* \brief PassInfoNode contains meta data that will be used to help optimization
* and analysis.
*/
class PassInfoNode : public RelayNode {
public:
/*! \brief The minimal optimization level at which this pass will be enabled. */
int opt_level;

/*! \brief The name of an optimization/analysis pass. */
std::string name;

/*! \brief The passes that are required to perform the current pass. */
tvm::Array<tvm::Expr> required;

PassInfoNode() = default;

void VisitAttrs(tvm::AttrVisitor* v) final {
v->Visit("opt_level", &opt_level);
v->Visit("name", &name);
v->Visit("required", &required);
}

TVM_DLL static PassInfo make(int opt_level, std::string name,
tvm::Array<tvm::Expr> required);

static constexpr const char* _type_key = "relay.PassInfo";
TVM_DECLARE_NODE_TYPE_INFO(PassInfoNode, RelayNode);
};

TVM_DEFINE_NODE_REF(PassInfo, PassInfoNode)

class Pass;

/*!
* \brief PassNode is the base type of different types of optimization passes.
* It is designed as a pure virtual class and implemented by pass subclasses
* at different granularities of Relay nodes.
*/
class PassNode : public RelayNode {
public:
/*!
* \brief Get the pass information/meta data.
*/
virtual PassInfo Info() const = 0;

/*!
* \brief Set the context information for a pass.
*
* \param pass_ctx The context information for a certain pass.
*/
virtual void SetContext(const PassContext& pass_ctx) = 0;

/*!
* \brief Execute the optimization pass using a functor.
*
* \param mod The module that an optimization pass runs on.
*
* \return The updated module.
*/
virtual Module operator()(const Module& mod) const = 0;

void VisitAttrs(tvm::AttrVisitor* v) override {}

static constexpr const char* _type_key = "relay.Pass";
TVM_DECLARE_BASE_NODE_INFO(PassNode, RelayNode);
};

/*!
* \brief The reference class of PassNode. It is the handle users interact
* with when creating and running passes.
*/
class Pass : public NodeRef {
public:
Pass() = default;
explicit Pass(NodePtr<tvm::Node> p) : NodeRef(p) {}

PassNode* operator->() const {
return static_cast<PassNode*>(this->node_.get());
}

using ContainerType = PassNode;
};

/*!
* \brief Create a module pass.
*
* \param pass_func The packed function that contains the optimization.
* \param opt_level The optimization level of the module pass.
* \param name The name of the module pass.
* \param required The list of the passes that the module pass is dependent on.
*
* \return The created module pass.
*/
Pass CreateModulePass(
const runtime::TypedPackedFunc<Module(Module, PassContext)>& pass_func,
int opt_level,
const std::string& name,
const tvm::Array<tvm::Expr>& required);

/*!
* \brief Create a function pass.
*
* \param pass_func The packed function that contains the optimization.
* \param opt_level The optimization level of the function pass.
* \param name The name of the function pass.
* \param required The list of the passes that the function pass is dependent on.
*
* \return The created function pass.
*/
Pass CreateFunctionPass(
const runtime::TypedPackedFunc<Function(Function, PassContext)>& pass_func,
int opt_level,
const std::string& name,
const tvm::Array<tvm::Expr>& required);
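
A function pass transforms each function inside the module, while the pass manager supplies the surrounding Module -> Module plumbing. A minimal Python sketch, assuming the decorator form exported as relay.function_pass in this change set:

    from tvm import relay

    @relay.function_pass(opt_level=1)
    def simplify(func, ctx):
        # func is a relay.Function, matching the C++ signature
        # Function(Function, PassContext); identity here as a placeholder.
        return func
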
/*!
* \brief Create a sequential pass.
*
* \param passes The optimization passes to be performed.
* \param opt_level The optimization level of the sequential pass.
* \param name The name of the sequential pass.
* \param required The list of the passes that the sequential pass is dependent on.
* \param disabled The disabled passes.
*
* \return The created sequential pass.
*/
Pass CreateSequentialPass(const tvm::Array<Pass>& passes,
int opt_level,
const std::string& name,
const tvm::Array<tvm::Expr>& required,
const tvm::Array<tvm::Expr>& disabled);
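
Because a sequential pass is itself a pass object, composed pipelines run through the same Module -> Module interface as any single pass. A sketch under the assumption that the Python binding relay.sequential_pass mirrors these parameters, reusing the passes defined in the earlier sketches:

    from tvm import relay

    seq = relay.sequential_pass([transform, simplify],
                                opt_level=2,
                                name="my_pipeline")
    # Runs the contained passes in order and returns the updated module.
    updated_mod = seq(relay.Module({}))
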

} // namespace pass

/*!
* \brief Infer the type of an expression.
*
8 changes: 4 additions & 4 deletions nnvm/tests/python/frontend/tensorflow/test_forward.py
@@ -50,7 +50,7 @@ def run_tvm_graph(graph_def, input_data, input_node, num_output=1, target='llvm'
else:
shape_dict = {input_node: input_data.shape}
dtype_dict = {input_node: input_data.dtype}

sym, params = nnvm.frontend.from_tensorflow(graph_def, layout=layout, shape=shape_dict, outputs=out_names)
graph, lib, params = nnvm.compiler.build(sym, target=target, target_host=target_host, shape=shape_dict,
dtype=dtype_dict, params=params)
@@ -126,7 +126,7 @@ def compare_tf_with_tvm(in_data, in_name, out_name, init_global_variables=False,

tvm_output = run_tvm_graph(final_graph_def, in_data, in_node,
num_output=len(out_node), target=device, out_names=out_name)
-# since the names from tensorflow and nnvm runs are not exactly same, 
+# since the names from tensorflow and nnvm runs are not exactly same,
# first len(tf_output) will be compared
for i in range(len(tf_output)):
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
@@ -619,7 +619,7 @@ def test_forward_multi_output():
out_name = ['out1:0', 'out2:0']
out_node = [out.strip(':0') for out in out_name]
in_node = [inp.strip(':0') for inp in in_name]

with tf.Session() as sess:
final_graph_def = tf.graph_util.convert_variables_to_constants(
sess, sess.graph.as_graph_def(add_shapes=True), out_node,)
@@ -1123,7 +1123,7 @@ def test_forward_leaky_relu():
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.leaky_relu(in1, alpha=0.4)
-compare_tf_with_tvm(inp_array, 'Placeholder:0', 'LeakyRelu/mul:0')
+compare_tf_with_tvm(inp_array, 'Placeholder:0', 'LeakyRelu:0')

def test_forward_elu():
ishape = (1, 3, 10, 10)
6 changes: 5 additions & 1 deletion python/tvm/autotvm/task/task.py
@@ -338,7 +338,7 @@ def _count_flop(exp):
expr.Max, expr.Min,
expr.EQ, expr.NE, expr.LT, expr.LE, expr.GT, expr.GE,
expr.And, expr.Or, expr.Not)):
-base = 1 if "float" in exp.a.dtype else 0
+base = 1

if isinstance(exp, expr.Not): # unary
return base + _count_flop(exp.a)
@@ -348,6 +348,10 @@ def _count_flop(exp):
return _count_flop(exp.condition) + max(_count_flop(exp.true_value),
_count_flop(exp.false_value))
if isinstance(exp, expr.Call):
+if exp.call_type == expr.Call.Halide:
+    # Ignore flops from indexing expressions.
+    return 0

return sum([_count_flop(x) for x in exp.args])

raise FlopCalculationError("Found unsupported operator in the compute expr")
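
Taken together, the two changes above mean that integer arithmetic now counts toward the FLOP total, while address arithmetic buried inside a tensor-indexing expression (a Halide call) is excluded. A minimal sketch of the effect, using compute_flop from this same file; the toy compute definition is an assumption for illustration:

    import tvm
    from tvm.autotvm.task.task import compute_flop

    A = tvm.placeholder((256,), name='A')
    # The integer multiply in the index 2 * i sits inside a Halide call and
    # contributes nothing; only the float add counts, once per element.
    B = tvm.compute((128,), lambda i: A[2 * i] + 1.0, name='B')
    s = tvm.create_schedule(B.op)
    print(compute_flop(s))  # expected: 128
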
11 changes: 11 additions & 0 deletions python/tvm/relay/__init__.py
@@ -79,6 +79,9 @@
var = expr.var
const = expr.const
bind = expr.bind
module_pass = ir_pass.module_pass
function_pass = ir_pass.function_pass
sequential_pass = ir_pass.sequential_pass

# ExprFunctor
ExprFunctor = expr_functor.ExprFunctor
@@ -90,3 +93,11 @@
# Param Serialization
save_param_dict = param_dict.save_param_dict
load_param_dict = param_dict.load_param_dict

# Pass manager
PassInfo = ir_pass.PassInfo
PassContext = ir_pass.PassContext
Pass = ir_pass.Pass
ModulePass = ir_pass.ModulePass
FunctionPass = ir_pass.FunctionPass
SequentialPass = ir_pass.SequentialPass
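
With these names re-exported at the package level, a constructed pass can be inspected directly. A small sketch, assuming the decorator form shown earlier and the attributes declared in the stub file below:

    from tvm import relay

    @relay.module_pass(opt_level=2)
    def transform(mod, ctx):
        return mod

    assert isinstance(transform, relay.ModulePass)
    print(transform.name, transform.opt_level)
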
57 changes: 56 additions & 1 deletion python/tvm/relay/_ir_pass.pyi
@@ -1,5 +1,60 @@
-from .env import Module
+from typing import Callable  # needed for the type comments below
+import tvm
+from . import ir
+from .base import NodeBase
+from .env import Module


class PassContext(NodeBase):
def __init__(self):
...

class PassInfo(NodeBase):
name = ... # type: str
opt_level = ... # type: int
required = ... # type: list

def __init__(self, name, opt_level, required):
# type: (str, int, list) -> None
...


class Pass(NodeBase):
def __init__(self):
...


class ModulePass(Pass):
name = ... # type: str
opt_level = ... # type: int
pass_func = ... # type: Callable
required = ... # type: list

def __init__(self, name, opt_level, pass_func, required):
# type: (str, int, Callable, list) -> None
...


class FunctionPass(Pass):
name = ... # type: str
opt_level = ... # type: int
pass_func = ... # type: Callable
required = ... # type: list

def __init__(self, name, opt_level, pass_func, required):
# type: (str, int, Callable, list) -> None
...


class SequentialPass(Pass):
name = ... # type: str
opt_level = ... # type: int
passes = ... # type: list
required = ... # type: list
disabled = ... # type: list

def __init__(self, name, opt_level, passes, required, disabled):
# type: (str, int, list, list, list) -> None
...


def check_expr(env: Module, expr: ir.Expr) -> ir.Type: ...
def generalize(env: Module, expr: ir.Expr) -> ir.Expr: ...