From 5698ecf8257c68a2bfb74f4958a3281e6be285a1 Mon Sep 17 00:00:00 2001 From: Qiang Zhang Date: Tue, 15 Sep 2020 22:33:29 +0800 Subject: [PATCH] [DOC] Fix Some Broken Web Links (#6475) --- apps/wasm-standalone/README.md | 2 +- docs/dev/inferbound.rst | 8 ++++---- tutorials/frontend/build_gcn.py | 2 +- tutorials/optimize/opt_matmul_auto_tensorcore.py | 2 +- web/README.md | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/apps/wasm-standalone/README.md b/apps/wasm-standalone/README.md index 4b6678797795..1456000c2669 100644 --- a/apps/wasm-standalone/README.md +++ b/apps/wasm-standalone/README.md @@ -39,7 +39,7 @@ TVM hardware support -As demonstrated in TVM runtime [tutorials](https://tvm.apache.org/docs/tutorials/relay_quick_start.html), TVM already supports WASM as the optional hardware backend, so we can leverage the features of WebAssembly (portability, security) and TVM runtime (domain-specific, optimization) to build a flexible and auto-optimized graph compiler for all deep learning frameworks. +As demonstrated in TVM runtime [tutorials](https://tvm.apache.org/docs/tutorials/get_started/relay_quick_start.html), TVM already supports WASM as the optional hardware backend, so we can leverage the features of WebAssembly (portability, security) and TVM runtime (domain-specific, optimization) to build a flexible and auto-optimized graph compiler for all deep learning frameworks. ## Framework Landscape diff --git a/docs/dev/inferbound.rst b/docs/dev/inferbound.rst index 63954ac908f8..69566006893e 100644 --- a/docs/dev/inferbound.rst +++ b/docs/dev/inferbound.rst @@ -22,7 +22,7 @@ InferBound Pass ******************************************* -The InferBound pass is run after normalize, and before ScheduleOps `build_module.py `_. The main job of InferBound is to create the bounds map, which specifies a Range for each IterVar in the program. 
These bounds are then passed to ScheduleOps, where they are used to set the extents of For loops, see `MakeLoopNest `_, and to set the sizes of allocated buffers (`BuildRealize `_), among other uses. +The InferBound pass is run after normalize, and before ScheduleOps `build_module.py `_. The main job of InferBound is to create the bounds map, which specifies a Range for each IterVar in the program. These bounds are then passed to ScheduleOps, where they are used to set the extents of For loops, see `MakeLoopNest `_, and to set the sizes of allocated buffers (`BuildRealize `_), among other uses. The output of InferBound is a map from IterVar to Range: @@ -53,9 +53,9 @@ Therefore, let's review the Range and IterVar classes: }; } -Note that IterVarNode also contains a Range ``dom``. This ``dom`` may or may not have a meaningful value, depending on when the IterVar was created. For example, when ``tvm.compute`` is called, an `IterVar is created `_ for each axis and reduce axis, with dom's equal to the shape supplied in the call to ``tvm.compute``. +Note that IterVarNode also contains a Range ``dom``. This ``dom`` may or may not have a meaningful value, depending on when the IterVar was created. For example, when ``tvm.compute`` is called, an `IterVar is created `_ for each axis and reduce axis, with dom's equal to the shape supplied in the call to ``tvm.compute``. -On the other hand, when ``tvm.split`` is called, `IterVars are created `_ for the inner and outer axes, but these IterVars are not given a meaningful ``dom`` value. +On the other hand, when ``tvm.split`` is called, `IterVars are created `_ for the inner and outer axes, but these IterVars are not given a meaningful ``dom`` value. In any case, the ``dom`` member of an IterVar is never modified during InferBound. However, keep in mind that the ``dom`` member of an IterVar is sometimes used as default value for the Ranges InferBound computes. 
@@ -117,7 +117,7 @@ Tensors haven't been mentioned yet, but in the context of TVM, a Tensor represen int value_index; }; -In the Operation class declaration above, we can see that each operation also has a list of InputTensors. Thus the stages of the schedule form a DAG, where each stage is a node in the graph. There is an edge in the graph from Stage A to Stage B, if the operation of Stage B has an input tensor whose source operation is the op of Stage A. Put simply, there is an edge from A to B, if B consumes a tensor produced by A. See the diagram below. This graph is created at the beginning of InferBound, by a call to `CreateReadGraph `_. +In the Operation class declaration above, we can see that each operation also has a list of InputTensors. Thus the stages of the schedule form a DAG, where each stage is a node in the graph. There is an edge in the graph from Stage A to Stage B, if the operation of Stage B has an input tensor whose source operation is the op of Stage A. Put simply, there is an edge from A to B, if B consumes a tensor produced by A. See the diagram below. This graph is created at the beginning of InferBound, by a call to `CreateReadGraph `_. .. image:: https://raw.githubusercontent.com/tvmai/tvmai.github.io/master/images/docs/inferbound/stage_graph.png :align: center diff --git a/tutorials/frontend/build_gcn.py b/tutorials/frontend/build_gcn.py index b478694ab1fb..5c571ef1ff25 100644 --- a/tutorials/frontend/build_gcn.py +++ b/tutorials/frontend/build_gcn.py @@ -164,7 +164,7 @@ def evaluate(data, logits): # Define Graph Convolution Layer in Relay # --------------------------------------- # To run GCN on TVM, we first need to implement Graph Convolution Layer. 
-# You may refer to https://github.com/dmlc/dgl/blob/master/python/dgl/nn/mxnet/conv.py for a GraphConv Layer implemented in DGL with MXNet Backend +# You may refer to https://github.com/dmlc/dgl/blob/master/python/dgl/nn/mxnet/conv/graphconv.py for a GraphConv Layer implemented in DGL with MXNet Backend # # The layer is defined with below operations, note that we apply two transposes to keep adjacency matrix on right hand side of sparse_dense operator, # this method is temporary and will be updated in next few weeks when we have sparse matrix transpose and support for left sparse operator. diff --git a/tutorials/optimize/opt_matmul_auto_tensorcore.py b/tutorials/optimize/opt_matmul_auto_tensorcore.py index 45a0d4874105..d81eca56210e 100644 --- a/tutorials/optimize/opt_matmul_auto_tensorcore.py +++ b/tutorials/optimize/opt_matmul_auto_tensorcore.py @@ -96,7 +96,7 @@ def matmul_nn(A, B, L, dtype="float16", layout="NN"): # (2) The warp tile size is not 16x16x16 on CUDA9, or not one of {16x16x16, 32x8x16, 8x32x16} on CUDA version >= 10.0. # # In this schedule, storage_align is used to reduce bank conflicts of shared memory. Please refer to this -# `doc `_ +# `doc `_ # for the usage of storage_align primitive. In short, we need to add an offset to some shared memory buffer # to reduce bank conflicts. # According to the `wmma doc `_, diff --git a/web/README.md b/web/README.md index 358884ca26b1..43540c6665c1 100644 --- a/web/README.md +++ b/web/README.md @@ -63,7 +63,7 @@ This command will create the tvmjs library that we can use to interface with the Check code snippet in -- [tests/python/prepare_test_libs.py](https://github.com/apache/incubator-tvm/tree/master/web/tests/pythob/prepare_test_libs.py) +- [tests/python/prepare_test_libs.py](https://github.com/apache/incubator-tvm/tree/master/web/tests/python/prepare_test_libs.py) shows how to create a wasm library that links with tvm runtime. 
- Note that all wasm libraries have to be created using the `--system-lib` option - emcc.create_wasm will automatically link the runtime library `dist/wasm/libtvm_runtime.bc`