[docs] Add "Open with Colab" button to documentation #13627

Merged (10 commits) on Jan 7, 2023

970 changes: 196 additions & 774 deletions apps/microtvm/poetry.lock

Large diffs are not rendered by default.

14 changes: 6 additions & 8 deletions apps/microtvm/pyproject.toml
@@ -54,9 +54,6 @@ name = "microtvm"
version = "0.1.0"
description = ""
authors = []
packages = [
    { include = "tvm", from = "../../python" },
]

[tool.poetry.dependencies]
python = ">=3.7, <3.9"
@@ -98,22 +95,23 @@ onnxoptimizer = { version = "==0.2.6", optional = true }
onnxruntime = { version = "==1.9.0", optional = true }

# Pytorch (also used by ONNX)
torch = { version = "==1.11.0", optional = true }
torchvision = { version = "==0.12.0", optional = true }
torch = { version = "==1.11.0" }
torchvision = { version = "==0.12.0" }

future = { version = "==*", optional = true }

# Tensorflow frontend
tensorflow = {version = "^2.1", optional = true}
tensorflow-estimator = {version = "^2.1", optional = true}
tensorflow = {version = "^2.1"}
tensorflow-estimator = {version = "^2.1"}

# TFLite frontend
tflite = {version = "2.1.0", optional = true}
tflite = {version = "2.1.0"}
wheel = "*"
cloudpickle = "^1.6.0"
pyusb = "^1.2.1"



[tool.poetry.extras]
xgboost = ["xgboost"]
importer-caffe2 = ["torch"]
210 changes: 207 additions & 3 deletions docs/conf.py
@@ -29,15 +29,17 @@
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from functools import partial
import gc
from importlib import import_module
import inspect
from hashlib import md5
import os
from pathlib import Path
import re
import sys

import sphinx_gallery

from textwrap import dedent, indent
from unittest.mock import patch

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
@@ -84,6 +86,198 @@ def git_describe_version(original_version):
version = git_describe_version(tvm.__version__)
release = version


def monkey_patch(module_name, func_name):
"""Helper function for monkey-patching library functions.

Used to modify a few sphinx-gallery behaviors to make the "Open in Colab" button work correctly.
Should be called as a decorator with arguments. Note this behaves differently from unittest's
@mock.patch, as our monkey_patch decorator should be placed on the new version of the function.
"""
module = import_module(module_name)
original_func = getattr(module, func_name)

def decorator(function):
updated_func = partial(function, real_func=original_func)
setattr(module, func_name, updated_func)
return updated_func

return decorator
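
# Editor's note: a minimal, hypothetical illustration of the decorator above (not part of this
# diff). It wraps json.dumps so the original implementation is passed in as `real_func`.
import json

@monkey_patch("json", "dumps")
def dumps(obj, real_func, **kwargs):
    kwargs.setdefault("sort_keys", True)  # tweak behavior before delegating
    return real_func(obj, **kwargs)       # call the original json.dumps

json.dumps({"b": 1, "a": 2})  # -> '{"a": 2, "b": 1}'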


CURRENT_FILE_CONF = None


@monkey_patch("sphinx_gallery.py_source_parser", "split_code_and_text_blocks")
def split_code_and_text_blocks(source_file, return_node, real_func):
"""Monkey-patch split_code_and_text_blocks to expose sphinx-gallery's file-level config.

It's kinda gross, but we need access to file_conf to detect the requires_cuda flag.
"""
global CURRENT_FILE_CONF
file_conf, blocks, node = real_func(source_file, return_node)
CURRENT_FILE_CONF = file_conf
return (file_conf, blocks, node)


# This header replaces the default sphinx-gallery one in sphinx_gallery/gen_rst.py.
COLAB_HTML_HEADER = """
.. DO NOT EDIT. THIS FILE WAS AUTOMATICALLY GENERATED BY
.. TVM'S MONKEY-PATCHED VERSION OF SPHINX-GALLERY. TO MAKE
.. CHANGES, EDIT THE SOURCE PYTHON FILE:
.. "{python_file}"

.. only:: html

    .. note::
        :class: sphx-glr-download-link-note

        This tutorial can be used interactively with Google Colab! You can also click
        :ref:`here <sphx_glr_download_{ref_name}>` to run the Jupyter notebook locally.

        .. image:: {button_svg}
            :align: center
            :target: {colab_url}
            :width: 300px

.. rst-class:: sphx-glr-example-title

.. _sphx_glr_{ref_name}:

"""

# Google Colab allows opening .ipynb files on GitHub by appending a GitHub path to this base URL.
COLAB_URL_BASE = "https://colab.research.google.com/github"

# The GitHub path where the site is automatically deployed by tvm-bot.
IPYTHON_GITHUB_BASE = "apache/tvm-site/blob/asf-site/docs/_downloads/"

# The SVG image of the "Open in Colab" button.
BUTTON = "https://raw.githubusercontent.com/apache/web-data/main/images/utilities/colab_button.svg"


@monkey_patch("sphinx_gallery.gen_rst", "save_rst_example")
def save_rst_example(example_rst, example_file, time_elapsed, memory_used, gallery_conf, real_func):
"""Monkey-patch save_rst_example to include the "Open in Colab" button."""

# The url is the md5 hash of the notebook path.
example_fname = os.path.relpath(example_file, gallery_conf["src_dir"])
ref_fname = example_fname.replace(os.path.sep, "_")
notebook_path = example_fname[:-2] + "ipynb"
digest = md5(notebook_path.encode()).hexdigest()

# Fixed documentation versions must link to different (earlier) .ipynb notebooks.
colab_url = f"{COLAB_URL_BASE}/{IPYTHON_GITHUB_BASE}"
if "dev" not in version:
colab_url += version + "/"
colab_url += digest + "/" + os.path.basename(notebook_path)

new_header = COLAB_HTML_HEADER.format(
python_file=example_fname, ref_name=ref_fname, colab_url=colab_url, button_svg=BUTTON
)
with patch("sphinx_gallery.gen_rst.EXAMPLE_HEADER", new_header):
real_func(example_rst, example_file, time_elapsed, memory_used, gallery_conf)
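
# Editor's note: an illustrative sketch (not part of this diff) of how the Colab URL above is
# built for a hypothetical tutorial path, assuming a dev build of TVM (no version component).
_example_fname = os.path.join("how_to", "compile_models", "from_onnx.py")  # hypothetical path
_notebook = _example_fname[:-2] + "ipynb"
_digest = md5(_notebook.encode()).hexdigest()
_colab_url = f"{COLAB_URL_BASE}/{IPYTHON_GITHUB_BASE}{_digest}/{os.path.basename(_notebook)}"
# -> https://colab.research.google.com/github/apache/tvm-site/blob/asf-site/docs/_downloads/<md5>/from_onnx.ipynb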


INCLUDE_DIRECTIVE_RE = re.compile(r"^([ \t]*)\.\. include::\s*(.+)\n", flags=re.M)
COMMENT_DIRECTIVE_RE = re.compile(r"^\.\.(?: .*)?\n(?:(?: .*)?\n)*", flags=re.M)
ADMONITION_DIRECTIVE_RE = re.compile(rf"^\.\. admonition:: *(.*)\n((?:(?: .*)?\n)*)\n", flags=re.M)
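
# Editor's note: a small self-check (not part of this diff) showing what the patterns above match.
# The ".. include:: ../setup.rst" path is hypothetical.
assert INCLUDE_DIRECTIVE_RE.match(".. include:: ../setup.rst\n").group(2) == "../setup.rst"
# COMMENT_DIRECTIVE_RE matches a whole explicit-markup block, so leftover RST comments can be
# stripped from the converted Markdown after sphinx-gallery's rst2md has run.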


@monkey_patch("sphinx_gallery.notebook", "rst2md")
def rst2md(text, gallery_conf, target_dir, heading_levels, real_func):
"""Monkey-patch rst2md to support comments and some include directives.

Currently, only include directives without any parameters are supported. Also, note that in
reStructuredText any unrecognized explicit markup block is treated as a comment (see
https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html#comments).

For callouts, we only replace generic "admonition" directives. All others should be replaced by
sphinx-gallery's rst2md. Note that the "alert" and "alert-info" tags are support in most IPython
notebooks, but they render kinda funky on Colab.
"""

def load_include(match):
full_path = os.path.join(target_dir, match.group(2))
with open(full_path) as f:
lines = f.read()
indented = indent(lines, match.group(1)) + "\n"
return indented

text = re.sub(INCLUDE_DIRECTIVE_RE, load_include, text)

# Replace generic, titled admonitions with indented text. Other admonitions (e.g. .. note::)
# will be handled by sphinx-gallery's
def rewrite_generic_admonition(match):
title, text = match.groups()
stripped_text = dedent(text).strip()
return f'<div class="alert alert-info"><h4>{title}</h4><p>{stripped_text}</p></div>'

text = re.sub(ADMONITION_DIRECTIVE_RE, rewrite_generic_admonition, text)

# Call the real function, and then strip any remaining directives (i.e. comments)
text = real_func(text, gallery_conf, target_dir, heading_levels)
text = re.sub(COMMENT_DIRECTIVE_RE, "", text)
return text
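
# Editor's note: an illustrative sketch (not part of this diff) of what the admonition rewrite
# inside rst2md does to a small, hypothetical RST fragment.
_sample = (
    ".. admonition:: Heads up\n"
    "\n"
    "    This tutorial needs a CUDA-enabled build of TVM.\n"
    "\n"
)

def _demo_rewrite(match):
    _title, _body = match.groups()  # same transformation as rewrite_generic_admonition above
    return f'<div class="alert alert-info"><h4>{_title}</h4><p>{dedent(_body).strip()}</p></div>'

re.sub(ADMONITION_DIRECTIVE_RE, _demo_rewrite, _sample)
# -> '<div class="alert alert-info"><h4>Heads up</h4><p>This tutorial needs a CUDA-enabled build of TVM.</p></div>'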


INSTALL_TVM_DEV = f"""\
%%shell
# Installs the latest dev build of TVM from PyPI. If you wish to build
# from source, see https://tvm.apache.org/docs/install/from_source.html
pip install apache-tvm --pre"""

INSTALL_TVM_FIXED = f"""\
%%shell
# Installs TVM version {version} from PyPI. If you wish to build
# from source, see https://tvm.apache.org/docs/install/from_source.html
pip install apache-tvm=={version}"""

INSTALL_TVM_CUDA_DEV = f"""\
%%shell
# Installs the latest dev build of TVM from PyPI, with CUDA enabled. To use this,
# you must request a Google Colab instance with a GPU by going to Runtime ->
# Change runtime type -> Hardware accelerator -> GPU. If you wish to build from
# source, see https://tvm.apache.org/docs/install/from_source.html
pip install tlcpack-nightly-cu113 --pre -f https://tlcpack.ai/wheels"""

INSTALL_TVM_CUDA_FIXED = f"""\
%%shell
# Installs TVM version {version} from PyPI, with CUDA enabled. To use this,
# you must request a Google Colab instance with a GPU by going to Runtime ->
# Change runtime type -> Hardware accelerator -> GPU. If you wish to build from
# source, see https://tvm.apache.org/docs/install/from_source.html
pip install apache-tvm-cu113=={version} -f https://tlcpack.ai/wheels"""


@monkey_patch("sphinx_gallery.gen_rst", "jupyter_notebook")
def jupyter_notebook(script_blocks, gallery_conf, target_dir, real_func):
"""Monkey-patch sphinx-gallery to add a TVM import block to each IPython notebook.

If we had only one import block, we could skip the patching and just set first_notebook_cell.
However, how we import TVM depends on if we are using a fixed or dev version, and whether we
will use the GPU.

Tutorials requiring a CUDA-enabled build of TVM should use the flag:
# sphinx_gallery_requires_cuda = True
"""

requires_cuda = CURRENT_FILE_CONF.get("requires_cuda", False)
fixed_version = not "dev" in version

if fixed_version and requires_cuda:
install_block = INSTALL_TVM_CUDA_FIXED
elif fixed_version and not requires_cuda:
install_block = INSTALL_TVM_FIXED
elif not fixed_version and requires_cuda:
install_block = INSTALL_TVM_CUDA_DEV
else:
install_block = INSTALL_TVM_DEV

new_conf = {**gallery_conf, "first_notebook_cell": install_block}
return real_func(script_blocks, new_conf, target_dir)


# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones
extensions = [
@@ -506,6 +700,16 @@ def process_docstring(app, what, name, obj, options, lines):
from legacy_redirect import build_legacy_redirect


def strip_ipython_magic(app, docname, source):
"""Prevents IPython magic commands from being rendered in HTML files.

TODO rework this function to remove IPython magic commands from include directives too.
"""
for i in range(len(source)):
source[i] = re.sub(r"%%.*\n\s*", "", source[i])
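
# Editor's note: a quick self-check (not part of this diff) of the substitution above. It removes
# a leading cell magic such as "%%shell" together with the whitespace that follows it.
assert re.sub(r"%%.*\n\s*", "", "%%shell\npip install apache-tvm --pre") == "pip install apache-tvm --pre"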


def setup(app):
app.connect("source-read", strip_ipython_magic)
app.connect("autodoc-process-docstring", process_docstring)
app.connect("build-finished", build_legacy_redirect(tvm_path))
7 changes: 3 additions & 4 deletions gallery/how_to/compile_models/from_coreml.py
@@ -23,13 +23,12 @@

This article is an introductory tutorial to deploy CoreML models with Relay.

For us to begin with, coremltools module is required to be installed.

A quick solution is to install via pip
To begin, we must install coremltools:

.. code-block:: bash

    pip install -U coremltools --user
Contributor: i'm curious, does --user not work with colab? It's not the worst thing to tell folks new to managing python venvs.

Member Author: --user works as you would expect in Colab. However, the other tutorials don't use that flag for their dependencies, so I changed this to be consistent. Might be out of scope - what do you think?

Contributor: ah yeah just wanted to understand the why here

    %%shell
    pip install coremltools

or please refer to official site
https://github.com/apple/coremltools
5 changes: 3 additions & 2 deletions gallery/how_to/compile_models/from_darknet.py
@@ -27,8 +27,9 @@

.. code-block:: bash

    pip install cffi
    pip install opencv-python
    %%shell
    pip install cffi opencv-python

"""

# sphinx_gallery_start_ignore
7 changes: 4 additions & 3 deletions gallery/how_to/compile_models/from_keras.py
@@ -19,7 +19,7 @@
=====================
**Author**: `Yuwei Hu <https://Huyuwei.github.io/>`_

This article is an introductory tutorial to deploy keras models with Relay.
This article is an introductory tutorial to deploy Keras models with Relay.

For us to begin with, keras should be installed.
Tensorflow is also required since it's used as the default backend of keras.
@@ -28,14 +28,15 @@

.. code-block:: bash

    pip install -U keras --user
    pip install -U tensorflow --user
    %%shell
    pip install keras tensorflow

or please refer to official site
https://keras.io/#installation
"""

# sphinx_gallery_start_ignore
# sphinx_gallery_requires_cuda = True
from tvm import testing

testing.utils.install_request_hook(depth=3)
10 changes: 4 additions & 6 deletions gallery/how_to/compile_models/from_mxnet.py
@@ -22,21 +22,19 @@
**Author**: `Joshua Z. Zhang <https://zhreshold.github.io/>`_, \
`Kazutaka Morita <https://github.com/kazum>`_

This article is an introductory tutorial to deploy mxnet models with Relay.

For us to begin with, mxnet module is required to be installed.

A quick solution is
This article is an introductory tutorial to deploy mxnet models with Relay. To begin, we must install `mxnet`:

.. code-block:: bash

    pip install mxnet --user
    %%shell
    pip install mxnet

or please refer to official installation guide.
https://mxnet.apache.org/versions/master/install/index.html
"""

# sphinx_gallery_start_ignore
# sphinx_gallery_requires_cuda = True
from tvm import testing

testing.utils.install_request_hook(depth=3)
4 changes: 3 additions & 1 deletion gallery/how_to/compile_models/from_oneflow.py
@@ -27,8 +27,9 @@

.. code-block:: bash

    %%shell
    pip install flowvision==0.1.0
    python3 -m pip install -f https://release.oneflow.info oneflow==0.7.0+cpu
    pip install -f https://release.oneflow.info oneflow==0.7.0+cpu

or please refer to official site:
https://github.com/Oneflow-Inc/oneflow
@@ -37,6 +38,7 @@
"""

# sphinx_gallery_start_ignore
# sphinx_gallery_requires_cuda = True
from tvm import testing

testing.utils.install_request_hook(depth=3)
7 changes: 3 additions & 4 deletions gallery/how_to/compile_models/from_onnx.py
@@ -21,13 +21,12 @@

This article is an introductory tutorial to deploy ONNX models with Relay.

For us to begin with, ONNX package must be installed.

A quick solution is to install protobuf compiler, and
For us to begin with, ONNX package must be installed:

.. code-block:: bash

    pip install --user onnx onnxoptimizer
    %%shell
    pip install onnx onnxoptimizer

or please refer to official site.
https://github.com/onnx/onnx