diff --git a/.github/workflows/cc_bot.yml b/.github/workflows/cc_bot.yml
new file mode 100644
index 0000000000000..873fafa82a601
--- /dev/null
+++ b/.github/workflows/cc_bot.yml
@@ -0,0 +1,46 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# GH actions.
+# We use it to cover windows and mac builds
+# Jenkins is still the primary CI
+
+name: PR
+
+on:
+ # See https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target
+ pull_request_target:
+ types: [assigned, opened, synchronize, reopened, edited, ready_for_review]
+
+concurrency:
+ group: PR-${{ github.event.pull_request.number }}
+ cancel-in-progress: true
+
+jobs:
+ cc-reviewers:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ submodules: "recursive"
+ - name: Add cc'ed reviewers
+ env:
+ PR: ${{ toJson(github.event.pull_request) }}
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: |
+ set -eux
+          python tests/scripts/github_cc_reviewers.py
diff --git a/Jenkinsfile b/Jenkinsfile
index 64b8465a1ea1b..1cd8575fa3fdc 100755
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -46,11 +46,11 @@ import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
// NOTE: these lines are scanned by docker/dev_common.sh. Please update the regex as needed. -->
ci_lint = "tlcpack/ci-lint:v0.67"
-ci_gpu = "tlcpack/ci-gpu:v0.79"
+ci_gpu = "tlcpack/ci-gpu:v0.80"
ci_cpu = "tlcpack/ci-cpu:v0.80"
ci_wasm = "tlcpack/ci-wasm:v0.71"
ci_i386 = "tlcpack/ci-i386:v0.74"
-ci_qemu = "tlcpack/ci-qemu:v0.08"
+ci_qemu = "tlcpack/ci-qemu:v0.09"
ci_arm = "tlcpack/ci-arm:v0.06"
// <--- End of regex-scanned config.
diff --git a/cmake/modules/Hexagon.cmake b/cmake/modules/Hexagon.cmake
index d982601fc1bb1..ab6f1d2ea52ac 100644
--- a/cmake/modules/Hexagon.cmake
+++ b/cmake/modules/Hexagon.cmake
@@ -301,9 +301,6 @@ if(USE_HEXAGON_RPC)
${HEXAGON_RPC_OUTPUT} COPYONLY)
set_directory_properties(PROPERTIES ADDITIONAL_MAKE_CLEAN_FILES "${HEXAGON_RPC_OUTPUT}")
-
- # Used in `src/target/llvm/llvm_common.h`
- add_definitions(-DTVM_USE_HEXAGON_LLVM)
endif()
if(USE_HEXAGON_DEVICE STREQUAL "${PICK_SIM}")
diff --git a/cmake/modules/HexagonSDK.cmake b/cmake/modules/HexagonSDK.cmake
index e9bc2d2f873f5..42785116214e1 100644
--- a/cmake/modules/HexagonSDK.cmake
+++ b/cmake/modules/HexagonSDK.cmake
@@ -31,7 +31,7 @@ function(find_hexagon_sdk_root HEXAGON_SDK_PATH HEXAGON_ARCH)
# Initial verification of the Hexagon SDK.
message(STATUS "Checking Hexagon SDK root: ${HEXAGON_SDK_PATH}")
- tvm_file_glob(GLOB_RECURSE VERSION_HEADERS "${HEXAGON_SDK_PATH}/*/version.h")
+ file(GLOB_RECURSE VERSION_HEADERS "${HEXAGON_SDK_PATH}/*/version.h")
if(VERSION_HEADERS)
foreach(HEADER IN LISTS VERSION_HEADERS)
if(HEADER MATCHES "incs/version.h$")
diff --git a/docker/Dockerfile.ci_arm b/docker/Dockerfile.ci_arm
index 974998b9d6fe1..73ff0aee7d805 100644
--- a/docker/Dockerfile.ci_arm
+++ b/docker/Dockerfile.ci_arm
@@ -26,6 +26,17 @@ RUN apt-get install -y ca-certificates gnupg2
COPY install/ubuntu_install_core.sh /install/ubuntu_install_core.sh
RUN bash /install/ubuntu_install_core.sh
+# Rust env
+COPY install/ubuntu_install_rust.sh /install/ubuntu_install_rust.sh
+RUN bash /install/ubuntu_install_rust.sh
+ENV RUSTUP_HOME /opt/rust
+ENV CARGO_HOME /opt/rust
+ENV PATH $PATH:$CARGO_HOME/bin
+
+# sccache
+COPY install/ubuntu_install_sccache.sh /install/ubuntu_install_sccache.sh
+RUN bash /install/ubuntu_install_sccache.sh
+
COPY install/ubuntu_install_llvm.sh /install/ubuntu_install_llvm.sh
RUN bash /install/ubuntu_install_llvm.sh
@@ -33,7 +44,7 @@ COPY install/ubuntu1804_install_python.sh /install/ubuntu1804_install_python.sh
RUN bash /install/ubuntu1804_install_python.sh
# Globally disable pip cache
-RUN pip config set global.cache-dir false
+RUN pip config set global.no-cache-dir true
COPY install/ubuntu_install_cmake_source.sh /install/ubuntu_install_cmake_source.sh
RUN bash /install/ubuntu_install_cmake_source.sh
diff --git a/docker/Dockerfile.ci_cpu b/docker/Dockerfile.ci_cpu
index c24df99de6ecc..962d738a9fc2e 100644
--- a/docker/Dockerfile.ci_cpu
+++ b/docker/Dockerfile.ci_cpu
@@ -28,7 +28,7 @@ COPY install/ubuntu1804_install_python.sh /install/ubuntu1804_install_python.sh
RUN bash /install/ubuntu1804_install_python.sh
# Globally disable pip cache
-RUN pip config set global.cache-dir false
+RUN pip config set global.no-cache-dir true
COPY install/ubuntu_install_python_package.sh /install/ubuntu_install_python_package.sh
RUN bash /install/ubuntu_install_python_package.sh
@@ -53,6 +53,10 @@ ENV RUSTUP_HOME /opt/rust
ENV CARGO_HOME /opt/rust
ENV PATH $PATH:$CARGO_HOME/bin
+# wasmtime
+COPY install/ubuntu_install_wasmtime.sh /install/ubuntu_install_wasmtime.sh
+RUN bash /install/ubuntu_install_wasmtime.sh
+
# AutoTVM deps
COPY install/ubuntu_install_redis.sh /install/ubuntu_install_redis.sh
RUN bash /install/ubuntu_install_redis.sh
@@ -126,3 +130,7 @@ ENV PATH /opt/arm/gcc-arm-none-eabi/bin:/opt/arm/FVP_Corstone_SSE-300/models/Lin
# PaddlePaddle deps
COPY install/ubuntu_install_paddle.sh /install/ubuntu_install_paddle.sh
RUN bash /install/ubuntu_install_paddle.sh
+
+# sccache
+COPY install/ubuntu_install_sccache.sh /install/ubuntu_install_sccache.sh
+RUN bash /install/ubuntu_install_sccache.sh
diff --git a/docker/Dockerfile.ci_gpu b/docker/Dockerfile.ci_gpu
index 3d189e3858b0a..d9ca255502fa5 100644
--- a/docker/Dockerfile.ci_gpu
+++ b/docker/Dockerfile.ci_gpu
@@ -29,7 +29,7 @@ COPY install/ubuntu1804_install_python.sh /install/ubuntu1804_install_python.sh
RUN bash /install/ubuntu1804_install_python.sh
# Globally disable pip cache
-RUN pip config set global.cache-dir false
+RUN pip config set global.no-cache-dir true
COPY install/ubuntu1804_install_llvm.sh /install/ubuntu1804_install_llvm.sh
RUN bash /install/ubuntu1804_install_llvm.sh
@@ -101,6 +101,10 @@ ENV RUSTUP_HOME /opt/rust
ENV CARGO_HOME /opt/rust
ENV PATH $PATH:$CARGO_HOME/bin
+# wasmtime
+COPY install/ubuntu_install_wasmtime.sh /install/ubuntu_install_wasmtime.sh
+RUN bash /install/ubuntu_install_wasmtime.sh
+
# AutoTVM deps
COPY install/ubuntu_install_redis.sh /install/ubuntu_install_redis.sh
RUN bash /install/ubuntu_install_redis.sh
@@ -117,6 +121,10 @@ RUN bash /install/ubuntu_install_universal.sh
COPY install/ubuntu_install_papi.sh /install/ubuntu_install_papi.sh
RUN bash /install/ubuntu_install_papi.sh "cuda rocm"
+# sccache
+COPY install/ubuntu_install_sccache.sh /install/ubuntu_install_sccache.sh
+RUN bash /install/ubuntu_install_sccache.sh
+
# Environment variables
ENV PATH=/usr/local/nvidia/bin:${PATH}
ENV PATH=/usr/local/cuda/bin:${PATH}
diff --git a/docker/Dockerfile.ci_i386 b/docker/Dockerfile.ci_i386
index 564731c12d36e..d4ce370c42051 100644
--- a/docker/Dockerfile.ci_i386
+++ b/docker/Dockerfile.ci_i386
@@ -32,7 +32,7 @@ COPY install/ubuntu_install_python.sh /install/ubuntu_install_python.sh
RUN bash /install/ubuntu_install_python.sh
# Globally disable pip cache
-RUN pip config set global.cache-dir false
+RUN pip config set global.no-cache-dir true
COPY install/ubuntu_install_cmake_source.sh /install/ubuntu_install_cmake_source.sh
RUN bash /install/ubuntu_install_cmake_source.sh
diff --git a/docker/Dockerfile.ci_lint b/docker/Dockerfile.ci_lint
index 20bcfe6de903a..08d3ebf14e14d 100644
--- a/docker/Dockerfile.ci_lint
+++ b/docker/Dockerfile.ci_lint
@@ -28,7 +28,7 @@ COPY install/ubuntu1804_install_python.sh /install/ubuntu1804_install_python.sh
RUN bash /install/ubuntu1804_install_python.sh
# Globally disable pip cache
-RUN pip config set global.cache-dir false
+RUN pip config set global.no-cache-dir true
RUN apt-get update && apt-get install -y doxygen graphviz curl
@@ -41,6 +41,10 @@ ENV RUSTUP_HOME /opt/rust
ENV CARGO_HOME /opt/rust
ENV PATH $PATH:$CARGO_HOME/bin
+# wasmtime
+COPY install/ubuntu_install_wasmtime.sh /install/ubuntu_install_wasmtime.sh
+RUN bash /install/ubuntu_install_wasmtime.sh
+
# java deps for rat
COPY install/ubuntu_install_java.sh /install/ubuntu_install_java.sh
RUN bash /install/ubuntu_install_java.sh
diff --git a/docker/Dockerfile.ci_qemu b/docker/Dockerfile.ci_qemu
index f4f774697f2fe..2cae59c35d672 100644
--- a/docker/Dockerfile.ci_qemu
+++ b/docker/Dockerfile.ci_qemu
@@ -29,7 +29,7 @@ RUN bash /install/ubuntu1804_install_python_venv.sh
ENV PATH=/opt/tvm-venv/bin:/opt/zephyr-sdk/sysroots/x86_64-pokysdk-linux/usr/bin:$PATH
# Globally disable pip cache
-RUN pip config set global.cache-dir false
+RUN pip config set global.no-cache-dir true
COPY install/ubuntu_install_python_package.sh /install/ubuntu_install_python_package.sh
RUN bash /install/ubuntu_install_python_package.sh
@@ -42,6 +42,7 @@ COPY install/ubuntu_install_rust.sh /install/ubuntu_install_rust.sh
RUN bash /install/ubuntu_install_rust.sh
ENV RUSTUP_HOME /opt/rust
ENV CARGO_HOME /opt/rust
+ENV PATH $PATH:$CARGO_HOME/bin
# AutoTVM deps
COPY install/ubuntu_install_redis.sh /install/ubuntu_install_redis.sh
@@ -59,6 +60,10 @@ RUN bash /install/ubuntu_install_tensorflow.sh
COPY install/ubuntu_install_tflite.sh /install/ubuntu_install_tflite.sh
RUN bash /install/ubuntu_install_tflite.sh
+# sccache
+COPY install/ubuntu_install_sccache.sh /install/ubuntu_install_sccache.sh
+RUN bash /install/ubuntu_install_sccache.sh
+
# Zephyr SDK deps
COPY install/ubuntu_install_zephyr.sh /install/ubuntu_install_zephyr.sh
COPY install/ubuntu_init_zephyr_project.sh /install/ubuntu_init_zephyr_project.sh
diff --git a/docker/Dockerfile.ci_wasm b/docker/Dockerfile.ci_wasm
index 03a1ded5572fa..1c901f12a2ec6 100644
--- a/docker/Dockerfile.ci_wasm
+++ b/docker/Dockerfile.ci_wasm
@@ -25,7 +25,7 @@ COPY install/ubuntu1804_install_python.sh /install/ubuntu1804_install_python.sh
RUN bash /install/ubuntu1804_install_python.sh
# Globally disable pip cache
-RUN pip config set global.cache-dir false
+RUN pip config set global.no-cache-dir true
COPY install/ubuntu_install_python_package.sh /install/ubuntu_install_python_package.sh
RUN bash /install/ubuntu_install_python_package.sh
diff --git a/docker/build.sh b/docker/build.sh
index 3ac74835f0abb..39fe7a0242461 100755
--- a/docker/build.sh
+++ b/docker/build.sh
@@ -199,7 +199,9 @@ if [[ -n ${COMMAND} ]]; then
echo ${DOCKER_BINARY}
${DOCKER_BINARY} run --rm --pid=host \
-v ${WORKSPACE}:/workspace \
+ ${SSH_AUTH_SOCK:+-v $SSH_AUTH_SOCK:/ssh-agent} \
-w /workspace \
+ ${SSH_AUTH_SOCK:+-e "SSH_AUTH_SOCK=/ssh-agent"} \
-e "CI_BUILD_HOME=/workspace" \
-e "CI_BUILD_USER=$(id -u -n)" \
-e "CI_BUILD_UID=$(id -u)" \
diff --git a/docker/install/ubuntu_install_rust.sh b/docker/install/ubuntu_install_rust.sh
index c07c0038a36b4..58f8256b03b3b 100755
--- a/docker/install/ubuntu_install_rust.sh
+++ b/docker/install/ubuntu_install_rust.sh
@@ -16,10 +16,7 @@
# specific language governing permissions and limitations
# under the License.
-set -e
-set -u
-set -o pipefail
-
+set -euxo pipefail
export RUSTUP_HOME=/opt/rust
export CARGO_HOME=/opt/rust
@@ -29,12 +26,5 @@ export PATH=$CARGO_HOME/bin:$PATH
rustup component add rustfmt
rustup component add clippy
-# install wasmtime
-apt-get install -y --no-install-recommends libc6-dev-i386
-export WASMTIME_HOME=/opt/wasmtime
-curl https://wasmtime.dev/install.sh -sSf | bash
-export PATH="${WASMTIME_HOME}/bin:${PATH}"
-rustup target add wasm32-wasi
-
# make rust usable by all users
chmod -R a+w /opt/rust
diff --git a/docker/install/ubuntu_install_sccache.sh b/docker/install/ubuntu_install_sccache.sh
new file mode 100644
index 0000000000000..79ce1535c71ec
--- /dev/null
+++ b/docker/install/ubuntu_install_sccache.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+set -e
+set -u
+set -o pipefail
+
+cargo install sccache
+
+# The docs specifically recommend hard links: https://github.com/mozilla/sccache#known-caveats
+mkdir /opt/sccache
+ln "$(which sccache)" /opt/sccache/cc
+ln "$(which sccache)" /opt/sccache/c++
diff --git a/docker/install/ubuntu_install_wasmtime.sh b/docker/install/ubuntu_install_wasmtime.sh
new file mode 100644
index 0000000000000..d1285b36b4291
--- /dev/null
+++ b/docker/install/ubuntu_install_wasmtime.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+set -euxo pipefail
+
+# install wasmtime (note: requires ubuntu_install_rust.sh to run first)
+apt-get install -y --no-install-recommends libc6-dev-i386
+export WASMTIME_HOME=/opt/wasmtime
+curl https://wasmtime.dev/install.sh -sSf | bash
+export PATH="${WASMTIME_HOME}/bin:${PATH}"
+rustup target add wasm32-wasi
diff --git a/docs/conf.py b/docs/conf.py
index e74df6cf1e0ed..2f650a88c9361 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -255,6 +255,7 @@ def git_describe_version(original_version):
"introduction.py",
"install.py",
"tvmc_command_line_driver.py",
+ "tvmc_python.py",
"autotvm_relay_x86.py",
"tensor_expr_get_started.py",
"autotvm_matmul_x86.py",
diff --git a/docs/contribute/ci.rst b/docs/contribute/ci.rst
new file mode 100644
index 0000000000000..1e78e9139eb50
--- /dev/null
+++ b/docs/contribute/ci.rst
@@ -0,0 +1,176 @@
+.. Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+.. http://www.apache.org/licenses/LICENSE-2.0
+
+.. Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied. See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+.. _ci_guide:
+
+Using TVM's CI
+==============
+
+TVM uses Jenkins for running Linux continuous integration (CI) tests on
+`branches `_ and
+`pull requests `_ through a
+build configuration specified in a `Jenkinsfile `_.
+Non-critical jobs run in GitHub Actions for Windows and MacOS jobs.
+
+A standard CI run looks something like this viewed in `Jenkins' BlueOcean viewer `_.
+CI runs usually take several hours to complete and pull requests (PRs) cannot be merged before CI
+has successfully completed. To diagnose failing steps, click through to the failing
+pipeline stage then to the failing step to see the output logs.
+
+.. image:: https://github.com/tlc-pack/web-data/raw/main/images/contribute/ci.png
+ :width: 800
+ :alt: The Jenkins UI for a CI run
+
+
+Debugging Failures
+******************
+
+When CI fails for some reason, there are several methods to diagnose the issue.
+
+Jenkins Logs
+------------
+
+.. |pytest| replace:: ``pytest``
+.. _pytest: https://docs.pytest.org/en/6.2.x/
+
+The first place to look for a failure is in the CI logs, follow the red Xs on
+the failing job to view the logs. Note:
+
+* Jenkins does not display the full log by default, at the top of the log viewer
+ is a button "Show complete log" which will take you to a plaintext version of the log
+* |pytest|_ failures are summarized at the bottom of the log but you will likely
+ need to scroll up to view the actual failure.
+
+Reproduce Failures
+------------------
+
+Most TVM Python tests run under |pytest|_ and
+can be run as described in :ref:`pr-testing`. For a closer environment to the one
+than runs in CI you can run the docker images directly, build TVM, and execute
+tests inside the container. See :ref:`docker_images` for details.
+
+Keeping CI Green
+****************
+
+Developers rely on the TVM CI to get signal on their PRs before merging.
+Occasionally breakages slip through and break ``main``, which in turn causes
+the same error to show up on a PR that is based on the broken commit(s). Broken
+commits can be identified `through GitHub `_
+via the commit status icon or via `Jenkins `_.
+In these situations it is possible to either revert the offending commit or
+submit a forward fix to address the issue. It is up to the committer and commit
+author which option to choose, keeping in mind that a broken CI affects all TVM
+developers and should be fixed as soon as possible.
+
+Skip CI for Reverts
+-------------------
+
+For reverts and trivial forward fixes, adding ``[skip ci]`` to the revert's
+commit message will cause CI to shortcut and only run lint. Committers should
+take care that they only merge CI-skipped PRs to fix a failure on ``main`` and
+not in cases where the submitter wants to shortcut CI to merge a change faster.
+
+.. code:: bash
+
+ # Revert HEAD commit, make sure to insert '[skip ci]' at the beginning of
+ # the commit subject
+ git revert HEAD
+ git checkout -b my_fix
+ # After you have pushed your branch, create a PR as usual.
+ git push my_repo
+ # Example: Skip CI on a branch with an existing PR
+ # Adding this commit to an existing branch will cause a new CI run where
+ # Jenkins is skipped
+ git commit --allow-empty --message "[skip ci] Trigger skipped CI"
+ git push my_repo
+
+Handling Flaky Failures
+***********************
+
+.. https://stackoverflow.com/questions/4743845/format-text-in-a-link-in-restructuredtext/4836544#4836544
+.. |pytest's @xfail decorator| replace:: pytest's ``@xfail`` decorator
+.. _pytest's @xfail decorator: https://docs.pytest.org/en/6.2.x/skipping.html#xfail-mark-test-functions-as-expected-to-fail
+.. |strict=False| replace:: ``strict=False``
+.. _strict=False: https://docs.pytest.org/en/6.2.x/skipping.html#strict-parameter
+
+If you notice a failure on your PR that seems unrelated to your change, you should
+search `recent GitHub issues related to flaky tests `_ and
+`file a new issue `_
+if you don't see any reports of the failure. If a certain test or class of tests affects
+several PRs or commits on ``main`` with flaky failures, the test should be disabled via
+|pytest's @xfail decorator|_ with |strict=False|_ and the relevant issue linked in the
+disabling PR.
+
+.. code:: python
+
+    @pytest.mark.xfail(strict=False, reason="Flaky test: https://github.com/apache/tvm/issues/1234")
+ def test_something_flaky():
+ pass
+
+``ci-docker-staging``
+*********************
+
+The `ci-docker-staging `_
+branch is used to test updates to Docker images and ``Jenkinsfile`` changes. When
+running a build for a normal PR from a forked repository, Jenkins uses the code
+from the PR except for the ``Jenkinsfile`` itself, which comes from the base branch.
+When branches are built, the ``Jenkinsfile`` in the branch is used, so a committer
+with write access must push PRs to a branch in apache/tvm to properly test
+``Jenkinsfile`` changes. If your PR makes changes to the ``Jenkinsfile``, make sure
+to @ a `committer `_
+and ask them to push your PR as a branch to test the changes.
+
+.. _docker_images:
+
+Docker Images
+*************
+
+.. |top_of_the_Jenkinsfile| replace:: top of the ``Jenkinsfile``
+.. _top_of_the_Jenkinsfile: https://github.com/apache/tvm/blob/7481a297740f073b193a3f09b3e27f056e8c7f2e/Jenkinsfile#L48-L54
+
+Each CI job runs most of its work inside a Docker container, built from files
+in the `docker/ `_ folder. These
+files are built nightly in Jenkins via the `docker-images-ci `_ job.
+The images for these containers are hosted in the `tlcpack Docker Hub `_
+and referenced at the |top_of_the_Jenkinsfile|_. These can be inspected and run
+locally via standard Docker commands.
+
+.. code:: bash
+
+ # Beware: CI images can be several GB in size
+ # Get a bare docker shell in the ci-gpu container
+ docker run -it tlcpack/ci-gpu:v0.78 /bin/bash
+
+``docker/bash.sh`` will automatically grab the latest image from the ``Jenkinsfile``
+and help in mounting your current directory.
+
+.. code:: bash
+
+ # Run the ci_cpu image specified in Jenkinsfile
+ cd tvm
+ bash docker/bash.sh ci_cpu
+ # the tvm directory is automatically mounted
+   # example: build tvm (note: this will overwrite build/)
+ $ ./tests/scripts/task_config_build_cpu.sh
+ $ ./tests/scripts/task_build.sh build -j32
+
+
+Reporting Issues
+****************
+
+Issues with CI should be `reported on GitHub `_
+with a link to the relevant jobs, commits, or PRs.
diff --git a/docs/contribute/committer_guide.rst b/docs/contribute/committer_guide.rst
index 68885b6b927a3..3dc5bf07f3cdd 100644
--- a/docs/contribute/committer_guide.rst
+++ b/docs/contribute/committer_guide.rst
@@ -92,7 +92,7 @@ when they actively manage outstanding PRs,
but watch the community less frequently in the rest of the time.
Remember that your merit will never go away, so please
-take your time and pace when contributing to the project:)
+take your time and pace when contributing to the project :)
Broad Collaboration
@@ -101,37 +101,3 @@ Sometimes, we tend to only interact with people we know.
However, broad collaborations are necessary to the success of the project.
Try to keep that in mind, shepherd PRs for, and request code reviews from
community members who you do not interact physically.
-
-
-Keeping CI Green
-----------------
-Developers rely on the TVM CI to get signal on their PRs before merging.
-Occasionally breakges slip through and break ``main``, which in turn causes
-the same error to show up on an PR that is based on the broken commit(s).
-In these situations it is possible to either revert the offending commit or
-submit a forward fix to address the issue. It is up to the committer and commit
-author which option to choose, keeping in mind that a broken CI affects all TVM
-developers and should be fixed as soon as possible.
-
-For reverts and trivial forward fixes, adding ``[skip ci]`` to the revert's
-commit message will cause CI to shortcut and only run lint. Committers should
-take care that they only merge CI-skipped PRs to fix a failure on ``main`` and
-not in cases where the submitter wants to shortcut CI to merge a change faster.
-
-.. code:: bash
-
- # Example: Skip CI on a revert
- # Revert HEAD commit, make sure to insert '[skip ci]' at the beginning of
- # the commit subject
- git revert HEAD
-
- git checkout -b my_fix
- # After you have pushed your branch, create a PR as usual.
- git push my_repo
-
- # Example: Skip CI on a branch with an existing PR
- # Adding this commit to an existing branch will cause a new CI run where
- # Jenkins is skipped
- git commit --allow-empty --message "[skip ci] Trigger skipped CI"
- git push my_repo
-
diff --git a/docs/contribute/community.rst b/docs/contribute/community.rst
index 8867202a674c3..c41c7f394dd50 100644
--- a/docs/contribute/community.rst
+++ b/docs/contribute/community.rst
@@ -17,8 +17,8 @@
.. _community_guide:
-TVM Community Guideline
-=======================
+TVM Community Guidelines
+========================
TVM adopts the Apache style model and governs by merit. We believe that it is important to create an inclusive community where everyone can use, contribute to, and influence the direction of the project. See `CONTRIBUTORS.md `_ for the current list of contributors.
@@ -42,7 +42,7 @@ Committers are individuals who are granted the write access to the project. A co
- Quality of contributions: High-quality, readable code contributions indicated by pull requests that can be merged without a substantial code review. History of creating clean, maintainable code and including good test cases. Informative code reviews to help other contributors that adhere to a good standard.
- Community involvement: active participation in the discussion forum, promote the projects via tutorials, talks and outreach. We encourage committers to collaborate broadly, e.g. do code reviews and discuss designs with community members that they do not interact physically.
-The Project Management Committee(PMC) consists group of active committers that moderate the discussion, manage the project release, and proposes new committer/PMC members. Potential candidates are usually proposed via an internal discussion among PMCs, followed by a consensus approval, i.e. least 3 +1 votes, and no vetoes. Any veto must be accompanied by reasoning. PMCs should serve the community by upholding the community practices and guidelines TVM a better community for everyone. PMCs should strive to only nominate new candidates outside of their own organization.
+The `Project Management Committee (PMC) `_ consists group of active committers that moderate the discussion, manage the project release, and proposes new committer/PMC members. Potential candidates are usually proposed via an internal discussion among PMCs, followed by a consensus approval, (i.e. at least 3 +1 votes, and no vetoes). Any veto must be accompanied by reasoning. PMCs should serve the community by upholding the community practices and guidelines TVM a better community for everyone. PMCs should strive to only nominate new candidates outside of their own organization.
Reviewers
diff --git a/docs/contribute/document.rst b/docs/contribute/document.rst
index e3d12e83865d6..8658c0fea5062 100644
--- a/docs/contribute/document.rst
+++ b/docs/contribute/document.rst
@@ -26,7 +26,8 @@ it is a "simple, comprehensive and nearly universally-applicable scheme. It is
proven in practice across a wide variety of fields and applications."
This document describes the organization of TVM documentation, and how to write
-new documentation.
+new documentation. See `docs/README.md `_
+for instructions on building the docs.
The Four Document Types
***********************
@@ -40,8 +41,8 @@ without necessarily explaining why the software works the way it does. Those
explanations can be saved for other document types. An introductory tutorial
focuses on a successful first experience. These are the most important docs to
turning newcomers into new users and developers. A fully end-to-end
-tutorial— from installing TVM and supporting ML software, to creating and
-training a model, to compiling to different architectures—will give a new
+tutorial — from installing TVM and supporting ML software, to creating and
+training a model, to compiling to different architectures — will give a new
user the opportunity to use TVM in the most efficient way possible. A tutorial
teaches a beginner something they need to know. This is in contrast with a
how-to, which is meant to be an answer to a question that a user with some
@@ -92,7 +93,7 @@ Within these documents you can explore contradictory and conflicting position,
and help the reader make sense of how and why the software was built the way it
is. It's not the place for how-tos and descriptions on how to accomplish tasks.
They instead focus on higher level concepts that help with the understanding of
-the project. Generally these are written by the architects and developers of
+the project. Generally these are written by the architects and developers of
the project, but can useful to help both users and developers to have a deeper
understanding of why the software works the way it does, and how to contribute
to it in ways that are consistent with the underlying design principles.
@@ -124,18 +125,22 @@ Technical Details
*****************
We use the `Sphinx `_ for the main documentation.
-Sphinx support both the reStructuredText and markdown. When possible, we
-encourage to use reStructuredText as it has richer features. Note that the
-python doc-string and tutorials allow you to embed reStructuredText syntax.
+Sphinx supports both reStructuredText and markdown. When possible, we
+encourage reStructuredText as it has richer features. Note that the
+Python doc-string and tutorials allow you to embed reStructuredText syntax.
+
+See
+`docs/README.md `_
+for instructions on building the docs.
Python Reference Documentation
------------------------------
-We use `numpydoc `_ format to
-document the function and classes. The following snippet gives an example
-docstring. We always document all the public functions, when necessary,
-provide an usage example of the features we support(as shown below).
+We use the `numpydoc `_ format to
+document the function and classes. The following snippet gives an example
+docstring. We always document all the public functions, when necessary,
+provide an usage example of the features we support (as shown below).
.. code:: python
@@ -167,19 +172,19 @@ provide an usage example of the features we support(as shown below).
"""
return rv1
-Be careful to leave blank lines between sections of your documents. In the
-above case, there has to be a blank line before `Parameters`, `Returns` and
-`Examples` in order for the doc to be built correctly. To add a new function to
-the doc, we need to add the `sphinx.autodoc
-`_ rules to the
-`docs/api/python `_).
+Be careful to leave blank lines between sections of your documents. In the
+above case, there has to be a blank line before ``Parameters``, ``Returns`` and
+``Examples`` in order for the doc to be built correctly. To add a new function to
+the docs, we need to add the `sphinx.autodoc
+`_ rules to
+`docs/reference/api/python `_).
You can refer to the existing files under this folder on how to add the
functions.
C++ Reference Documentation
---------------------------
-We use the doxgen format to document c++ functions. The following snippet
+We use the doxygen format to document c++ functions. The following snippet
shows an example of c++ docstring.
.. code:: c++
@@ -200,15 +205,15 @@ add comments about code logics to improve readability.
Sphinx Gallery How-Tos
----------------------
-We use the `sphinx-gallery `_ to build many
-python how-tos. You can find the source code under `gallery
-`_ quite self explanatory.
+We use `sphinx-gallery `_ to build many
+Python how-tos. You can find the source code under `gallery
+`_.
One thing that worth noting is that the comment blocks are written in
reStructuredText instead of markdown so be aware of the syntax.
-The how-to code will run on our build server to generate the document page. So
+The how-to code will run on our build server to generate the document page. So
we may have a restriction like not being able to access a remote Raspberry Pi,
-in such case add a flag variable to the tutorial (e.g. `use_rasp`) and allow
+in such case add a flag variable to the tutorial (e.g. ``use_rasp``) and allow
users to easily switch to the real device by changing one flag. Then use the
existing environment to demonstrate the usage.
@@ -218,7 +223,7 @@ If you add a new categorization of how-to, you will need to add references to
Refer to Another Location in the Document
-----------------------------------------
-Please use sphinx's `:ref:` markup to refer to another location in the same doc.
+Please use sphinx's ``:ref:`` markup to refer to another location in the same doc.
.. code-block:: rst
diff --git a/docs/contribute/git_howto.rst b/docs/contribute/git_howto.rst
index 765153be220be..1271aad8a2684 100644
--- a/docs/contribute/git_howto.rst
+++ b/docs/contribute/git_howto.rst
@@ -23,39 +23,39 @@ Git Usage Tips
Here are some tips for git workflow.
-How to resolve a conflict with `main`
--------------------------------------
+How to resolve a conflict with ``main``
+---------------------------------------
- First rebase to most recent main
-.. code:: bash
+ .. code:: bash
- # The first two steps can be skipped after you do it once.
- git remote add upstream [url to tvm repo]
- git fetch upstream
- git rebase upstream/main
+ # The first two steps can be skipped after you do it once.
+ git remote add upstream [url to tvm repo]
+ git fetch upstream
+ git rebase upstream/main
-- The git may show some conflicts it cannot merge, say `conflicted.py`.
+- The git may show some conflicts it cannot merge, say ``conflicted.py``.
- Manually modify the file to resolve the conflict.
- After you resolved the conflict, mark it as resolved by
-.. code:: bash
+ .. code:: bash
- git add conflicted.py
+ git add conflicted.py
- Then you can continue rebase by
-.. code:: bash
+ .. code:: bash
- git rebase --continue
+ git rebase --continue
- Finally push to your fork, you may need to force push here.
-.. code:: bash
+ .. code:: bash
- git push --force
+ git push --force
How to combine multiple commits into one
@@ -66,35 +66,36 @@ to create a PR with set of meaningful commits. You can do it by following steps.
- Before doing so, configure the default editor of git if you haven't done so before.
-.. code:: bash
+ .. code:: bash
- git config core.editor the-editor-you-like
+ git config core.editor the-editor-you-like
- Assume we want to merge last 3 commits, type the following commands
-.. code:: bash
+ .. code:: bash
- git rebase -i HEAD~3
+ git rebase -i HEAD~3
-- It will pop up an text editor. Set the first commit as `pick`, and change later ones to `squash`.
+- It will pop up a text editor. Set the first commit as ``pick``, and change later ones to ``squash``.
- After you saved the file, it will pop up another text editor to ask you modify the combined commit message.
- Push the changes to your fork, you need to force push.
-.. code:: bash
+ .. code:: bash
- git push --force
+ git push --force
Reset to the most recent main branch
------------------------------------
You can always use git reset to reset your version to the most recent main.
-Note that all your ***local changes will get lost***.
+Note that **all your local changes will get lost**.
So only do it when you do not have local changes or when your pull request just get merged.
.. code:: bash
- git reset --hard [hash tag of main]
+ git fetch origin main
+ git reset --hard FETCH_HEAD
Recover a Previous Commit after Reset
diff --git a/docs/contribute/index.rst b/docs/contribute/index.rst
index acacfdc8a6e26..aa893dbccb72b 100644
--- a/docs/contribute/index.rst
+++ b/docs/contribute/index.rst
@@ -48,4 +48,5 @@ Here are guidelines for contributing to various aspect of the project:
error_handling
pull_request
git_howto
- release_process
+ ci
+ release_process
\ No newline at end of file
diff --git a/docs/contribute/pull_request.rst b/docs/contribute/pull_request.rst
index 23d2b1441ce8f..226e693e2c724 100644
--- a/docs/contribute/pull_request.rst
+++ b/docs/contribute/pull_request.rst
@@ -86,6 +86,8 @@ Here is the protocol to update CI image:
- Tag the new version as the latest.
- Periodically cleanup the old versions on local workers
+.. _pr-testing:
+
Testing
-------
Even though we have hooks to run unit tests automatically for each pull request, it's always recommended to run unit tests
diff --git a/docs/legacy_redirect.py b/docs/legacy_redirect.py
index 0f1dee6dbf240..56e8d26d0ba38 100644
--- a/docs/legacy_redirect.py
+++ b/docs/legacy_redirect.py
@@ -242,6 +242,10 @@
"tutorials/get_started/tvmc_command_line_driver.html",
"../../tutorial/tvmc_command_line_driver.html",
],
+ [
+ "tutorials/get_started/tvmc_python.html",
+ "../../tutorial/tvmc_python.html",
+ ],
]
redirect_template = """
diff --git a/gallery/tutorial/tvmc_python.py b/gallery/tutorial/tvmc_python.py
new file mode 100644
index 0000000000000..1f685589730fe
--- /dev/null
+++ b/gallery/tutorial/tvmc_python.py
@@ -0,0 +1,292 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""
+Getting Started using TVMC Python: a high-level API for TVM
+=============================================================
+**Author**:
+`Jocelyn Shiue `_
+
+Hi! Here we explain the scripting tool designed for the complete TVM beginner. 🙂
+
+Before we get started let's get an example model if you don't already have one.
+Follow the steps to download a resnet model via the terminal:
+
+ .. code-block:: python
+
+ mkdir myscripts
+ cd myscripts
+ wget https://github.com/onnx/models/raw/master/vision/classification/resnet/model/resnet50-v2-7.onnx
+ mv resnet50-v2-7.onnx my_model.onnx
+ touch tvmcpythonintro.py
+
+Let's start editing the python file in your favorite text editor.
+"""
+
+################################################################################
+# Step 0: Imports
+# ~~~~~~~~~~~~~~~
+#
+# .. code-block:: python
+#
+# from tvm.driver import tvmc
+#
+#
+
+################################################################################
+# Step 1: Load a model
+# ~~~~~~~~~~~~~~~~~~~~
+#
+# Let's import our model into tvmc. This step converts a machine learning model from
+# a supported framework into TVM's high level graph representation language called Relay.
+# This is to have a unified starting point for all models in TVM. The frameworks we currently
+# support are: Keras, ONNX, Tensorflow, TFLite, and PyTorch.
+#
+# .. code-block:: python
+#
+# model = tvmc.load('my_model.onnx') #Step 1: Load
+#
+# If you'd like to see the Relay, you can run:
+# ``model.summary()``
+#
+# All frameworks support overwriting the input shapes with a shape_dict argument.
+# For most frameworks this is optional, but for PyTorch this is necessary as
+# TVM cannot automatically search for it.
+#
+# .. code-block:: python
+#
+# #model = tvmc.load(my_model, shape_dict={'input1' : [1, 2, 3, 4], 'input2' : [1, 2, 3, 4]}) #Step 1: Load + shape_dict
+#
+# A suggested way to see the model's input/shape_dict is via `netron `_. After opening the model,
+# click the first node to see the name(s) and shape(s) in the inputs section.
+
+
+################################################################################
+# Step 2: Compile
+# ~~~~~~~~~~~~~~~
+#
+# Now that our model is in Relay, our next step is to compile it to a desired
+# hardware to run on. We refer to this hardware as a target. This compilation process
+# translates the model from Relay into a lower-level language that the
+# target machine can understand.
+#
+# In order to compile a model a tvm.target string is required.
+# To learn more about tvm.targets and their options look at the `documentation `_.
+# Some examples include:
+#
+# 1. cuda (Nvidia GPU)
+# 2. llvm (CPU)
+# 3. llvm -mcpu=cascadelake (Intel CPU)
+#
+# .. code-block:: python
+#
+# package = tvmc.compile(model, target="llvm") #Step 2: Compile
+#
+#
+# The compilation step returns a package.
+#
+
+################################################################################
+# Step 3: Run
+# ~~~~~~~~~~~
+#
+# The compiled package can now be run on the hardware target. The device
+# input options are: CPU, Cuda, CL, Metal, and Vulkan.
+#
+# .. code-block:: python
+#
+# result = tvmc.run(package, device="cpu") #Step 3: Run
+#
+# And you can print the results:
+# ``print(results)``
+#
+
+################################################################################
+# Step 1.5: Tune [Optional & Recommended]
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# Run speed can further be improved by tuning. This optional step uses
+# machine learning to look at each operation within a model (a function) and
+# tries to find a faster way to run it. We do this through a cost model, and
+# benchmarking possible schedules.
+#
+# The target is the same as compile.
+#
+# .. code-block:: python
+#
+# tvmc.tune(model, target="llvm") #Step 1.5: Optional Tune
+#
+# The terminal output should look like:
+#
+# .. code-block:: python
+#
+# [Task 1/13] Current/Best: 82.00/ 106.29 GFLOPS | Progress: (48/769) | 18.56 s
+# [Task 1/13] Current/Best: 54.47/ 113.50 GFLOPS | Progress: (240/769) | 85.36 s
+# .....
+#
+# There may be UserWarnings that can be ignored.
+# This should make the end result faster, but it can take hours to tune.
+#
+# See the section 'Saving the Tuning Results' below. Be sure to pass the tuning
+# results into compile if you want the results to apply.
+#
+# .. code-block:: python
+#
+# #tvmc.compile(model, target="llvm", tuning_records = "records.log") #Step 2: Compile
+
+################################################################################
+# Save and then start the process in the terminal:
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# .. code-block:: python
+#
+# python my_tvmc_script.py
+#
+# Note: Your fans may become very active
+#
+
+################################################################################
+# Example results:
+# ~~~~~~~~~~~~~~~~
+#
+# .. code-block:: python
+#
+# Time elapsed for training: 18.99 s
+# Execution time summary:
+# mean (ms) max (ms) min (ms) std (ms)
+# 25.24 26.12 24.89 0.38
+#
+#
+# Output Names:
+# ['output_0']
+#
+
+
+################################################################################
+# Additional TVMC Functionalities
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+
+################################################################################
+# Saving the model
+# ~~~~~~~~~~~~~~~~
+#
+# To make things faster for later, after loading the model (Step 1) save the Relay version.
+# The model will then appear where you saved it for later in the converted syntax.
+#
+# .. code-block:: python
+#
+# model = tvmc.load('my_model.onnx') #Step 1: Load
+# model.save(desired_model_path)
+#
+#
+
+################################################################################
+# Saving the package
+# ~~~~~~~~~~~~~~~~~~
+#
+# After the model has been compiled (Step 2) the package is also saveable.
+#
+# .. code-block:: python
+#
+# tvmc.compile(model, target="llvm", package_path="whatever")
+#
+# new_package = tvmc.TVMCPackage(package_path="whatever")
+# result = tvmc.run(new_package) #Step 3: Run
+#
+#
+
+################################################################################
+# Using Autoscheduler
+# ~~~~~~~~~~~~~~~~~~~
+#
+# Use the next generation of tvm to enable potentially faster run speed results.
+# The search space of the schedules is automatically generated unlike
+# previously where they needed to be hand written. (Learn more:
+# `1 `_,
+# `2 `_)
+#
+# .. code-block:: python
+#
+# tvmc.tune(model, target="llvm", enable_autoscheduler = True)
+#
+#
+
+################################################################################
+# Saving the tuning results
+# ~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# The tuning results can be saved in a file for later reuse.
+#
+# Method 1:
+# .. code-block:: python
+#
+# log_file = "hello.json"
+#
+# # Run tuning
+# tvmc.tune(model, target="llvm",tuning_records=log_file)
+#
+# ...
+#
+# # Later run tuning and reuse tuning results
+# tvmc.tune(model, target="llvm",tuning_records=log_file)
+#
+# Method 2:
+# .. code-block:: python
+#
+# # Run tuning
+# tuning_records = tvmc.tune(model, target="llvm")
+#
+# ...
+#
+# # Later run tuning and reuse tuning results
+# tvmc.tune(model, target="llvm",tuning_records=tuning_records)
+#
+
+################################################################################
+# Tuning a more complex model:
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# If you notice T's printing that look like ``.........T.T..T..T..T.T.T.T.T.T.``
+# increase the searching time frame:
+#
+# .. code-block:: python
+#
+# tvmc.tune(model,trials=10000,timeout=10,)
+#
+
+################################################################################
+# Compiling a model for a remote device:
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# A remote procedure call (RPC) is useful when you would like to compile for hardware
+# that is not on your local machine. The tvmc methods support this.
+# To set up the RPC server take a look at the 'Set up RPC Server on Device'
+# section in this `document `_.
+#
+# Within the TVMC Script include the following and adjust accordingly:
+#
+# .. code-block:: python
+#
+# tvmc.tune(
+# model,
+# target=target, # Compilation target as string // Device to compile for
+# target_host=target_host, # Host processor
+# hostname=host_ip_address, #The IP address of an RPC tracker, used when benchmarking remotely.
+# port=port_number, # The port of the RPC tracker to connect to. Defaults to 9090.
+# rpc_key=your_key, # The RPC tracker key of the target device. Required when rpc_tracker is provided
+# )
+#
diff --git a/include/tvm/meta_schedule/schedule_rule.h b/include/tvm/meta_schedule/schedule_rule.h
index 8313da067f09f..95fce13df02f6 100644
--- a/include/tvm/meta_schedule/schedule_rule.h
+++ b/include/tvm/meta_schedule/schedule_rule.h
@@ -115,7 +115,6 @@ class ScheduleRule : public runtime::ObjectRef {
* \brief Create an auto-inline rule that inlines spatial blocks if it satisfies some conditions
* \param into_producer If allows to inline a block into its producer
* \param into_consumer If allows to inline a block into its consumer
- * \param into_cache_only If it only allows to inline into a block generated by cache_read/write
* \param inline_const_tensor Always inline constant tensors
* \param disallow_if_then_else Always disallow if-then-else-like constructs
* \param require_ordered Always require the read-to-write mapping to be ordered
@@ -125,7 +124,6 @@ class ScheduleRule : public runtime::ObjectRef {
*/
TVM_DLL static ScheduleRule AutoInline(bool into_producer, //
bool into_consumer, //
- bool into_cache_only, //
bool inline_const_tensor, //
bool disallow_if_then_else, //
bool require_injective, //
@@ -154,6 +152,16 @@ class ScheduleRule : public runtime::ObjectRef {
Optional vector_load_max_len, //
Optional