From ac5637b59c66ccd6f8d0ff1abe86d67a1ff31106 Mon Sep 17 00:00:00 2001
From: Eric Lunderberg
Date: Fri, 6 Aug 2021 11:53:44 -0500
Subject: [PATCH] [Docker] Updated command-line parsing of docker/bash.sh

- Maintained previous behavior: any unrecognized flags given after the
  docker image name are part of the command, so no -- is needed
  (e.g. docker/bash.sh ci_gpu make -j2).

- Reverted changes to Jenkinsfile that added a --, no longer needed.
---
 Jenkinsfile    | 94 +++++++++++++++++++++++++-------------------------
 docker/bash.sh | 82 ++++++++++++++++++++++++++++------------------
 2 files changed, 100 insertions(+), 76 deletions(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index c49ed59da7087..65ccbf27326f1 100755
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -112,7 +112,7 @@ stage("Sanity Check") {
     node('CPU') {
       ws(per_exec_ws("tvm/sanity")) {
         init_git()
-        sh "${docker_run} ${ci_lint} -- ./tests/scripts/task_lint.sh"
+        sh "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh"
       }
     }
   }
@@ -124,18 +124,18 @@
 def make(docker_type, path, make_flag) {
   timeout(time: max_time, unit: 'MINUTES') {
     try {
-      sh "${docker_run} ${docker_type} -- ./tests/scripts/task_build.sh ${path} ${make_flag}"
+      sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${path} ${make_flag}"
       // always run cpp test when build
-      sh "${docker_run} ${docker_type} -- ./tests/scripts/task_cpp_unittest.sh"
+      sh "${docker_run} ${docker_type} ./tests/scripts/task_cpp_unittest.sh"
     } catch (hudson.AbortException ae) {
       // script exited due to user abort, directly throw instead of retry
       if (ae.getMessage().contains('script returned exit code 143')) {
         throw ae
       }
       echo 'Incremental compilation failed. Fall back to build from scratch'
-      sh "${docker_run} ${docker_type} -- ./tests/scripts/task_clean.sh ${path}"
-      sh "${docker_run} ${docker_type} -- ./tests/scripts/task_build.sh ${path} ${make_flag}"
-      sh "${docker_run} ${docker_type} -- ./tests/scripts/task_cpp_unittest.sh"
+      sh "${docker_run} ${docker_type} ./tests/scripts/task_clean.sh ${path}"
+      sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${path} ${make_flag}"
+      sh "${docker_run} ${docker_type} ./tests/scripts/task_cpp_unittest.sh"
     }
   }
 }
@@ -164,11 +164,11 @@ stage('Build') {
     node('GPUBUILD') {
       ws(per_exec_ws("tvm/build-gpu")) {
         init_git()
-        sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_config_build_gpu.sh"
+        sh "${docker_run} ${ci_gpu} ./tests/scripts/task_config_build_gpu.sh"
         make(ci_gpu, 'build', '-j2')
         pack_lib('gpu', tvm_multilib)
         // compiler test
-        sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_config_build_gpu_vulkan.sh"
+        sh "${docker_run} ${ci_gpu} ./tests/scripts/task_config_build_gpu_vulkan.sh"
         make(ci_gpu, 'build2', '-j2')
       }
     }
@@ -177,18 +177,18 @@ stage('Build') {
     node('CPU') {
       ws(per_exec_ws("tvm/build-cpu")) {
         init_git()
-        sh "${docker_run} ${ci_cpu} -- ./tests/scripts/task_config_build_cpu.sh"
+        sh "${docker_run} ${ci_cpu} ./tests/scripts/task_config_build_cpu.sh"
         make(ci_cpu, 'build', '-j2')
         pack_lib('cpu', tvm_multilib)
         timeout(time: max_time, unit: 'MINUTES') {
-          sh "${docker_run} ${ci_cpu} -- ./tests/scripts/task_ci_setup.sh"
-          sh "${docker_run} ${ci_cpu} -- ./tests/scripts/task_python_unittest.sh"
-          sh "${docker_run} ${ci_cpu} -- ./tests/scripts/task_python_integration.sh"
-          sh "${docker_run} ${ci_cpu} -- ./tests/scripts/task_python_vta_fsim.sh"
-          sh "${docker_run} ${ci_cpu} -- ./tests/scripts/task_python_vta_tsim.sh"
-          // sh "${docker_run} ${ci_cpu} -- ./tests/scripts/task_golang.sh"
+          sh "${docker_run} ${ci_cpu} ./tests/scripts/task_ci_setup.sh"
+          sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_unittest.sh"
+          sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh"
+          sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_fsim.sh"
+          sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_tsim.sh"
+          // sh "${docker_run} ${ci_cpu} ./tests/scripts/task_golang.sh"
           // TODO(@jroesch): need to resolve CI issue will turn back on in follow up patch
-          // sh "${docker_run} ${ci_cpu} -- ./tests/scripts/task_rust.sh"
+          // sh "${docker_run} ${ci_cpu} ./tests/scripts/task_rust.sh"
           junit "build/pytest-results/*.xml"
         }
       }
@@ -198,11 +198,11 @@ stage('Build') {
     node('CPU') {
      ws(per_exec_ws("tvm/build-wasm")) {
        init_git()
-        sh "${docker_run} ${ci_wasm} -- ./tests/scripts/task_config_build_wasm.sh"
+        sh "${docker_run} ${ci_wasm} ./tests/scripts/task_config_build_wasm.sh"
        make(ci_wasm, 'build', '-j2')
        timeout(time: max_time, unit: 'MINUTES') {
-          sh "${docker_run} ${ci_wasm} -- ./tests/scripts/task_ci_setup.sh"
-          sh "${docker_run} ${ci_wasm} -- ./tests/scripts/task_web_wasm.sh"
+          sh "${docker_run} ${ci_wasm} ./tests/scripts/task_ci_setup.sh"
+          sh "${docker_run} ${ci_wasm} ./tests/scripts/task_web_wasm.sh"
        }
      }
    }
@@ -211,7 +211,7 @@ stage('Build') {
     node('CPU') {
       ws(per_exec_ws("tvm/build-i386")) {
         init_git()
-        sh "${docker_run} ${ci_i386} -- ./tests/scripts/task_config_build_i386.sh"
+        sh "${docker_run} ${ci_i386} ./tests/scripts/task_config_build_i386.sh"
         make(ci_i386, 'build', '-j2')
         pack_lib('i386', tvm_multilib)
       }
@@ -221,7 +221,7 @@ stage('Build') {
     node('ARM') {
       ws(per_exec_ws("tvm/build-arm")) {
         init_git()
-        sh "${docker_run} ${ci_arm} -- ./tests/scripts/task_config_build_arm.sh"
+        sh "${docker_run} ${ci_arm} ./tests/scripts/task_config_build_arm.sh"
         make(ci_arm, 'build', '-j4')
         pack_lib('arm', tvm_multilib)
       }
@@ -231,11 +231,11 @@ stage('Build') {
     node('CPU') {
       ws(per_exec_ws("tvm/build-qemu")) {
         init_git()
-        sh "${docker_run} ${ci_qemu} -- ./tests/scripts/task_config_build_qemu.sh"
+        sh "${docker_run} ${ci_qemu} ./tests/scripts/task_config_build_qemu.sh"
         make(ci_qemu, 'build', '-j2')
         timeout(time: max_time, unit: 'MINUTES') {
-          sh "${docker_run} ${ci_qemu} -- ./tests/scripts/task_ci_setup.sh"
-          sh "${docker_run} ${ci_qemu} -- ./tests/scripts/task_python_microtvm.sh"
+          sh "${docker_run} ${ci_qemu} ./tests/scripts/task_ci_setup.sh"
+          sh "${docker_run} ${ci_qemu} ./tests/scripts/task_python_microtvm.sh"
           junit "build/pytest-results/*.xml"
         }
       }
@@ -250,10 +250,10 @@ stage('Unit Test') {
         init_git()
         unpack_lib('gpu', tvm_multilib)
         timeout(time: max_time, unit: 'MINUTES') {
-          sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_ci_setup.sh"
-          sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_sphinx_precheck.sh"
-          sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_python_unittest_gpuonly.sh"
-          sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_python_integration_gpuonly.sh"
+          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh"
+          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_sphinx_precheck.sh"
+          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh"
+          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh"
           junit "build/pytest-results/*.xml"
         }
       }
@@ -265,10 +265,10 @@ stage('Unit Test') {
         init_git()
         unpack_lib('i386', tvm_multilib)
         timeout(time: max_time, unit: 'MINUTES') {
-          sh "${docker_run} ${ci_i386} -- ./tests/scripts/task_ci_setup.sh"
-          sh "${docker_run} ${ci_i386} -- ./tests/scripts/task_python_unittest.sh"
-          sh "${docker_run} ${ci_i386} -- ./tests/scripts/task_python_integration.sh"
-          sh "${docker_run} ${ci_i386} -- ./tests/scripts/task_python_vta_fsim.sh"
+          sh "${docker_run} ${ci_i386} ./tests/scripts/task_ci_setup.sh"
+          sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_unittest.sh"
+          sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration.sh"
+          sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_vta_fsim.sh"
           junit "build/pytest-results/*.xml"
         }
       }
@@ -280,8 +280,8 @@ stage('Unit Test') {
         init_git()
         unpack_lib('arm', tvm_multilib)
         timeout(time: max_time, unit: 'MINUTES') {
-          sh "${docker_run} ${ci_arm} -- ./tests/scripts/task_ci_setup.sh"
-          sh "${docker_run} ${ci_arm} -- ./tests/scripts/task_python_unittest.sh"
+          sh "${docker_run} ${ci_arm} ./tests/scripts/task_ci_setup.sh"
+          sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_unittest.sh"
           junit "build/pytest-results/*.xml"
           // sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh"
         }
@@ -294,8 +294,8 @@ stage('Unit Test') {
         init_git()
         unpack_lib('gpu', tvm_multilib)
         timeout(time: max_time, unit: 'MINUTES') {
-          sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_ci_setup.sh"
-          sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_java_unittest.sh"
+          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh"
+          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_java_unittest.sh"
         }
       }
     }
@@ -309,8 +309,8 @@ stage('Integration Test') {
         init_git()
         unpack_lib('gpu', tvm_multilib)
         timeout(time: max_time, unit: 'MINUTES') {
-          sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_ci_setup.sh"
-          sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_python_topi.sh"
+          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh"
+          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh"
           junit "build/pytest-results/*.xml"
         }
       }
@@ -322,8 +322,8 @@ stage('Integration Test') {
         init_git()
         unpack_lib('gpu', tvm_multilib)
         timeout(time: max_time, unit: 'MINUTES') {
-          sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_ci_setup.sh"
-          sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_python_frontend.sh"
+          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh"
+          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh"
           junit "build/pytest-results/*.xml"
         }
       }
@@ -335,8 +335,8 @@ stage('Integration Test') {
         init_git()
         unpack_lib('cpu', tvm_multilib)
         timeout(time: max_time, unit: 'MINUTES') {
-          sh "${docker_run} ${ci_cpu} -- ./tests/scripts/task_ci_setup.sh"
-          sh "${docker_run} ${ci_cpu} -- ./tests/scripts/task_python_frontend_cpu.sh"
+          sh "${docker_run} ${ci_cpu} ./tests/scripts/task_ci_setup.sh"
+          sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_frontend_cpu.sh"
           junit "build/pytest-results/*.xml"
         }
       }
@@ -348,8 +348,8 @@ stage('Integration Test') {
         init_git()
         unpack_lib('gpu', tvm_multilib)
         timeout(time: max_time, unit: 'MINUTES') {
-          sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_ci_setup.sh"
-          sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_python_docs.sh"
+          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh"
+          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_docs.sh"
         }
         pack_lib('mydocs', 'docs.tgz')
       }
@@ -361,13 +361,13 @@
 stage('Build packages') {
   parallel 'conda CPU': {
     node('CPU') {
-      sh "${docker_run} tlcpack/conda-cpu -- ./conda/build_cpu.sh
+      sh "${docker_run} tlcpack/conda-cpu ./conda/build_cpu.sh
     }
   },
   'conda cuda': {
     node('CPU') {
-      sh "${docker_run} tlcpack/conda-cuda90 -- ./conda/build_cuda.sh
-      sh "${docker_run} tlcpack/conda-cuda100 -- ./conda/build_cuda.sh
+      sh "${docker_run} tlcpack/conda-cuda90 ./conda/build_cuda.sh
+      sh "${docker_run} tlcpack/conda-cuda100 ./conda/build_cuda.sh
     }
   }
   // Here we could upload the packages to anaconda for releases
diff --git a/docker/bash.sh b/docker/bash.sh
index b01c14a4d6c20..0fa5d402e1308 100755
--- a/docker/bash.sh
+++ b/docker/bash.sh
@@ -20,7 +20,10 @@
 #
 # Start a bash, mount REPO_MOUNT_POINT to be current directory.
 #
-# Usage: bash.sh [-i] [--net=host] [--mount path]
+# Usage: docker/bash.sh [-i|--interactive] [--net=host]
+#        [--mount MOUNT_DIR] [--repo-mount-point REPO_MOUNT_POINT]
+#        [--dry-run]
+#        [--] [COMMAND]
 #
 # Usage: docker/bash.sh
 #     Starts an interactive session
@@ -107,17 +110,20 @@
 COMMAND=bash
 REPO_MOUNT_POINT="${WORKSPACE}"
 MOUNT_DIRS=( )
-trap "show_usage >&2" ERR
-args=$(getopt \
-    --name bash.sh \
-    --options "ih" \
-    --longoptions "interactive,net=host,mount:,dry-run" \
-    --longoptions "repo-mount-point:" \
-    --longoptions "help" \
-    --unquoted \
-    -- "$@")
-trap - ERR
-set -- $args
+function parse_error() {
+    echo "$@" >&2
+    show_usage >&2
+    exit 1
+}
+
+
+# Handle joined flags, such as interpreting -ih as -i -h. Either rewrites
+# the current argument if it is a joined argument, or shifts all arguments
+# otherwise. Should be called as "eval $break_joined_flag" where joined
+# flags are possible. Can't use a function definition, because it needs
+# to overwrite the parent scope's behavior.
+break_joined_flag='if (( ${#1} == 2 )); then shift; else set -- -"${1#-i}" "${@:2}"; fi'
+
 
 while (( $# )); do
     case "$1" in
@@ -126,9 +132,9 @@ while (( $# )); do
             exit 0
             ;;
 
-        -i|--interactive)
+        -i*|--interactive)
             INTERACTIVE=true
-            shift
+            eval $break_joined_flag
             ;;
 
         --net=host)
@@ -137,8 +143,16 @@ while (( $# )); do
             ;;
 
         --mount)
-            MOUNT_DIRS+=("$2")
-            shift
+            if [[ -n "$2" ]]; then
+                MOUNT_DIRS+=("$2")
+                shift 2
+            else
+                parse_error 'ERROR: --mount requires a non-empty argument'
+            fi
+            ;;
+
+        --mount=?*)
+            MOUNT_DIRS+=("${1#*=}")
             shift
             ;;
@@ -148,13 +162,22 @@ while (( $# )); do
             ;;
 
         --repo-mount-point)
-            REPO_MOUNT_POINT="$2"
-            shift
+            if [[ -n "$2" ]]; then
+                REPO_MOUNT_POINT="$2"
+                shift 2
+            else
+                parse_error 'ERROR: --repo-mount-point requires a non-empty argument'
+            fi
+            ;;
+
+        --repo-mount-point=?*)
+            REPO_MOUNT_POINT="${1#*=}"
             shift
             ;;
 
         --)
             shift
+            COMMAND="$@"
             break
             ;;
@@ -168,25 +191,24 @@ while (( $# )); do
             ;;
 
         *)
-            echo "Internal Error: getopt should output -- before positional" >&2
-            exit 2
+            # First positional argument is the image name, all
+            # remaining belong to the COMMAND.
+            if [[ -z "${DOCKER_IMAGE_NAME}" ]]; then
+                DOCKER_IMAGE_NAME=$1
+                shift
+            else
+                COMMAND="$@"
+                break
+            fi
             ;;
     esac
 done
 
-if (( $# )); then
-    DOCKER_IMAGE_NAME=$1
-    shift
-else
+if [[ -z "${DOCKER_IMAGE_NAME}" ]]; then
     echo "Error: Missing DOCKER_IMAGE_NAME" >&2
     show_usage >&2
 fi
 
-if (( $# )); then
-    COMMAND="$@"
-fi
-
-
 if [[ "${COMMAND}" = bash ]]; then
     INTERACTIVE=true
     USE_NET_HOST=true
@@ -341,6 +363,8 @@ DOCKER_CMD=(${DOCKER_BINARY} run
     "${COMMAND[@]}"
     )
 
+echo "TEMP = " "${DOCKER_CMD[@]}"
+
 if ${DRY_RUN}; then
     echo "${DOCKER_CMD[@]}"
 else