[Docker] Updated command-line parsing of docker/bash.sh
- Maintained the previous behavior: any unrecognized flags passed to
  docker/bash.sh are treated as part of the command, so no -- is
  needed (e.g. docker/bash.sh ci_gpu make -j2; see the example
  invocations below).

- Reverted the changes to the Jenkinsfile that added a --, which is no
  longer needed.
Lunderberg committed Aug 6, 2021
1 parent 039b38c commit ac5637b
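
For illustration, a few invocations the updated parsing is intended to accept
(a sketch based on the description above and on the usage text in
docker/bash.sh; the test script shown is only an example):

    # Interactive session in the ci_gpu image
    docker/bash.sh ci_gpu

    # The command follows the image name; no -- separator is needed
    docker/bash.sh ci_gpu make -j2

    # Flags for bash.sh itself go before the image name
    docker/bash.sh -i --net=host ci_gpu ./tests/scripts/task_python_unittest.sh
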
Showing 2 changed files with 100 additions and 76 deletions.
94 changes: 47 additions & 47 deletions Jenkinsfile
@@ -112,7 +112,7 @@ stage("Sanity Check") {
node('CPU') {
ws(per_exec_ws("tvm/sanity")) {
init_git()
sh "${docker_run} ${ci_lint} -- ./tests/scripts/task_lint.sh"
sh "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh"
}
}
}
@@ -124,18 +124,18 @@ stage("Sanity Check") {
def make(docker_type, path, make_flag) {
timeout(time: max_time, unit: 'MINUTES') {
try {
sh "${docker_run} ${docker_type} -- ./tests/scripts/task_build.sh ${path} ${make_flag}"
sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${path} ${make_flag}"
// always run cpp test when build
sh "${docker_run} ${docker_type} -- ./tests/scripts/task_cpp_unittest.sh"
sh "${docker_run} ${docker_type} ./tests/scripts/task_cpp_unittest.sh"
} catch (hudson.AbortException ae) {
// script exited due to user abort, directly throw instead of retry
if (ae.getMessage().contains('script returned exit code 143')) {
throw ae
}
echo 'Incremental compilation failed. Fall back to build from scratch'
sh "${docker_run} ${docker_type} -- ./tests/scripts/task_clean.sh ${path}"
sh "${docker_run} ${docker_type} -- ./tests/scripts/task_build.sh ${path} ${make_flag}"
sh "${docker_run} ${docker_type} -- ./tests/scripts/task_cpp_unittest.sh"
sh "${docker_run} ${docker_type} ./tests/scripts/task_clean.sh ${path}"
sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${path} ${make_flag}"
sh "${docker_run} ${docker_type} ./tests/scripts/task_cpp_unittest.sh"
}
}
}
@@ -164,11 +164,11 @@ stage('Build') {
node('GPUBUILD') {
ws(per_exec_ws("tvm/build-gpu")) {
init_git()
sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_config_build_gpu.sh"
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_config_build_gpu.sh"
make(ci_gpu, 'build', '-j2')
pack_lib('gpu', tvm_multilib)
// compiler test
sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_config_build_gpu_vulkan.sh"
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_config_build_gpu_vulkan.sh"
make(ci_gpu, 'build2', '-j2')
}
}
@@ -177,18 +177,18 @@ stage('Build') {
node('CPU') {
ws(per_exec_ws("tvm/build-cpu")) {
init_git()
sh "${docker_run} ${ci_cpu} -- ./tests/scripts/task_config_build_cpu.sh"
sh "${docker_run} ${ci_cpu} ./tests/scripts/task_config_build_cpu.sh"
make(ci_cpu, 'build', '-j2')
pack_lib('cpu', tvm_multilib)
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${ci_cpu} -- ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_cpu} -- ./tests/scripts/task_python_unittest.sh"
sh "${docker_run} ${ci_cpu} -- ./tests/scripts/task_python_integration.sh"
sh "${docker_run} ${ci_cpu} -- ./tests/scripts/task_python_vta_fsim.sh"
sh "${docker_run} ${ci_cpu} -- ./tests/scripts/task_python_vta_tsim.sh"
// sh "${docker_run} ${ci_cpu} -- ./tests/scripts/task_golang.sh"
sh "${docker_run} ${ci_cpu} ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_unittest.sh"
sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh"
sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_fsim.sh"
sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_tsim.sh"
// sh "${docker_run} ${ci_cpu} ./tests/scripts/task_golang.sh"
// TODO(@jroesch): need to resolve CI issue will turn back on in follow up patch
// sh "${docker_run} ${ci_cpu} -- ./tests/scripts/task_rust.sh"
// sh "${docker_run} ${ci_cpu} ./tests/scripts/task_rust.sh"
junit "build/pytest-results/*.xml"
}
}
@@ -198,11 +198,11 @@ stage('Build') {
node('CPU') {
ws(per_exec_ws("tvm/build-wasm")) {
init_git()
sh "${docker_run} ${ci_wasm} -- ./tests/scripts/task_config_build_wasm.sh"
sh "${docker_run} ${ci_wasm} ./tests/scripts/task_config_build_wasm.sh"
make(ci_wasm, 'build', '-j2')
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${ci_wasm} -- ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_wasm} -- ./tests/scripts/task_web_wasm.sh"
sh "${docker_run} ${ci_wasm} ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_wasm} ./tests/scripts/task_web_wasm.sh"
}
}
}
@@ -211,7 +211,7 @@ stage('Build') {
node('CPU') {
ws(per_exec_ws("tvm/build-i386")) {
init_git()
sh "${docker_run} ${ci_i386} -- ./tests/scripts/task_config_build_i386.sh"
sh "${docker_run} ${ci_i386} ./tests/scripts/task_config_build_i386.sh"
make(ci_i386, 'build', '-j2')
pack_lib('i386', tvm_multilib)
}
@@ -221,7 +221,7 @@ stage('Build') {
node('ARM') {
ws(per_exec_ws("tvm/build-arm")) {
init_git()
sh "${docker_run} ${ci_arm} -- ./tests/scripts/task_config_build_arm.sh"
sh "${docker_run} ${ci_arm} ./tests/scripts/task_config_build_arm.sh"
make(ci_arm, 'build', '-j4')
pack_lib('arm', tvm_multilib)
}
@@ -231,11 +231,11 @@ stage('Build') {
node('CPU') {
ws(per_exec_ws("tvm/build-qemu")) {
init_git()
sh "${docker_run} ${ci_qemu} -- ./tests/scripts/task_config_build_qemu.sh"
sh "${docker_run} ${ci_qemu} ./tests/scripts/task_config_build_qemu.sh"
make(ci_qemu, 'build', '-j2')
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${ci_qemu} -- ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_qemu} -- ./tests/scripts/task_python_microtvm.sh"
sh "${docker_run} ${ci_qemu} ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_qemu} ./tests/scripts/task_python_microtvm.sh"
junit "build/pytest-results/*.xml"
}
}
@@ -250,10 +250,10 @@ stage('Unit Test') {
init_git()
unpack_lib('gpu', tvm_multilib)
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_sphinx_precheck.sh"
sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_python_unittest_gpuonly.sh"
sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_python_integration_gpuonly.sh"
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_sphinx_precheck.sh"
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh"
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh"
junit "build/pytest-results/*.xml"
}
}
@@ -265,10 +265,10 @@ stage('Unit Test') {
init_git()
unpack_lib('i386', tvm_multilib)
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${ci_i386} -- ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_i386} -- ./tests/scripts/task_python_unittest.sh"
sh "${docker_run} ${ci_i386} -- ./tests/scripts/task_python_integration.sh"
sh "${docker_run} ${ci_i386} -- ./tests/scripts/task_python_vta_fsim.sh"
sh "${docker_run} ${ci_i386} ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_unittest.sh"
sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration.sh"
sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_vta_fsim.sh"
junit "build/pytest-results/*.xml"
}
}
@@ -280,8 +280,8 @@ stage('Unit Test') {
init_git()
unpack_lib('arm', tvm_multilib)
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${ci_arm} -- ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_arm} -- ./tests/scripts/task_python_unittest.sh"
sh "${docker_run} ${ci_arm} ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_unittest.sh"
junit "build/pytest-results/*.xml"
// sh "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh"
}
@@ -294,8 +294,8 @@ stage('Unit Test') {
init_git()
unpack_lib('gpu', tvm_multilib)
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_java_unittest.sh"
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_java_unittest.sh"
}
}
}
@@ -309,8 +309,8 @@ stage('Integration Test') {
init_git()
unpack_lib('gpu', tvm_multilib)
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_python_topi.sh"
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh"
junit "build/pytest-results/*.xml"
}
}
@@ -322,8 +322,8 @@ stage('Integration Test') {
init_git()
unpack_lib('gpu', tvm_multilib)
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_python_frontend.sh"
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh"
junit "build/pytest-results/*.xml"
}
}
@@ -335,8 +335,8 @@ stage('Integration Test') {
init_git()
unpack_lib('cpu', tvm_multilib)
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${ci_cpu} -- ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_cpu} -- ./tests/scripts/task_python_frontend_cpu.sh"
sh "${docker_run} ${ci_cpu} ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_frontend_cpu.sh"
junit "build/pytest-results/*.xml"
}
}
@@ -348,8 +348,8 @@ stage('Integration Test') {
init_git()
unpack_lib('gpu', tvm_multilib)
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_gpu} -- ./tests/scripts/task_python_docs.sh"
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_docs.sh"
}
pack_lib('mydocs', 'docs.tgz')
}
@@ -361,13 +361,13 @@ stage('Integration Test') {
stage('Build packages') {
parallel 'conda CPU': {
node('CPU') {
sh "${docker_run} tlcpack/conda-cpu -- ./conda/build_cpu.sh
sh "${docker_run} tlcpack/conda-cpu ./conda/build_cpu.sh
}
},
'conda cuda': {
node('CPU') {
sh "${docker_run} tlcpack/conda-cuda90 -- ./conda/build_cuda.sh
sh "${docker_run} tlcpack/conda-cuda100 -- ./conda/build_cuda.sh
sh "${docker_run} tlcpack/conda-cuda90 ./conda/build_cuda.sh
sh "${docker_run} tlcpack/conda-cuda100 ./conda/build_cuda.sh
}
}
// Here we could upload the packages to anaconda for releases
82 changes: 53 additions & 29 deletions docker/bash.sh
@@ -20,7 +20,10 @@
#
# Start a bash, mount REPO_MOUNT_POINT to be current directory.
#
# Usage: bash.sh <CONTAINER_TYPE> [-i] [--net=host] [--mount path] <CONTAINER_NAME> <COMMAND>
# Usage: docker/bash.sh [-i|--interactive] [--net=host]
# [--mount MOUNT_DIR] [--repo-mount-point REPO_MOUNT_POINT]
# [--dry-run]
# <DOCKER_IMAGE_NAME> [--] [COMMAND]
#
# Usage: docker/bash.sh <CONTAINER_NAME>
# Starts an interactive session
@@ -107,17 +110,20 @@ COMMAND=bash
REPO_MOUNT_POINT="${WORKSPACE}"
MOUNT_DIRS=( )

trap "show_usage >&2" ERR
args=$(getopt \
--name bash.sh \
--options "ih" \
--longoptions "interactive,net=host,mount:,dry-run" \
--longoptions "repo-mount-point:" \
--longoptions "help" \
--unquoted \
-- "$@")
trap - ERR
set -- $args
function parse_error() {
echo "$@" >&2
show_usage >&2
exit 1
}


# Handle joined flags, such as interpreting -ih as -i -h. Either rewrites
# the current argument if it is a joined argument, or shifts all arguments
# otherwise. Should be called as "eval $break_joined_flag" where joined
# flags are possible. Can't use a function definition, because it needs
# to overwrite the parent scope's behavior.
break_joined_flag='if (( ${#1} == 2 )); then shift; else set -- -"${1#-i}" "${@:2}"; fi'


while (( $# )); do
case "$1" in
@@ -126,9 +132,9 @@ while (( $# )); do
exit 0
;;

-i|--interactive)
-i*|--interactive)
INTERACTIVE=true
shift
eval $break_joined_flag
;;

--net=host)
@@ -137,8 +143,16 @@ while (( $# )); do
;;

--mount)
MOUNT_DIRS+=("$2")
shift
if [[ -n "$2" ]]; then
MOUNT_DIRS+=("$2")
shift 2
else
parse_error 'ERROR: --mount requires a non-empty argument'
fi
;;

--mount=?*)
MOUNT_DIRS+=("${1#*=}")
shift
;;

@@ -148,13 +162,22 @@ while (( $# )); do
;;

--repo-mount-point)
REPO_MOUNT_POINT="$2"
shift
if [[ -n "$2" ]]; then
REPO_MOUNT_POINT="$2"
shift 2
else
parse_error 'ERROR: --repo-mount-point requires a non-empty argument'
fi
;;

--repo-mount-point=?*)
REPO_MOUNT_POINT="${1#*=}"
shift
;;

--)
shift
COMMAND="$@"
break
;;

@@ -168,25 +191,24 @@ while (( $# )); do
;;

*)
echo "Internal Error: getopt should output -- before positional" >&2
exit 2
# First positional argument is the image name; all
# remaining arguments belong to the COMMAND.
if [[ -z "${DOCKER_IMAGE_NAME}" ]]; then
DOCKER_IMAGE_NAME=$1
shift
else
COMMAND="$@"
break
fi
;;
esac
done

if (( $# )); then
DOCKER_IMAGE_NAME=$1
shift
else
if [[ -z "${DOCKER_IMAGE_NAME}" ]]; then
echo "Error: Missing DOCKER_IMAGE_NAME" >&2
show_usage >&2
fi

if (( $# )); then
COMMAND="$@"
fi


if [[ "${COMMAND}" = bash ]]; then
INTERACTIVE=true
USE_NET_HOST=true
@@ -341,6 +363,8 @@ DOCKER_CMD=(${DOCKER_BINARY} run
"${COMMAND[@]}"
)

echo "TEMP = " "${DOCKER_CMD[@]}"

if ${DRY_RUN}; then
echo "${DOCKER_CMD[@]}"
else
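
A few more example invocations exercising the new option handling shown above
(a sketch only; the mount path is a placeholder, not part of this commit):

    # --mount works in both separated and joined forms
    docker/bash.sh --mount /tmp/data ci_gpu ./tests/scripts/task_python_unittest.sh
    docker/bash.sh --mount=/tmp/data ci_gpu ./tests/scripts/task_python_unittest.sh

    # --dry-run prints the assembled docker command instead of running it
    docker/bash.sh --dry-run ci_gpu make -j2

    # Joined short flags are split, so -ih behaves like -i -h
    docker/bash.sh -ih

    # An explicit -- separator still works, e.g. if the command begins with a dash
    docker/bash.sh ci_gpu -- make -j2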
