check_onnx_model.sh
#!/usr/bin/env bash
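# Checks whether an ONNX model parses and builds across several TensorRT
# versions by running trtexec inside the matching Docker containers.
# Example: ./check_onnx_model.sh model.onnx --fp16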
set -x
if [ $# -eq 0 ]; then
    echo "Usage: check_onnx_model.sh model_file.onnx [trtexec options like --fp16 or --workspace=2000 ...]"
    exit 1
fi
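# The model path must be relative to the current directory, which is
# mounted read-only into the container below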
MODEL_PATH="$1"
# By default mount the current folder
VOLUME_HOST="$(pwd)"
VOLUME_DOCKER="/host_volume"
# Forward any extra command-line arguments (e.g. --fp16) to trtexec
TRTEXEC_ARGS="--onnx=$VOLUME_DOCKER/$MODEL_PATH ${*:2}"
DOCKER_IMAGE_OLDER="adujardin/tensorrt-trtexec:"
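# Map each TensorRT version to a Docker image that ships it
# (the NGC release notes list which TensorRT version each monthly tag contains)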
declare -A hashmap
#hashmap["4.0"]="${DOCKER_IMAGE_OLDER}4.0"
hashmap["4.0"]="nvcr.io/nvidia/tensorrt:18.06-py3"
#hashmap["5.0"]="${DOCKER_IMAGE_OLDER}5.0"
hashmap["5.0"]="nvcr.io/nvidia/tensorrt:18.10-py3"
#hashmap["5.1"]="${DOCKER_IMAGE_OLDER}5.1"
hashmap["5.1"]="nvcr.io/nvidia/tensorrt:19.03-py3"
#hashmap["6.0"]="${DOCKER_IMAGE_OLDER}6.0"
hashmap["6.0"]="nvcr.io/nvidia/tensorrt:19.09-py3"
#hashmap["7.0"]="${DOCKER_IMAGE_OLDER}7.0"
hashmap["7.0"]="nvcr.io/nvidia/tensorrt:20.02-py3"
hashmap["7.1"]="nvcr.io/nvidia/tensorrt:20.07-py3"
hashmap["7.2"]="nvcr.io/nvidia/tensorrt:20.11-py3"
hashmap["8.0"]="nvcr.io/nvidia/tensorrt:21.07-py3"
hashmap["8.2"]="nvcr.io/nvidia/tensorrt:21.12-py3"
hashmap["8.4"]="nvcr.io/nvidia/tensorrt:22.07-py3"
hashmap["8.5"]="nvcr.io/nvidia/tensorrt:22.12-py3"
hashmap["8.6"]="nvcr.io/nvidia/tensorrt:23.04-py3"
TRT_COMMAND=""
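# Map each TensorRT version to the JetPack release(s) bundling it,
# shown in the final summary (an empty string means no matching JetPack)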
declare -A hashmap_jp
hashmap_jp["4.0"]="3.3"
hashmap_jp["5.0"]="4.1.1, 4.2.0"
hashmap_jp["5.1"]="4.2.1"
hashmap_jp["6.0"]="4.3"
hashmap_jp["7.0"]=""
hashmap_jp["7.1"]="4.4.X, 4.5.X"
hashmap_jp["7.2"]=""
hashmap_jp["8.0"]="4.6.0"
hashmap_jp["8.2"]="4.6.1"
hashmap_jp["8.4"]="5.0.2"
hashmap_jp["8.5"]="5.1"
# JP 3.3 : TRT 4.0
# JP 4.1.1 : TRT 5.0
# JP 4.2.0 : TRT 5.0
# JP 4.2.1 : TRT 5.1
# JP 4.3 : TRT 6.0
# JP 4.4 : TRT 7.1
#trt_versions=( "4.0" "5.0" "5.1" "6.0" "7.0" )
trt_versions=( "8.0" "8.2" "8.4" "8.5" "8.6" )
echo -e "Testing TensorRT..."
for version in "${trt_versions[@]}"; do
    echo -e " ${version}"
    docker_container="${hashmap[${version}]}"
    # NGC images need trtexec invoked explicitly; the custom images
    # already run it as their default command
    if [[ $docker_container == *"nvcr.io/nvidia"* ]]; then
        TRT_COMMAND="trtexec"
    else
        TRT_COMMAND=""
    fi
    docker run --rm --gpus all -v "$VOLUME_HOST:$VOLUME_DOCKER:ro" $docker_container $TRT_COMMAND $TRTEXEC_ARGS 2>&1 | tee "log_trt${version}_${MODEL_PATH}.txt"
done
echo -e "\n=====================================================\n"
fail_str="&&&& FAILED TensorRT.trtexec"
ok_str="&&&& PASSED TensorRT.trtexec"
log_file="log_${MODEL_PATH}.txt"
raw_log_file="rawlog_${MODEL_PATH}.txt"
# Start from empty summary logs so reruns do not append to stale results
: > "${log_file}"
: > "${raw_log_file}"
for version in "${trt_versions[@]}"; do
    in_log_file="log_trt${version}_${MODEL_PATH}.txt"
    echo -e "\n\n==================== TensorRT ${version} ===================\n" >> "${log_file}"
    echo -e "\n\n==================== TensorRT ${version} ===================\n" >> "${raw_log_file}"
    tail -n 20 "${in_log_file}" >> "${log_file}"
    cat "${in_log_file}" >> "${raw_log_file}"
    if grep -q "${ok_str}" "${in_log_file}"; then
        printf "\033[32m PASSED \033[39m"
    else
        printf "\033[31m FAILED \033[39m"
    fi
    jp_ver_str="${hashmap_jp[${version}]}"
    display_str="TensorRT ${version}"
    if [ -z "${jp_ver_str}" ]; then
        echo -en "${display_str}\n"
    else
        echo -en "${display_str} (JetPack ${jp_ver_str})\n"
    fi
    rm "${in_log_file}"
done
# Verbose
#cat "log_${MODEL_PATH}.txt"
echo -e "\nFor more information :\n cat ${log_file}\n"