#!/bin/bash
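
# run_tests.sh - wrapper around pytest for running sonic-mgmt test cases against a testbed.
# It derives the DUT name from the testbed file, builds the common pytest options and runs
# the pretest stage, the selected test method and the posttest stage.
#
# Example invocation (hypothetical testbed and DUT names, assuming the script is launched
# from the tests/ directory of the repo):
#   ./run_tests.sh -n vms-kvm-t0 -d vlab-01 -t t0 -c "bgp/test_bgp_fact.py"
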
function show_help_and_exit()
{
echo "Usage ${SCRIPT} [options]"
echo " options with (*) must be provided"
echo " -h -? : get this help"
echo " -a <True|False>: specify if auto-recover is allowed (default: True)"
echo " -b <master_id> : specify name of k8s master group used in k8s inventory, format: k8s_vms{msetnumber}_{servernumber}"
echo " -c <testcases> : specify test cases to execute (default: none, executed all matched)"
echo " -d <dut name> : specify comma-separated DUT names (default: DUT name associated with testbed in testbed file)"
echo " -e <parameters>: specify extra parameter(s) (default: none)"
echo " -E : exit for any error (default: False)"
echo " -f <tb file> : specify testbed file (default testbed.yaml)"
echo " -i <inventory> : specify inventory name"
echo " -I <folders> : specify list of test folders, filter out test cases not in the folders (default: none)"
echo " -k <file log> : specify file log level: error|warning|info|debug (default debug)"
echo " -l <cli log> : specify cli log level: error|warning|info|debug (default warning)"
echo " -m <method> : specify test method group|individual|debug (default group)"
echo " -n <testbed> : specify testbed name (*)"
echo " -o : omit the file logs"
echo " -O : run tests in input order rather than alphabetical order"
echo " -p <path> : specify log path (default: logs)"
echo " -q <n> : test will stop after <n> failures (default: not stop on failure)"
echo " -r : retain individual file log for suceeded tests (default: remove)"
echo " -s <tests> : specify list of tests to skip (default: none)"
echo " -S <folders> : specify list of test folders to skip (default: none)"
echo " -t <topology> : specify toplogy: t0|t1|any|combo like t0,any (*)"
echo " -u : bypass util group"
echo " -x : print commands and their arguments as they are executed"
exit $1
}
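
# If no DUT name is given with -d, derive it from the testbed definition: the legacy CSV format
# takes the DUT column of the line matching the testbed name, the YAML format takes the 'dut'
# list of the entry whose 'conf-name' matches the testbed name.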
function get_dut_from_testbed_file() {
if [[ -z ${DUT_NAME} ]]; then
if [[ $TESTBED_FILE == *.csv ]];
then
LINE=`cat $TESTBED_FILE | grep "^$TESTBED_NAME"`
if [[ -z ${LINE} ]]; then
echo "Unable to find testbed '$TESTBED_NAME' in testbed file '$TESTBED_FILE'"
show_help_and_exit 4
fi
IFS=',' read -ra ARRAY <<< "$LINE"
DUT_NAME=${ARRAY[9]//[\[\] ]/}
elif [[ $TESTBED_FILE == *.yaml ]];
then
content=$(python3 -c "from __future__ import print_function; import yaml; print('+'.join(str(tb) for tb in yaml.safe_load(open('$TESTBED_FILE')) if '$TESTBED_NAME'==tb['conf-name']))")
if [[ -z ${content} ]]; then
echo "Unable to find testbed '$TESTBED_NAME' in testbed file '$TESTBED_FILE'"
show_help_and_exit 4
fi
IFS=$'+' read -r -a tb_lines <<< $content
tb_line=${tb_lines[0]}
DUT_NAME=$(python3 -c "from __future__ import print_function; tb=eval(\"$tb_line\"); print(\",\".join(tb[\"dut\"]))")
fi
fi
}
function validate_parameters()
{
RET=0
if [[ -z ${DUT_NAME} ]]; then
echo "DUT name (-d) is not set.."
RET=1
fi
if [[ -z ${TESTBED_NAME} ]]; then
echo "Testbed name (-n) is not set.."
RET=2
fi
if [[ -z ${TOPOLOGY} && -z ${TEST_CASES} && -z ${TEST_CASES_FILE} ]]; then
echo "Neither TOPOLOGY (-t) nor test case list (-c) nor test case list file (-F) is set.."
RET=3
fi
if [[ ${TEST_CASES} && ${TEST_CASES_FILE} ]]; then
echo "Specified both a test case list (-c) and a test case list file (-F).."
RET=4
fi
if [[ ${RET} != 0 ]]; then
show_help_and_exit ${RET}
fi
}
function setup_environment()
{
SCRIPT=$0
FULL_PATH=$(realpath ${SCRIPT})
SCRIPT_PATH=$(dirname ${FULL_PATH})
BASE_PATH=$(dirname ${SCRIPT_PATH})
LOG_PATH="logs"
AUTO_RECOVER="True"
BYPASS_UTIL="False"
CLI_LOG_LEVEL='warning'
EXTRA_PARAMETERS=""
FILE_LOG_LEVEL='debug'
INCLUDE_FOLDERS=""
INVENTORY="${BASE_PATH}/ansible/lab,${BASE_PATH}/ansible/veos"
KUBE_MASTER_ID="unset"
OMIT_FILE_LOG="False"
RETAIN_SUCCESS_LOG="False"
SKIP_SCRIPTS=""
SKIP_FOLDERS="ptftests acstests saitests scripts k8s sai_qualify"
TESTBED_FILE="${BASE_PATH}/ansible/testbed.yaml"
TEST_CASES=""
TEST_FILTER=""
TEST_INPUT_ORDER="False"
TEST_METHOD='group'
TEST_MAX_FAIL=0
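# Point Ansible at the configuration, library modules and plugins shipped in the repo's
# ansible/ directory so that the test framework uses them.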
export ANSIBLE_CONFIG=${BASE_PATH}/ansible
export ANSIBLE_LIBRARY=${BASE_PATH}/ansible/library/
export ANSIBLE_CONNECTION_PLUGINS=${BASE_PATH}/ansible/plugins/connection
export ANSIBLE_CLICONF_PLUGINS=${BASE_PATH}/ansible/cliconf_plugins
export ANSIBLE_TERMINAL_PLUGINS=${BASE_PATH}/ansible/terminal_plugins
# Kill pytest and ansible-playbook process
pkill --signal 9 pytest
pkill --signal 9 ansible-playbook
# Kill ssh initiated by ansible, try to match full command begins with 'ssh' and contains path '/.ansible'
pkill --signal 9 -f "^ssh.*/\.ansible"
rm -fr ${BASE_PATH}/tests/_cache
}
function setup_test_options()
{
# If a test script is explicitly specified on the pytest command line, using `--ignore` to exclude it will not work.
# The logic below ensures that SKIP_FOLDERS and SKIP_SCRIPTS take precedence over the specified TEST_CASES.
# If a test script is in both ${TEST_CASES} and ${SKIP_SCRIPTS}, the script will not be executed. This design is
# for the scenario of specifying test scripts with a pattern like `subfolder/test_*.py`. The pattern is expanded
# by bash to the matching test scripts. Among the expanded scripts, we may want to skip a few, which can then be
# explicitly specified as scripts to be skipped.
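# For example (hypothetical script names): `-c 'platform_tests/test_*.py' -s platform_tests/test_example.py`
# runs every matched platform test except test_example.py.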
ignores=$(python3 -c "print('|'.join('''$SKIP_FOLDERS'''.split()))")
if [[ -z ${TEST_CASES} && -z ${TEST_CASES_FILE} ]]; then
# When TEST_CASES is not specified, find all the possible scripts and ignore the scripts under $SKIP_FOLDERS.
# Also drop test_pretest.py and test_posttest.py, which are executed separately via the pretest/posttest
# markers in prepare_dut and cleanup_dut.
all_scripts=$(find ./ -name 'test_*.py' | sed s:^./:: | grep -vE "^(${ignores})" \
| grep -vE "(^|/)(test_pretest|test_posttest)\.py$")
else
if [[ ${TEST_CASES_FILE} ]]; then
TEST_CASES="${TEST_CASES} $(cat ${TEST_CASES_FILE} | tr '\n' ' ')"
fi
# When TEST_CASES is specified, ignore the scripts under $SKIP_FOLDERS
all_scripts=""
for test_script in ${TEST_CASES}; do
all_scripts="${all_scripts} $(echo ${test_script} | sed s:^./:: | grep -vE "^(${ignores})")"
done
fi
# Ignore the scripts specified in $SKIP_SCRIPTS
if [[ x"${TEST_INPUT_ORDER}" == x"True" ]]; then
TEST_CASES=$(python3 -c "print('\n'.join([testcase for testcase in list('''$all_scripts'''.split()) if testcase not in set('''$SKIP_SCRIPTS'''.split())]))")
else
TEST_CASES=$(python3 -c "print('\n'.join(set('''$all_scripts'''.split()) - set('''$SKIP_SCRIPTS'''.split())))" | sort)
fi
# Check against $INCLUDE_FOLDERS, filter out test cases not in the specified folders
FINAL_CASES=""
includes=$(python3 -c "print('|'.join('''$INCLUDE_FOLDERS'''.split()))")
for test_case in ${TEST_CASES}; do
FINAL_CASES="${FINAL_CASES} $(echo ${test_case} | grep -E "^(${includes})")"
done
TEST_CASES=$(python3 -c "print('\n'.join('''${FINAL_CASES}'''.split()))")
if [[ -z $TEST_CASES ]]; then
echo "No test case to run based on conditions of '-c', '-I' and '-S'. Please check..."
show_help_and_exit 1
fi
PYTEST_COMMON_OPTS="--inventory ${INVENTORY} \
--host-pattern ${DUT_NAME} \
--testbed ${TESTBED_NAME} \
--testbed_file ${TESTBED_FILE} \
--log-cli-level ${CLI_LOG_LEVEL} \
--log-file-level ${FILE_LOG_LEVEL} \
--kube_master ${KUBE_MASTER_ID} \
--showlocals \
--assert plain \
--show-capture no \
-rav"
if [[ x"${AUTO_RECOVER}" == x"True" ]]; then
PYTEST_COMMON_OPTS="${PYTEST_COMMON_OPTS} --allow_recover"
fi
for skip in ${SKIP_SCRIPTS} ${SKIP_FOLDERS}; do
if [[ $skip == *"::"* ]]; then
PYTEST_COMMON_OPTS="${PYTEST_COMMON_OPTS} --deselect=${skip}"
else
PYTEST_COMMON_OPTS="${PYTEST_COMMON_OPTS} --ignore=${skip}"
fi
done
if [[ ! -z $TEST_FILTER ]]; then
PYTEST_COMMON_OPTS="${PYTEST_COMMON_OPTS} -k ${TEST_FILTER}"
fi
if [[ -d ${LOG_PATH} ]]; then
rm -rf ${LOG_PATH}
fi
if [[ x"${OMIT_FILE_LOG}" == x"True" ]]; then
PRET_LOGGING_OPTIONS=""
POST_LOGGING_OPTIONS=""
TEST_LOGGING_OPTIONS=""
else
mkdir -p ${LOG_PATH}
PRET_LOGGING_OPTIONS="--junit-xml=${LOG_PATH}/pretest.xml --log-file=${LOG_PATH}/pretest.log"
POST_LOGGING_OPTIONS="--junit-xml=${LOG_PATH}/posttest.xml --log-file=${LOG_PATH}/posttest.log"
TEST_LOGGING_OPTIONS="--junit-xml=${LOG_PATH}/tr.xml --log-file=${LOG_PATH}/test.log"
fi
UTIL_TOPOLOGY_OPTIONS="--topology util"
if [[ -z ${TOPOLOGY} ]]; then
TEST_TOPOLOGY_OPTIONS=""
else
TEST_TOPOLOGY_OPTIONS="--topology ${TOPOLOGY}"
fi
PYTEST_UTIL_OPTS=${PYTEST_COMMON_OPTS}
# Max failure is only applicable to the test session, not to the preparation and cleanup sessions.
if [[ ${TEST_MAX_FAIL} != 0 ]]; then
PYTEST_COMMON_OPTS="${PYTEST_COMMON_OPTS} --maxfail=${TEST_MAX_FAIL}"
fi
}
function run_debug_tests()
{
echo "=== Show test settings ==="
echo "SCRIPT: ${SCRIPT}"
echo "FULL_PATH: ${FULL_PATH}"
echo "SCRIPT_PATH: ${SCRIPT_PATH}"
echo "BASE_PATH: ${BASE_PATH}"
echo "ANSIBLE_CONFIG: ${ANSIBLE_CONFIG}"
echo "ANSIBLE_LIBRARY: ${ANSIBLE_LIBRARY}"
echo "AUTO_RECOVER: ${AUTO_RECOVER}"
echo "BYPASS_UTIL: ${BYPASS_UTIL}"
echo "CLI_LOG_LEVEL: ${CLI_LOG_LEVEL}"
echo "EXTRA_PARAMETERS: ${EXTRA_PARAMETERS}"
echo "FILE_LOG_LEVEL: ${FILE_LOG_LEVEL}"
echo "INCLUDE_FOLDERS: ${INCLUDE_FOLDERS}"
echo "INVENTORY: ${INVENTORY}"
echo "LOG_PATH: ${LOG_PATH}"
echo "OMIT_FILE_LOG: ${OMIT_FILE_LOG}"
echo "RETAIN_SUCCESS_LOG: ${RETAIN_SUCCESS_LOG}"
echo "SKIP_SCRIPTS: ${SKIP_SCRIPTS}"
echo "SKIP_FOLDERS: ${SKIP_FOLDERS}"
echo "TEST_CASES: ${TEST_CASES}"
echo "TEST_CASES_FILE: ${TEST_CASES_FILE}"
echo "TEST_FILTER: ${TEST_FILTER}"
echo "TEST_INPUT_ORDER: ${TEST_INPUT_ORDER}"
echo "TEST_MAX_FAIL: ${TEST_MAX_FAIL}"
echo "TEST_METHOD: ${TEST_METHOD}"
echo "TESTBED_FILE: ${TESTBED_FILE}"
echo "TEST_LOGGING_OPTIONS: ${TEST_LOGGING_OPTIONS}"
echo "TEST_TOPOLOGY_OPTIONS: ${TEST_TOPOLOGY_OPTIONS}"
echo "PRET_LOGGING_OPTIONS: ${PRET_LOGGING_OPTIONS}"
echo "POST_LOGGING_OPTIONS: ${POST_LOGGING_OPTIONS}"
echo "UTIL_TOPOLOGY_OPTIONS: ${UTIL_TOPOLOGY_OPTIONS}"
echo "PYTEST_COMMON_OPTS: ${PYTEST_COMMON_OPTS}"
}
# Extra parameters for pre/post test stage
function pre_post_extra_params()
{
local params=${EXTRA_PARAMETERS}
# The enable_macsec option controls whether the macsec links of the topology are enabled.
# It is used to verify that common test cases work as expected over macsec links.
# At the pre/post test stage, enabling macsec only wastes time and is not needed.
params=${params//--enable_macsec/}
echo $params
}
function prepare_dut()
{
echo "=== Preparing DUT for subsequent tests ==="
echo Running: python3 -m pytest ${PYTEST_UTIL_OPTS} ${PRET_LOGGING_OPTIONS} ${UTIL_TOPOLOGY_OPTIONS} $(pre_post_extra_params) -m pretest
python3 -m pytest ${PYTEST_UTIL_OPTS} ${PRET_LOGGING_OPTIONS} ${UTIL_TOPOLOGY_OPTIONS} $(pre_post_extra_params) -m pretest
}
function cleanup_dut()
{
echo "=== Cleaning up DUT after tests ==="
echo Running: python3 -m pytest ${PYTEST_UTIL_OPTS} ${POST_LOGGING_OPTIONS} ${UTIL_TOPOLOGY_OPTIONS} $(pre_post_extra_params) -m posttest
python3 -m pytest ${PYTEST_UTIL_OPTS} ${POST_LOGGING_OPTIONS} ${UTIL_TOPOLOGY_OPTIONS} $(pre_post_extra_params) -m posttest
}
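
# run_group_tests executes all selected scripts in a single pytest session, while run_individual_tests
# (below) invokes pytest once per script so that each script gets its own log/xml files and sanity
# check failures (rc 10/12/15) can abort the remaining scripts.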
function run_group_tests()
{
echo "=== Running tests in groups ==="
echo Running: python3 -m pytest ${TEST_CASES} ${PYTEST_COMMON_OPTS} ${TEST_LOGGING_OPTIONS} ${TEST_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS}
python3 -m pytest ${TEST_CASES} ${PYTEST_COMMON_OPTS} ${TEST_LOGGING_OPTIONS} ${TEST_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS} --cache-clear
}
function run_individual_tests()
{
EXIT_CODE=0
CACHE_CLEAR="--cache-clear"
echo "=== Running tests individually ==="
for test_script in ${TEST_CASES}; do
if [[ x"${OMIT_FILE_LOG}" != x"True" ]]; then
test_dir=$(dirname ${test_script})
script_name=$(basename ${test_script})
test_name=${script_name%.py}
if [[ ${test_dir} != "." ]]; then
mkdir -p ${LOG_PATH}/${test_dir}
fi
TEST_LOGGING_OPTIONS="--log-file ${LOG_PATH}/${test_dir}/${test_name}.log --junitxml=${LOG_PATH}/${test_dir}/${test_name}.xml"
fi
echo Running: python3 -m pytest ${test_script} ${PYTEST_COMMON_OPTS} ${TEST_LOGGING_OPTIONS} ${TEST_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS}
python3 -m pytest ${test_script} ${PYTEST_COMMON_OPTS} ${TEST_LOGGING_OPTIONS} ${TEST_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS} ${CACHE_CLEAR}
ret_code=$?
# The pytest cache only needs to be cleared on the first run; drop the flag afterwards.
if [[ -n ${CACHE_CLEAR} ]]; then
CACHE_CLEAR=""
fi
# If test passed, no need to keep its log.
if [ ${ret_code} -eq 0 ]; then
if [[ x"${OMIT_FILE_LOG}" != x"True" && x"${RETAIN_SUCCESS_LOG}" == x"False" ]]; then
rm -f ${LOG_PATH}/${test_dir}/${test_name}.log
fi
else
# rc 10 means the pre-test sanity check failed, rc 12 means both the pre-test and post-test sanity checks failed
if [ ${ret_code} -eq 10 ] || [ ${ret_code} -eq 12 ]; then
echo "=== Sanity check failed for $test_script. Skipping the remaining scripts, if any. ==="
return ${ret_code}
fi
# rc 15 means the duthosts fixture failed
if [ ${ret_code} -eq 15 ]; then
echo "=== duthosts fixture failed for $test_script. Skipping the remaining scripts, if any. ==="
return ${ret_code}
fi
EXIT_CODE=1
if [[ ${TEST_MAX_FAIL} != 0 ]]; then
return ${EXIT_CODE}
fi
fi
done
return ${EXIT_CODE}
}
setup_environment
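
# Parse the command line options; see show_help_and_exit above for the meaning of each option.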
while getopts "h?a:b:c:C:d:e:Ef:F:i:I:k:l:m:n:oOp:q:rs:S:t:ux" opt; do
case ${opt} in
h|\? )
show_help_and_exit 0
;;
a )
AUTO_RECOVER=${OPTARG}
;;
b )
KUBE_MASTER_ID=${OPTARG}
SKIP_FOLDERS=${SKIP_FOLDERS//k8s/}
;;
c )
TEST_CASES="${TEST_CASES} ${OPTARG}"
;;
C )
TEST_FILTER="${TEST_FILTER} ${OPTARG}"
;;
d )
DUT_NAME=${OPTARG}
;;
e )
EXTRA_PARAMETERS="${EXTRA_PARAMETERS} ${OPTARG}"
;;
E )
set -e
;;
f )
TESTBED_FILE=${OPTARG}
;;
F )
TEST_CASES_FILE="${OPTARG}"
;;
i )
INVENTORY=${OPTARG}
;;
I )
INCLUDE_FOLDERS="${INCLUDE_FOLDERS} ${OPTARG}"
;;
k )
FILE_LOG_LEVEL=${OPTARG}
;;
l )
CLI_LOG_LEVEL=${OPTARG}
;;
m )
TEST_METHOD=${OPTARG}
;;
n )
TESTBED_NAME=${OPTARG}
;;
o )
OMIT_FILE_LOG="True"
;;
O )
TEST_INPUT_ORDER="True"
;;
p )
LOG_PATH=${OPTARG}
;;
q )
TEST_MAX_FAIL=${OPTARG}
;;
r )
RETAIN_SUCCESS_LOG="True"
;;
s )
SKIP_SCRIPTS="${SKIP_SCRIPTS} ${OPTARG}"
;;
S )
SKIP_FOLDERS="${SKIP_FOLDERS} ${OPTARG}"
;;
t )
TOPOLOGY=${OPTARG}
;;
u )
BYPASS_UTIL="True"
;;
x )
set -x
;;
esac
done
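
# Main flow: derive the DUT name, validate the parameters, build the pytest options, then run the
# pretest stage, the selected test method and the posttest stage. The pre/post stages are skipped
# with -u or when the test method is 'debug'.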
get_dut_from_testbed_file
if [[ x"${TEST_METHOD}" != x"debug" ]]; then
validate_parameters
fi
setup_test_options
if [[ x"${TEST_METHOD}" != x"debug" && x"${BYPASS_UTIL}" == x"False" ]]; then
RESULT=0
prepare_dut || RESULT=$?
if [[ ${RESULT} != 0 ]]; then
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
echo "!!!!! Prepare DUT failed, skip testing !!!!!"
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
# Exit with the specific code 65 when pretest fails.
# User-defined exit codes are in the range 64 - 113.
# The nightly test pipeline can check this code to decide whether to fail the pipeline.
exit 65
fi
fi
RC=0
run_${TEST_METHOD}_tests || RC=$?
if [[ x"${TEST_METHOD}" != x"debug" && x"${BYPASS_UTIL}" == x"False" ]]; then
cleanup_dut
fi
exit ${RC}