Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

testsuite: sharness sync #206

Merged
merged 2 commits into from
Sep 4, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
63 changes: 4 additions & 59 deletions t/sharness.d/02-flux-sharness.sh
Original file line number Diff line number Diff line change
Expand Up @@ -151,73 +151,18 @@ test_on_rank() {
flux exec --rank=${ranks} "$@"
}

#
# Check for a program and skip all tests immediately if not found.
# Exports the program in SHARNESS_test_skip_all_prereq for later
# check in TEST_CHECK_PREREQS
#
skip_all_unless_have()
{
    # POSIX `command -v` replaces non-standard which(1), whose output
    # and exit status vary across platforms:
    prog_path=$(command -v "$1" 2>/dev/null)
    if test -z "$prog_path"; then
        skip_all="$1 not found. Skipping all tests"
        test_done
    fi
    # Export <name>=<path> so tests can invoke the tool as $<name>.
    # Expand prog_path inside the eval'd assignment (\$) so a path
    # containing spaces cannot be word-split or misparsed:
    eval "$1=\$prog_path"
    export SHARNESS_test_skip_all_prereq="$SHARNESS_test_skip_all_prereq,$1"
}

GLOBAL_PROGRAM_PREREQS="HAVE_JQ:jq"

#
# Check for programs in GLOBAL_PROGRAM_PREREQS and set prereq and
# "<name>=<program_path>" if found. If TEST_CHECK_PREREQS is set, then
# create a wrapper script in trash-directory/bin which will ensure the
# prereq (or global skip_all above) has been used before each invocation
# of program. This will catch places in testsuite where program is used
# without testing the prerequisite.
#
for prereq in $GLOBAL_PROGRAM_PREREQS; do
    # Each entry has the form "<PREREQ_NAME>:<program>":
    prog=${prereq#*:}
    # POSIX `command -v` replaces non-standard which(1); /bin/false is
    # used as a sentinel path when the program is not installed:
    path_prog=$(command -v ${prog} 2>/dev/null || echo "/bin/false")
    req=${prereq%:*}
    test "${path_prog}" = "/bin/false" || test_set_prereq ${req}
    # Export <program>=<path> for direct use in tests:
    eval "${prog}=${path_prog}"
    if test -n "$TEST_CHECK_PREREQS"; then
        dir=${SHARNESS_TRASH_DIRECTORY}/bin
        mkdir -p ${dir}
        # Generate a wrapper that refuses to exec the real program unless
        # the current test declared prereq $req (or the skip_all machinery
        # recorded $prog). <<-EOF strips all leading tabs, so the wrapper
        # is written without indentation:
        cat <<-EOF > ${dir}/$prog
	#!/bin/sh
	saved_IFS=\$IFS
	IFS=,
	for x in \$test_prereq; do
		test "\$x" = "$req" && ok=t
	done
	for x in \$SHARNESS_test_skip_all_prereq; do
		test "\$x" = "$prog" && ok=t
	done
	test -n "\$ok" && exec $path_prog "\$@"
	echo >&2 "Use of $prog without prereq $req!"
	exit 1
	EOF
        chmod +x ${dir}/$prog
        # Override $prog to point to wrapper script:
        eval "${prog}=${dir}/${prog}"
    fi
done

# When prereq checking is enabled, put the generated wrapper-script bin
# directory first in PATH so the wrappers shadow the real programs.
case "${TEST_CHECK_PREREQS}" in
"") ;;
*)  PATH="${SHARNESS_TRASH_DIRECTORY}/bin:${PATH}"
    export PATH
    ;;
esac

# Export a shorter alias for this test's sharness-assigned name
export TEST_NAME="${SHARNESS_TEST_NAME}"

# Test requirements for testsuite
# jq is used throughout the testsuite; abort early with a clear message
# via sharness error() rather than letting individual tests fail obscurely.
if ! command -v jq >/dev/null; then
    error "jq is required for the flux-coral2 testsuite"
fi
# Probe for the lua posix module; wrapped in run_timeout (defined elsewhere
# in this file) so a hung lua invocation cannot stall the suite.
if ! run_timeout 10.0 lua -e 'require "posix"'; then
    error "failed to find lua posix module in path"
fi
# Cache the absolute path to jq so tests can invoke it as $jq.
jq=$(command -v jq)

# Some tests in flux don't work with --chain-lint, add a prereq for
# --no-chain-lint:
Expand Down
10 changes: 5 additions & 5 deletions t/t1001-cray-pals.t
Original file line number Diff line number Diff line change
Expand Up @@ -158,7 +158,7 @@ test_expect_success 'shell: pals shell plugin creates apinfo file' '
&& test ! -z \$PALS_APINFO && test -f \$PALS_APINFO"
'

test_expect_success HAVE_JQ 'shell: apinfo file contents are valid for one task' '
test_expect_success 'shell: apinfo file contents are valid for one task' '
apinfo=$(flux run -o userrc=$(pwd)/$USERRC_NAME -N1 -n1 ${PYTHON:-python3} \
${SHARNESS_TEST_SRCDIR}/scripts/apinfo_checker.py) &&
echo "$apinfo" | jq -e ".version == 1" &&
Expand All @@ -175,7 +175,7 @@ test_expect_success HAVE_JQ 'shell: apinfo file contents are valid for one task'
echo "$apinfo" | jq -e ".pes | length == 1"
'

test_expect_success HAVE_JQ 'shell: apinfo file contents are valid for multiple tasks' '
test_expect_success 'shell: apinfo file contents are valid for multiple tasks' '
apinfo=$(flux run -o userrc=$(pwd)/$USERRC_NAME -N1 -n2 --label-io \
${PYTHON:-python3} ${SHARNESS_TEST_SRCDIR}/scripts/apinfo_checker.py \
| sed -n "s/^1: //p") &&
Expand All @@ -194,7 +194,7 @@ test_expect_success HAVE_JQ 'shell: apinfo file contents are valid for multiple
echo "$apinfo" | jq -e ".pes | length == 2"
'

test_expect_success HAVE_JQ 'shell: apinfo file contents are valid for multiple nodes' '
test_expect_success 'shell: apinfo file contents are valid for multiple nodes' '
apinfo=$(flux run -o userrc=$(pwd)/$USERRC_NAME -N2 -n4 --label-io \
${PYTHON:-python3} ${SHARNESS_TEST_SRCDIR}/scripts/apinfo_checker.py \
| sed -n "s/^1: //p") &&
Expand All @@ -217,7 +217,7 @@ test_expect_success HAVE_JQ 'shell: apinfo file contents are valid for multiple
echo "$apinfo" | jq -e ".pes | length == 4"
'

test_expect_success HAVE_JQ 'shell: apinfo file contents are valid with cyclic task placement' '
test_expect_success 'shell: apinfo file contents are valid with cyclic task placement' '
apinfo=$(flux run -o userrc=$(pwd)/$USERRC_NAME -N2 -n4 --label-io --taskmap=cyclic \
${PYTHON:-python3} ${SHARNESS_TEST_SRCDIR}/scripts/apinfo_checker.py \
| sed -n "s/^1: //p") &&
Expand All @@ -243,7 +243,7 @@ test_expect_success HAVE_JQ 'shell: apinfo file contents are valid with cyclic t
echo "$apinfo" | jq -e ".pes | length == 4"
'

test_expect_success HAVE_JQ 'shell: apinfo file contents are valid with manual task placement' '
test_expect_success 'shell: apinfo file contents are valid with manual task placement' '
apinfo=$(flux run -o userrc=$(pwd)/$USERRC_NAME -N2 -n4 --label-io \
--taskmap="manual:[[1,1,2,1],[0,1,2,1]]" \
${PYTHON:-python3} ${SHARNESS_TEST_SRCDIR}/scripts/apinfo_checker.py \
Expand Down
20 changes: 10 additions & 10 deletions t/t2000-dws2jgf.t
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ if test_have_prereq NO_DWS_K8S; then
test_done
fi

test_expect_success HAVE_JQ 'smoke test to ensure the storage resources are expected' '
test_expect_success 'smoke test to ensure the storage resources are expected' '
test $(kubectl get storages | wc -l) -eq 3 &&
kubectl get storages kind-worker2 -ojson | jq -e ".status.access.computes | length == 3" &&
kubectl get storages kind-worker2 -ojson | jq -e ".status.access.computes[0].name == \"compute-01\"" &&
Expand All @@ -33,36 +33,36 @@ test_expect_success HAVE_JQ 'smoke test to ensure the storage resources are expe
test $(hostname) = compute-01
'

test_expect_success HAVE_JQ 'flux-rabbitmapping outputs expected mapping' '
# Generate the rabbit mapping and compare it to the checked-in fixture.
# The two commands must be chained with && — otherwise a failure of the
# flux-rabbitmapping.py invocation is silently ignored and only the
# test_cmp result decides the test.
test_expect_success 'flux-rabbitmapping outputs expected mapping' '
	flux python ${FLUX_SOURCE_DIR}/src/cmd/flux-rabbitmapping.py -i2 > rabbits.json &&
	test_cmp ${DATADIR}/rabbits.json rabbits.json
'

test_expect_success HAVE_JQ 'flux-dws2jgf.py outputs expected JGF for single compute node' '
# Encode a one-node resource set, convert it to JGF via flux-dws2jgf.py
# (normalized through jq), and compare against the DATADIR fixture.
test_expect_success 'flux-dws2jgf.py outputs expected JGF for single compute node' '
	flux R encode -Hcompute-01 | flux python ${CMD} --no-validate --cluster-name=ElCapitan \
		rabbits.json | jq . > actual-compute-01.jgf &&
	test_cmp ${DATADIR}/expected-compute-01.jgf actual-compute-01.jgf
'

test_expect_success HAVE_JQ 'flux-dws2jgf.py outputs expected JGF for multiple compute nodes' '
# Same conversion as above but with four compute nodes (5 cores each);
# output is jq-normalized before comparison with the fixture.
test_expect_success 'flux-dws2jgf.py outputs expected JGF for multiple compute nodes' '
	flux R encode -Hcompute-[01-04] -c0-4 | flux python ${CMD} --no-validate --cluster-name=ElCapitan \
		rabbits.json | jq . > actual-compute-01-04.jgf &&
	test_cmp ${DATADIR}/expected-compute-01-04.jgf actual-compute-01-04.jgf
'

test_expect_success HAVE_JQ 'flux-dws2jgf.py outputs expected JGF for compute nodes not in DWS' '
# Include hosts (nodws[0-5]) that do not appear in the DWS rabbit mapping;
# compare the resulting JGF with the fixture for that mixed case.
test_expect_success 'flux-dws2jgf.py outputs expected JGF for compute nodes not in DWS' '
	flux R encode -Hcompute-[01-04],nodws[0-5] -c0-4 | \
		flux python ${CMD} --no-validate rabbits.json | jq . > actual-compute-01-nodws.jgf &&
	test_cmp ${DATADIR}/expected-compute-01-nodws.jgf actual-compute-01-nodws.jgf
'

test_expect_success HAVE_JQ 'flux-dws2jgf.py handles properties correctly' '
# Feed a pre-built R with properties (DATADIR/R-properties) through the
# converter and verify properties survive into the generated JGF.
test_expect_success 'flux-dws2jgf.py handles properties correctly' '
	cat ${DATADIR}/R-properties | \
		flux python ${CMD} --no-validate rabbits.json | jq . > actual-properties.jgf &&
	test_cmp ${DATADIR}/expected-properties.jgf actual-properties.jgf
'

test_expect_success HAVE_JQ 'fluxion rejects a rack/rabbit job when no rabbits are recognized' '
test_expect_success 'fluxion rejects a rack/rabbit job when no rabbits are recognized' '
flux module remove -f sched-fluxion-qmanager &&
flux module remove -f sched-fluxion-resource &&
flux module reload resource &&
Expand All @@ -73,7 +73,7 @@ test_expect_success HAVE_JQ 'fluxion rejects a rack/rabbit job when no rabbits a
flux job wait-event -vt 2 ${JOBID} exception
'

test_expect_success HAVE_JQ 'fluxion can be loaded with output of dws2jgf' '
test_expect_success 'fluxion can be loaded with output of dws2jgf' '
flux run -n1 hostname &&
flux R encode -l | flux python ${CMD} --no-validate --cluster-name=ElCapitan rabbits.json \
| jq . > R.local &&
Expand All @@ -88,13 +88,13 @@ test_expect_success HAVE_JQ 'fluxion can be loaded with output of dws2jgf' '
flux job wait-event -vt 2 -m status=0 ${JOBID} finish
'

test_expect_success HAVE_JQ 'fluxion does not allocate a rack/rabbit job after adding down rabbits' '
# With rabbits marked down, the rack/rabbit job must NOT receive an alloc
# event within the timeout; the job is then canceled to clean up.
test_expect_success 'fluxion does not allocate a rack/rabbit job after adding down rabbits' '
	JOBID=$(flux job submit ${DATADIR}/rabbit-jobspec.json) &&
	test_must_fail flux job wait-event -vt 2 ${JOBID} alloc &&
	flux cancel $JOBID
'

test_expect_success HAVE_JQ 'fluxion allocates a rack/rabbit job when rabbit is up' '
test_expect_success 'fluxion allocates a rack/rabbit job when rabbit is up' '
${SHARNESS_TEST_SRCDIR}/scripts/set_status.py /ElCapitan0/rack0/ssd0 up &&
JOBID=$(flux job submit ${DATADIR}/rabbit-jobspec.json) &&
flux job wait-event -vt 2 -m status=0 ${JOBID} finish &&
Expand Down
Loading