add tier-0 test case #486

Merged 3 commits on Jul 24, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 18 additions & 2 deletions .packit.yaml
@@ -58,10 +58,11 @@ jobs:

- job: tests
trigger: pull_request
identifier: e2e-tiers
tmt_plan: /plans/e2e/tier-0
identifier: e2e-multi-bluechi-agents
tmt_plan: /plans/e2e/multi-bluechi-agents
targets:
- centos-stream-9-x86_64
manual_trigger: true
tf_extra_params:
environments:
- artifacts:
@@ -91,6 +92,21 @@ jobs:
- size: ">= 20 GB"
- size: ">= 20 GB"

- job: tests
trigger: pull_request
identifier: qm-tier-0
tmt_plan: /plans/e2e/tier-0
targets:
- centos-stream-9-x86_64
tf_extra_params:
environments:
- artifacts:
- type: repository-file
id: https://copr.fedorainfracloud.org/coprs/g/centos-automotive-sig/bluechi-snapshot/repo/centos-stream-9
hardware:
disk:
- size: ">= 20 GB"

- job: propose_downstream
trigger: release
update_release: false
11 changes: 0 additions & 11 deletions plans/e2e/ffi.fmf
@@ -2,7 +2,6 @@ summary: FFI - QM FreedomFromInterference

environment:
CONTROL_CONTAINER_NAME: host
FFI_SETUP_OPTIONS: none

discover:
how: fmf
@@ -11,17 +10,7 @@ discover:
provision:
how: local

prepare:
- name: Install rpms
how: install
package: podman

adjust:
- when: run == manual
environment+:
# Sample code to use manual packit repo
PACKIT_COPR_PROJECT: packit/containers-qm-291

- when: distro == centos-stream-9, fedora
environment+:
FFI_SETUP_OPTIONS: --set-qm-disk-part=yes
34 changes: 34 additions & 0 deletions plans/e2e/multi-bluechi-agents.fmf
@@ -0,0 +1,34 @@
summary: multiple bluechi-agents test - QM Interconnect through bluechi

discover:
how: fmf
filter: tag:multi-bluechi-agents

provision:
how: local

adjust:
- when: distro == centos-stream-9 or distro == fedora
prepare+:
- name: Prepare Repos
how: shell
script: |
dnf install -y dnf-plugin-config-manager epel-release
dnf config-manager -y --set-enabled crb

- name: install repos
how: install
package:
- podman

- name: Set QM env
how: shell
script: |
cd tests/e2e
./run-test-e2e --skip-tests=yes

execute:
how: tmt

report:
how: junit
38 changes: 9 additions & 29 deletions plans/e2e/tier-0.fmf
@@ -1,39 +1,19 @@
summary: Tier 0 - QM Interconnect through bluechi
summary: Tier 0 - QM sanity test

discover:
how: fmf
filter: tier:0

provision:
how: local

adjust:
- when: run == manual
environment+:
# Sample code to use manual packit repo
PACKIT_COPR_PROJECT: packit/containers-qm-291

- when: distro == centos-stream-9 or distro == fedora
prepare+:
- name: Prepare Repos
how: shell
script: |
dnf install -y dnf-plugin-config-manager epel-release
dnf config-manager -y --set-enabled crb

- name: install repos
how: install
package:
- podman

- name: Set QM env
how: shell
script: |
cd tests/e2e
./run-test-e2e --skip-tests=yes
prepare+:
- name: Set QM environment
how: shell
order: 50
script: |
cd tests/e2e
./set-ffi-env-e2e "${FFI_SETUP_OPTIONS}"

execute:
how: tmt

report:
how: junit
how: junit
2 changes: 1 addition & 1 deletion plans/e2e/tier-1.fmf
@@ -1,4 +1,4 @@
summary: Tier 0 - QM Interconnect through bluechi
summary: Tier 1 - QM Interconnect through bluechi

discover:
how: fmf
17 changes: 17 additions & 0 deletions plans/main.fmf
@@ -0,0 +1,17 @@
summary: general data used by the test plans

environment:
FFI_SETUP_OPTIONS: none

prepare:
- name: Install podman
how: install
order: 20
package:
- podman

adjust:
- when: run == manual
environment+:
# Sample code to use manual packit repo
PACKIT_COPR_PROJECT: packit/containers-qm-291
11 changes: 11 additions & 0 deletions tests/e2e/set-ffi-env-e2e
@@ -228,6 +228,17 @@ cat > /etc/bluechi/agent.conf.d/00-default.conf << 'EOF'
[bluechi-agent]
NodeName=localrootfs
EOF

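# Point the QM bluechi-agent at this host's bluechi-controller:
# append ControllerHost=<first local IP> to the QM agent config if it is not already present.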
controller_host_ip=$(hostname -I | awk '{print $1}')
qm_bluechi_agent_config_file="/etc/qm/bluechi/agent.conf.d/agent.conf"
if [[ -f "${qm_bluechi_agent_config_file}" ]]; then
if ! grep "ControllerHost=${controller_host_ip}" "${qm_bluechi_agent_config_file}" >/dev/null; then
sed -i '$a \ControllerHost='"${controller_host_ip}"'' ${qm_bluechi_agent_config_file}
fi
else
echo "Configuration file not found: ${qm_bluechi_agent_config_file}"
fi

# Enable services
info_message "Setup qm services, enable bluechi services"
info_message "=============================="
4 changes: 2 additions & 2 deletions tests/qm-connectivity/main.fmf
@@ -1,7 +1,7 @@
/tier0:
/multi-bluechi-agents:
summary: Test is calling e2e/lib/tests as stand alone test
test: ./test.sh
tier: 0
tag: multi-bluechi-agents
id: 7356c7cc-95aa-4a8e-9b02-2726f570add6
/tier1:
summary: Test is calling e2e/lib/tests AutoSD container
17 changes: 17 additions & 0 deletions tests/qm-sanity-test/README.md
@@ -0,0 +1,17 @@
# QM sanity test

This test suite runs basic sanity tests for qm to confirm that qm has been installed properly and that the qm environment has started successfully.

## This test suite includes the following tests

1. Confirm that bluechi-controller is active and bluechi-agent is online.

2. Confirm that the host and QM bluechi-agents are connected to bluechi-controller.

3. Confirm that qm is up and running.

4. Confirm that podman works inside qm.

5. Confirm that podman can run and exec a container inside qm via a systemd service file.

6. Confirm that the /var partition exists.
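
These tests are tagged `tier: 0` and are discovered by the /plans/e2e/tier-0 plan. A minimal sketch of running that plan locally with tmt, assuming tmt is installed and the command is run from a checkout of this repository (the plan provisions with `how: local`, so the tests execute directly on the current host):

```bash
# Sketch only: assumes tmt is installed and we are at the repository root.
# The tier-0 plan provisions locally, so these sanity tests run on this host.
tmt run -v plan --name /plans/e2e/tier-0
```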
6 changes: 6 additions & 0 deletions tests/qm-sanity-test/check_bluechi_controller_is_ok.fmf
@@ -0,0 +1,6 @@
summary: Test bluechi-controller is active and bluechi-agent is online
test: /bin/bash ./check_bluechi_controller_is_ok.sh
duration: 10m
tier: 0
framework: shell
id: 44d1f92b-2885-49d3-900a-23cde296c9d8
33 changes: 33 additions & 0 deletions tests/qm-sanity-test/check_bluechi_controller_is_ok.sh
@@ -0,0 +1,33 @@
#!/bin/bash -euvx

# shellcheck disable=SC1091
source ../e2e/lib/utils


# Verify bluechi-controller is up and bluechictl is ok
check_bluechi_controller_is_ok(){
if [ "$(systemctl is-active bluechi-controller)" == "active" ]; then
info_message "check_bluechi_controller_is_ok(): bluechi-controller is active."
info_message "PASS: check_bluechi_controller_is_ok()"
else
info_message "FAIL: check_bluechi_controller_is_ok(): bluechi-controller is not active."
exit 1
fi

regex_qm_localrootfs="qm.localrootfs * \| online"
regex_ASIL_localrootfs="localrootfs * \| online"
if [[ ! "$(bluechictl status)" =~ ${regex_qm_localrootfs} ]]; then
info_message "FAIL: check_bluechi_controller_is_ok: Checking QM bluechi-agent online failed.\n $(bluechictl status)"
exit 1
elif [[ ! "$(bluechictl status)" =~ ${regex_ASIL_localrootfs} ]]; then
info_message "FAIL: check_bluechi_controller_is_ok: Checking host bluechi-agent online failed.\n $(bluechictl status)"
exit 1
else
info_message "check_bluechi_controller_is_ok: QM bluechi-agent is online."
info_message "check_bluechi_controller_is_ok: host bluechi-agent is online."
info_message "PASS: check_bluechi_controller_is_ok()"
exit 0
fi
}

check_bluechi_controller_is_ok
6 changes: 6 additions & 0 deletions tests/qm-sanity-test/check_bluechi_is_ok.fmf
@@ -0,0 +1,6 @@
summary: Test host and QM bluechi-agent are connected to controller
test: /bin/bash ./check_bluechi_is_ok.sh
duration: 10m
tier: 0
framework: shell
id: 627318fa-b7bb-4d95-ab64-524911398a88
27 changes: 27 additions & 0 deletions tests/qm-sanity-test/check_bluechi_is_ok.sh
@@ -0,0 +1,27 @@
#!/bin/bash -euvx

# shellcheck disable=SC1091
source ../e2e/lib/utils


# Verify bluechi nodes are connected
check_bluechi_is_ok(){
bluechi_controller_status=$(systemctl status bluechi-controller | tail -2)
regex_ASIL_bluechi_agent="Registered managed node from fd [0-9]{1,2} as 'localrootfs'"
regex_QM_bluechi_agent="Registered managed node from fd [0-9]{1,2} as 'qm.localrootfs'"

if [[ ! "${bluechi_controller_status}" =~ ${regex_ASIL_bluechi_agent} ]]; then
info_message "FAIL: check_bluechi_is_ok: host bluechi-agent is not connected to controller.\n ${bluechi_controller_status}"
exit 1
elif [[ ! "${bluechi_controller_status}" =~ ${regex_QM_bluechi_agent} ]]; then
info_message "FAIL: check_bluechi_is_ok: QM bluechi-agent is not connected to controller.\n ${bluechi_controller_status}"
exit 1
else
info_message "check_bluechi_is_ok: host bluechi-agent is connected to controller."
info_message "check_bluechi_is_ok: QM bluechi-agent is connected to controller."
info_message "PASS: check_bluechi_is_ok()"
exit 0
fi
}

check_bluechi_is_ok
6 changes: 6 additions & 0 deletions tests/qm-sanity-test/check_qm_is_up.fmf
@@ -0,0 +1,6 @@
summary: Test qm is up and running
test: /bin/bash ./check_qm_is_up.sh
duration: 10m
tier: 0
framework: shell
id: b82a5766-275b-4635-9c2a-7ab3d8c6dc05
18 changes: 18 additions & 0 deletions tests/qm-sanity-test/check_qm_is_up.sh
@@ -0,0 +1,18 @@
#!/bin/bash -euvx

# shellcheck disable=SC1091
source ../e2e/lib/utils

# Verify qm is up and running
check_qm_is_up(){
if [ "$(systemctl is-active qm)" == "active" ]; then
info_message "check_qm_is_up(): qm is active"
info_message "PASS: check_qm_is_up()"
exit 0
else
info_message "FAIL: check_qm_is_up(): qm is not active"
exit 1
fi
}

check_qm_is_up
6 changes: 6 additions & 0 deletions tests/qm-sanity-test/check_qm_podman_is_ok.fmf
@@ -0,0 +1,6 @@
summary: Test podman in qm is ok
test: /bin/bash ./check_qm_podman_is_ok.sh
duration: 10m
tier: 0
framework: shell
id: ac4aac07-094b-4d22-b5f6-b85bc39da119
18 changes: 18 additions & 0 deletions tests/qm-sanity-test/check_qm_podman_is_ok.sh
@@ -0,0 +1,18 @@
#!/bin/bash -euvx

# shellcheck disable=SC1091
source ../e2e/lib/utils

# Verify podman in qm is ok
check_qm_podman_is_ok(){
if podman exec qm bash -c "podman info" > /dev/null; then
info_message "check_qm_podman_is_ok(): check 'podman info' in qm successfully."
info_message "PASS: check_qm_podman_is_ok()"
exit 0
else
info_message "FAIL: check_qm_podman_is_ok(): check 'podman info' in qm failed.\n $(podman exec qm bash -c "podman info")"
exit 1
fi
}

check_qm_podman_is_ok
6 changes: 6 additions & 0 deletions tests/qm-sanity-test/check_qm_podman_quadlet_is_ok.fmf
@@ -0,0 +1,6 @@
summary: Test podman run and exec container in qm with service file
test: /bin/bash ./check_qm_podman_quadlet_is_ok.sh
duration: 10m
tier: 0
framework: shell
id: b2071703-f9d4-4945-aa2d-0cf3abeaecb7
40 changes: 40 additions & 0 deletions tests/qm-sanity-test/check_qm_podman_quadlet_is_ok.sh
@@ -0,0 +1,40 @@
#!/bin/bash -euvx

# shellcheck disable=SC1091
source ../e2e/lib/utils

# Verify podman run and exec container inside qm with service file
check_qm_podman_quadlet_is_ok(){
info_message "check_qm_podman_quadlet_is_ok(): \
prepare quadlet files for qm-sanity-test.container"
cat > "/etc/qm/containers/systemd/qm-sanity-test.container" <<EOF
[Unit]
Description=the qm-sanity-test sleep container
After=local-fs.target

[Container]
Image=registry.access.redhat.com/ubi9-minimal:latest
Exec=sleep 1000

[Install]
# Start by default on boot
WantedBy=multi-user.target default.target
EOF

info_message "check_qm_podman_quadlet_is_ok(): qm-sanity-test container reload & restart"
exec_cmd_with_pass_info "podman exec qm systemctl daemon-reload"
exec_cmd_with_pass_info "podman exec qm systemctl start qm-sanity-test"
exec_cmd_with_pass_info "podman exec qm systemctl status qm-sanity-test | grep -i started"
exec_cmd_with_pass_info "podman exec qm podman run alpine echo Hello QM"

info_message "PASS: check_qm_podman_quadlet_is_ok()"
exit 0
}

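# Helper: run a command via exec_cmd (sourced from ../e2e/lib/utils) and log a PASS message when it succeeds.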
exec_cmd_with_pass_info(){
local command="$1"
exec_cmd "${command}"
info_message "PASS: Command ${command} successful"
}

check_qm_podman_quadlet_is_ok