test: Replace setup/teardown
martinpitt committed Feb 29, 2024
1 parent e6b5b15 commit c32b6d1
Showing 3 changed files with 30 additions and 48 deletions.
test/browser/browser.sh (8 changes: 2 additions & 6 deletions)
@@ -74,12 +74,8 @@ for retry in $(seq 5); do
     sleep $((5 * retry * retry))
 done

-# copy images for user podman tests; podman insists on user session
-loginctl enable-linger $(id -u admin)
-for img in localhost/test-alpine localhost/test-busybox localhost/test-registry; do
-    podman save $img | sudo -i -u admin podman load
-done
-loginctl disable-linger $(id -u admin)
+# image setup, shared with upstream tests
+$TESTS/../vm.install

 systemctl enable --now cockpit.socket podman.socket

test/check-application (59 changes: 23 additions & 36 deletions)
@@ -99,32 +99,27 @@ class TestApplication(testlib.MachineCase):
     def setUp(self):
         super().setUp()
         m = self.machine
-        m.execute("""
-            systemctl stop podman.service; systemctl --now enable podman.socket
-            # Ensure podman is really stopped, otherwise it keeps the containers/ directory busy
-            pkill -e -9 podman || true
-            while pgrep podman; do sleep 0.1; done
-            pkill -e -9 conmon || true
-            while pgrep conmon; do sleep 0.1; done
-            findmnt --list -otarget | grep /var/lib/containers/. | xargs -r umount
-            sync
-        """)

-        # backup/restore pristine podman state, so that tests can run on existing testbeds
-        self.restore_dir("/var/lib/containers")

-        # HACK: sometimes podman leaks mounts
-        self.addCleanup(m.execute, """
-            systemctl stop podman.service podman.socket
-            systemctl reset-failed podman.service podman.socket
-            podman system reset --force
-            pkill -e -9 podman || true
-            while pgrep podman; do sleep 0.1; done
-            pkill -e -9 conmon || true
-            while pgrep conmon; do sleep 0.1; done
-            findmnt --list -otarget | grep /var/lib/containers/. | xargs -r umount
-            sync
-        """)
+        # clean slate, and install our test images
+        INIT = """for img in /var/lib/test-images/*.tar; do podman load < "$img"; done
+            systemctl {user} --now enable podman.socket"""
+        m.execute(INIT.format(user=""))
+
+        CLEANUP = """set -x;
+            systemctl {user} stop podman.service podman.socket
+            systemctl {user} reset-failed podman.service podman.socket
+            podman system reset --force
+            # HACK: podman bug
+            pkill -u "$(id -u)" -e -9 conmon || true
+            while pgrep -u "$(id -u)" conmon; do sleep 0.1; done
+            """
+
+        # assert that this worked properly
+        CLEANUP_CHECK = "! ps ux | grep -E '[p]odman|[c]onmon'"
+
+        self.addCleanup(m.execute, CLEANUP_CHECK, stdout=None)
+        self.addCleanup(m.execute, CLEANUP.format(user=""), stdout=None)

         # Create admin session
         m.execute("""
@@ -141,17 +136,9 @@ class TestApplication(testlib.MachineCase):
                                     identity_file=m.identity_file)

         # Enable user service as well
-        self.admin_s.execute("systemctl --user stop podman.service; systemctl --now --user enable podman.socket")
-        self.restore_dir("/home/admin/.local/share/containers")
-        self.addCleanup(self.admin_s.execute, "systemctl --user stop podman.service podman.socket || true")
-        # Ubuntu 22.04 has old podman that does not know about --time
-        if m.image == 'ubuntu-2204':
-            self.addCleanup(self.admin_s.execute, "podman rm --force --all", timeout=300)
-            self.addCleanup(self.admin_s.execute, "podman pod rm --force --all", timeout=300)
-        else:
-            self.addCleanup(self.admin_s.execute, "podman rm --force --time 0 --all")
-            self.addCleanup(self.admin_s.execute, "podman pod rm --force --time 0 --all")
-
+        self.admin_s.execute(INIT.format(user="--user"))
+        self.addCleanup(self.admin_s.execute, CLEANUP_CHECK, stdout=None)
+        self.addCleanup(self.admin_s.execute, CLEANUP.format(user="--user"), stdout=None)
         # But disable it globally so that "systemctl --user disable" does what we expect
         m.execute("systemctl --global disable podman.socket")

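For reference, INIT and CLEANUP above are plain shell templates: the Python side only substitutes {user} with an empty string (system instance) or --user (admin session) and registers the teardown with addCleanup, whose callbacks run in reverse registration order, so CLEANUP_CHECK is registered first and therefore runs last, after CLEANUP has finished. A rough sketch of the expanded user-session teardown, under those assumptions:

# Expanded CLEANUP and CLEANUP_CHECK for user="--user"; a sketch only,
# the test generates these via str.format().
set -x
systemctl --user stop podman.service podman.socket
systemctl --user reset-failed podman.service podman.socket
podman system reset --force
pkill -u "$(id -u)" -e -9 conmon || true
while pgrep -u "$(id -u)" conmon; do sleep 0.1; done

# The check: the regex [p]odman|[c]onmon cannot match grep's own command line
# (the literal text "[p]odman" does not match the pattern [p]odman), so the
# negated pipeline succeeds only when no podman or conmon process survived.
! ps ux | grep -E '[p]odman|[c]onmon'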
test/vm.install (11 changes: 5 additions & 6 deletions)
@@ -26,13 +26,12 @@ fi
 # Since 4.0 podman now ships the pause image
 podman images --format '{{.Repository}}:{{.Tag}}' | grep -Ev 'localhost/test-|pause|cockpit/ws' | xargs -r podman rmi -f

-# copy images for user podman tests; podman insists on user session
-loginctl enable-linger $(id -u admin)
-images=$(podman images --format '{{.Repository}}:{{.Tag}}')
-for img in $images; do
-    podman save $img | sudo -i -u admin podman load
+# tests reset podman, save the images
+mkdir -p /var/lib/test-images
+for img in $(podman images --format '{{.Repository}}:{{.Tag}}'); do
+    fname="$(echo "$img" | tr -dc '[a-zA-Z-]')"
+    podman save -o "/var/lib/test-images/${fname}.tar" "$img"
 done
-loginctl disable-linger $(id -u admin)

 # 15minutes after boot tmp files are removed and podman stores some tmp lock files
 systemctl disable --now systemd-tmpfiles-clean.timer
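This vm.install hunk is the producer side of the new INIT loop in test/check-application: every remaining image is saved to /var/lib/test-images/<name>.tar, and the tests later load the whole directory back, both for the system podman instance and in the admin user session. A minimal sketch of that round trip, using an illustrative image tag and assuming GNU tr:

# Save: strip everything except letters, '-' and brackets from the image name,
# so "localhost/test-alpine:latest" becomes "localhosttest-alpinelatest".
img="localhost/test-alpine:latest"
fname="$(echo "$img" | tr -dc '[a-zA-Z-]')"
mkdir -p /var/lib/test-images
podman save -o "/var/lib/test-images/${fname}.tar" "$img"

# Load: what the test's INIT template does, once per podman instance.
for tar in /var/lib/test-images/*.tar; do podman load < "$tar"; done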
