diff --git a/test/browser/browser.sh b/test/browser/browser.sh
index a40c7ed37..0d5c581b2 100755
--- a/test/browser/browser.sh
+++ b/test/browser/browser.sh
@@ -74,12 +74,8 @@ for retry in $(seq 5); do
     sleep $((5 * retry * retry))
 done
 
-# copy images for user podman tests; podman insists on user session
-loginctl enable-linger $(id -u admin)
-for img in localhost/test-alpine localhost/test-busybox localhost/test-registry; do
-    podman save $img | sudo -i -u admin podman load
-done
-loginctl disable-linger $(id -u admin)
+# image setup, shared with upstream tests
+$TESTS/../vm.install
 
 systemctl enable --now cockpit.socket podman.socket
 
diff --git a/test/check-application b/test/check-application
index 9d790b2a5..57927f9df 100755
--- a/test/check-application
+++ b/test/check-application
@@ -99,32 +99,29 @@ class TestApplication(testlib.MachineCase):
     def setUp(self):
         super().setUp()
         m = self.machine
-        m.execute("""
-            systemctl stop podman.service; systemctl --now enable podman.socket
-            # Ensure podman is really stopped, otherwise it keeps the containers/ directory busy
-            pkill -e -9 podman || true
-            while pgrep podman; do sleep 0.1; done
-            pkill -e -9 conmon || true
-            while pgrep conmon; do sleep 0.1; done
-            findmnt --list -otarget | grep /var/lib/containers/. | xargs -r umount
-            sync
-        """)
-        # backup/restore pristine podman state, so that tests can run on existing testbeds
-        self.restore_dir("/var/lib/containers")
-
-        # HACK: sometimes podman leaks mounts
-        self.addCleanup(m.execute, """
-            systemctl stop podman.service podman.socket
-            systemctl reset-failed podman.service podman.socket
-            podman system reset --force
-            pkill -e -9 podman || true
-            while pgrep podman; do sleep 0.1; done
-            pkill -e -9 conmon || true
-            while pgrep conmon; do sleep 0.1; done
-            findmnt --list -otarget | grep /var/lib/containers/. | xargs -r umount
-            sync
-        """)
+        # clean slate, and install our test images
+        INIT = """for img in /var/lib/test-images/*.tar; do podman load < "$img"; done
+                  systemctl {user} --now enable podman.socket"""
+        m.execute(INIT.format(user=""))
+
+        # ideally `system reset` should clean up everything
+        # HACK: system reset has 10s timeout, make that faster with an extra `stop`
+        # https://github.com/containers/podman/issues/21874
+        CLEANUP = """systemctl {user} stop podman.service podman.socket
+                     systemctl {user} reset-failed podman.service podman.socket
+                     podman stop --time 0 --all
+                     podman pod stop --time 0 --all
+                     podman system reset --force
+                     """
+
+        # assert that this worked properly
+        CLEANUP_CHECK = """! pgrep -a -u "$(id -u)" podman
+                           ! pgrep -a -u "$(id -u)" conmon
+                           """
+
+        self.addCleanup(m.execute, CLEANUP_CHECK, stdout=None)
+        self.addCleanup(m.execute, CLEANUP.format(user=""), stdout=None)
 
         # Create admin session
         m.execute("""
@@ -141,17 +138,9 @@ class TestApplication(testlib.MachineCase):
             identity_file=m.identity_file)
 
         # Enable user service as well
-        self.admin_s.execute("systemctl --user stop podman.service; systemctl --now --user enable podman.socket")
-        self.restore_dir("/home/admin/.local/share/containers")
-        self.addCleanup(self.admin_s.execute, "systemctl --user stop podman.service podman.socket || true")
-        # Ubuntu 22.04 has old podman that does not know about --time
-        if m.image == 'ubuntu-2204':
-            self.addCleanup(self.admin_s.execute, "podman rm --force --all", timeout=300)
-            self.addCleanup(self.admin_s.execute, "podman pod rm --force --all", timeout=300)
-        else:
-            self.addCleanup(self.admin_s.execute, "podman rm --force --time 0 --all")
-            self.addCleanup(self.admin_s.execute, "podman pod rm --force --time 0 --all")
-
+        self.admin_s.execute(INIT.format(user="--user"))
+        self.addCleanup(self.admin_s.execute, CLEANUP_CHECK, stdout=None)
+        self.addCleanup(self.admin_s.execute, CLEANUP.format(user="--user"), stdout=None)
         # But disable it globally so that "systemctl --user disable" does what we expect
         m.execute("systemctl --global disable podman.socket")
 
diff --git a/test/vm.install b/test/vm.install
index eb2cd9890..f6c52cf53 100755
--- a/test/vm.install
+++ b/test/vm.install
@@ -26,13 +26,12 @@ fi
 # Since 4.0 podman now ships the pause image
 podman images --format '{{.Repository}}:{{.Tag}}' | grep -Ev 'localhost/test-|pause|cockpit/ws' | xargs -r podman rmi -f
 
-# copy images for user podman tests; podman insists on user session
-loginctl enable-linger $(id -u admin)
-images=$(podman images --format '{{.Repository}}:{{.Tag}}')
-for img in $images; do
-    podman save $img | sudo -i -u admin podman load
+# tests reset podman, save the images
+mkdir -p /var/lib/test-images
+for img in $(podman images --format '{{.Repository}}:{{.Tag}}'); do
+    fname="$(echo "$img" | tr -dc '[a-zA-Z-]')"
+    podman save -o "/var/lib/test-images/${fname}.tar" "$img"
 done
-loginctl disable-linger $(id -u admin)
 
 # 15minutes after boot tmp files are removed and podman stores some tmp lock files
 systemctl disable --now systemd-tmpfiles-clean.timer