From 375688837c3849cdce81130e961ff4a01f59423d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Mon, 15 Oct 2018 00:30:17 +0200 Subject: [PATCH 01/33] tests/integ/network: add type annotations Make PyCharm understand what mixin those objects are for. --- qubes/tests/integ/network.py | 142 +++++++++++++++++++++++++++++++---- 1 file changed, 126 insertions(+), 16 deletions(-) diff --git a/qubes/tests/integ/network.py b/qubes/tests/integ/network.py index 30f4f32e3..733c4b064 100644 --- a/qubes/tests/integ/network.py +++ b/qubes/tests/integ/network.py @@ -50,6 +50,9 @@ class VmNetworkingMixin(object): template = None def run_cmd(self, vm, cmd, user="root"): + ''' + :type self: qubes.tests.SystemTestCase | VmNetworkingMixin + ''' try: self.loop.run_until_complete(vm.run_for_stdio(cmd, user=user)) except subprocess.CalledProcessError as e: @@ -57,6 +60,9 @@ def run_cmd(self, vm, cmd, user="root"): return 0 def check_nc_version(self, vm): + ''' + :type self: qubes.tests.SystemTestCase | VMNetworkingMixin + ''' if self.run_cmd(vm, 'nc -h >/dev/null 2>&1') != 0: self.skipTest('nc not installed') if self.run_cmd(vm, 'nc -h 2>&1|grep -q nmap.org') == 0: @@ -65,6 +71,9 @@ def check_nc_version(self, vm): return NcVersion.Trad def setUp(self): + ''' + :type self: qubes.tests.SystemTestCase | VMNetworkingMixin + ''' super(VmNetworkingMixin, self).setUp() if self.template.startswith('whonix-'): self.skipTest("Test not supported here - Whonix uses its own " @@ -86,6 +95,9 @@ def setUp(self): def configure_netvm(self): + ''' + :type self: qubes.tests.SystemTestCase | VMNetworkingMixin + ''' def run_netvm_cmd(cmd): if self.run_cmd(self.testnetvm, cmd) != 0: self.fail("Command '%s' failed" % cmd) @@ -113,12 +125,18 @@ def run_netvm_cmd(cmd): def test_000_simple_networking(self): + ''' + :type self: qubes.tests.SystemTestCase | VMNetworkingMixin + ''' self.loop.run_until_complete(self.testvm1.start()) self.assertEqual(self.run_cmd(self.testvm1, self.ping_ip), 0) self.assertEqual(self.run_cmd(self.testvm1, self.ping_name), 0) def test_010_simple_proxyvm(self): + ''' + :type self: qubes.tests.SystemTestCase | VMNetworkingMixin + ''' self.proxy = self.app.add_new_vm(qubes.vm.appvm.AppVM, name=self.make_vm_name('proxy'), label='red') @@ -144,6 +162,9 @@ def test_010_simple_proxyvm(self): @unittest.skipUnless(spawn.find_executable('xdotool'), "xdotool not installed") def test_020_simple_proxyvm_nm(self): + ''' + :type self: qubes.tests.SystemTestCase | VMNetworkingMixin + ''' self.proxy = self.app.add_new_vm(qubes.vm.appvm.AppVM, name=self.make_vm_name('proxy'), label='red') @@ -189,6 +210,9 @@ def test_020_simple_proxyvm_nm(self): def test_030_firewallvm_firewall(self): + ''' + :type self: qubes.tests.SystemTestCase | VMNetworkingMixin + ''' self.proxy = self.app.add_new_vm(qubes.vm.appvm.AppVM, name=self.make_vm_name('proxy'), label='red') @@ -290,6 +314,9 @@ def test_030_firewallvm_firewall(self): def test_040_inter_vm(self): + ''' + :type self: qubes.tests.SystemTestCase | VMNetworkingMixin + ''' self.proxy = self.app.add_new_vm(qubes.vm.appvm.AppVM, name=self.make_vm_name('proxy'), label='red') @@ -327,7 +354,10 @@ def test_040_inter_vm(self): self.ping_cmd.format(target=self.testvm1.ip)), 0) def test_050_spoof_ip(self): - """Test if VM IP spoofing is blocked""" + '''Test if VM IP spoofing is blocked + + :type self: qubes.tests.SystemTestCase | VMNetworkingMixin + ''' self.loop.run_until_complete(self.testvm1.start()) self.assertEqual(self.run_cmd(self.testvm1, self.ping_ip), 
0) @@ -353,7 +383,10 @@ def test_050_spoof_ip(self): self.assertEquals(packets, '0', 'Some packet hit the INPUT rule') def test_100_late_xldevd_startup(self): - """Regression test for #1990""" + '''Regression test for #1990 + + :type self: qubes.tests.SystemTestCase | VMNetworkingMixin + ''' # Simulater late xl devd startup cmd = "systemctl stop xendriverdomain" if self.run_cmd(self.testnetvm, cmd) != 0: @@ -367,7 +400,10 @@ def test_100_late_xldevd_startup(self): self.assertEqual(self.run_cmd(self.testvm1, self.ping_ip), 0) def test_200_fake_ip_simple(self): - '''Test hiding VM real IP''' + '''Test hiding VM real IP + + :type self: qubes.tests.SystemTestCase | VMNetworkingMixin + ''' self.testvm1.features['net.fake-ip'] = '192.168.1.128' self.testvm1.features['net.fake-gateway'] = '192.168.1.1' self.testvm1.features['net.fake-netmask'] = '255.255.255.0' @@ -398,7 +434,10 @@ def test_200_fake_ip_simple(self): self.assertNotIn(str(self.testvm1.netvm.ip), output) def test_201_fake_ip_without_gw(self): - '''Test hiding VM real IP''' + '''Test hiding VM real IP + + :type self: qubes.tests.SystemTestCase | VMNetworkingMixin + ''' self.testvm1.features['net.fake-ip'] = '192.168.1.128' self.app.save() self.loop.run_until_complete(self.testvm1.start()) @@ -417,7 +456,10 @@ def test_201_fake_ip_without_gw(self): self.assertNotIn(str(self.testvm1.ip), output) def test_202_fake_ip_firewall(self): - '''Test hiding VM real IP, firewall''' + '''Test hiding VM real IP, firewall + + :type self: qubes.tests.SystemTestCase | VMNetworkingMixin + ''' self.testvm1.features['net.fake-ip'] = '192.168.1.128' self.testvm1.features['net.fake-gateway'] = '192.168.1.1' self.testvm1.features['net.fake-netmask'] = '255.255.255.0' @@ -468,7 +510,10 @@ def test_202_fake_ip_firewall(self): self.loop.run_until_complete(nc.wait()) def test_203_fake_ip_inter_vm_allow(self): - '''Access VM with "fake IP" from other VM (when firewall allows)''' + '''Access VM with "fake IP" from other VM (when firewall allows) + + :type self: qubes.tests.SystemTestCase | VMNetworkingMixin + ''' self.proxy = self.app.add_new_vm(qubes.vm.appvm.AppVM, name=self.make_vm_name('proxy'), label='red') @@ -521,7 +566,10 @@ def test_203_fake_ip_inter_vm_allow(self): 'Packets didn\'t managed to the VM') def test_204_fake_ip_proxy(self): - '''Test hiding VM real IP''' + '''Test hiding VM real IP + + :type self: qubes.tests.SystemTestCase | VMNetworkingMixin + ''' self.proxy = self.app.add_new_vm(qubes.vm.appvm.AppVM, name=self.make_vm_name('proxy'), label='red') @@ -582,7 +630,10 @@ def test_204_fake_ip_proxy(self): self.assertNotIn(str(self.proxy.ip), output) def test_210_custom_ip_simple(self): - '''Custom AppVM IP''' + '''Custom AppVM IP + + :type self: qubes.tests.SystemTestCase | VMNetworkingMixin + ''' self.testvm1.ip = '192.168.1.1' self.app.save() self.loop.run_until_complete(self.testvm1.start()) @@ -590,7 +641,10 @@ def test_210_custom_ip_simple(self): self.assertEqual(self.run_cmd(self.testvm1, self.ping_name), 0) def test_211_custom_ip_proxy(self): - '''Custom ProxyVM IP''' + '''Custom ProxyVM IP + + :type self: qubes.tests.SystemTestCase | VMNetworkingMixin + ''' self.proxy = self.app.add_new_vm(qubes.vm.appvm.AppVM, name=self.make_vm_name('proxy'), label='red') @@ -607,7 +661,10 @@ def test_211_custom_ip_proxy(self): self.assertEqual(self.run_cmd(self.testvm1, self.ping_name), 0) def test_212_custom_ip_firewall(self): - '''Custom VM IP and firewall''' + '''Custom VM IP and firewall + + :type self: qubes.tests.SystemTestCase | 
VMNetworkingMixin + ''' self.testvm1.ip = '192.168.1.1' self.proxy = self.app.add_new_vm(qubes.vm.appvm.AppVM, @@ -666,6 +723,9 @@ def setUp(self): self.ping6_name = self.ping6_cmd.format(target=self.test_name) def configure_netvm(self): + ''' + :type self: qubes.tests.SystemTestCase | VmIPv6NetworkingMixin + ''' self.testnetvm.features['ipv6'] = True super(VmIPv6NetworkingMixin, self).configure_netvm() @@ -683,12 +743,18 @@ def run_netvm_cmd(cmd): format(ip=self.test_ip, ip6=self.test_ip6, name=self.test_name)) def test_500_ipv6_simple_networking(self): + ''' + :type self: qubes.tests.SystemTestCase | VmIPv6NetworkingMixin + ''' self.loop.run_until_complete(self.testvm1.start()) self.assertEqual(self.run_cmd(self.testvm1, self.ping6_ip), 0) self.assertEqual(self.run_cmd(self.testvm1, self.ping6_name), 0) def test_510_ipv6_simple_proxyvm(self): + ''' + :type self: qubes.tests.SystemTestCase | VmIPv6NetworkingMixin + ''' self.proxy = self.app.add_new_vm(qubes.vm.appvm.AppVM, name=self.make_vm_name('proxy'), label='red') @@ -714,6 +780,9 @@ def test_510_ipv6_simple_proxyvm(self): @unittest.skipUnless(spawn.find_executable('xdotool'), "xdotool not installed") def test_520_ipv6_simple_proxyvm_nm(self): + ''' + :type self: qubes.tests.SystemTestCase | VmIPv6NetworkingMixin + ''' self.proxy = self.app.add_new_vm(qubes.vm.appvm.AppVM, name=self.make_vm_name('proxy'), label='red') @@ -764,6 +833,9 @@ def test_520_ipv6_simple_proxyvm_nm(self): def test_530_ipv6_firewallvm_firewall(self): + ''' + :type self: qubes.tests.SystemTestCase | VmIPv6NetworkingMixin + ''' self.proxy = self.app.add_new_vm(qubes.vm.appvm.AppVM, name=self.make_vm_name('proxy'), label='red') @@ -881,6 +953,9 @@ def test_530_ipv6_firewallvm_firewall(self): def test_540_ipv6_inter_vm(self): + ''' + :type self: qubes.tests.SystemTestCase | VmIPv6NetworkingMixin + ''' self.proxy = self.app.add_new_vm(qubes.vm.appvm.AppVM, name=self.make_vm_name('proxy'), label='red') @@ -920,7 +995,10 @@ def test_540_ipv6_inter_vm(self): def test_550_ipv6_spoof_ip(self): - """Test if VM IP spoofing is blocked""" + '''Test if VM IP spoofing is blocked + + :type self: qubes.tests.SystemTestCase | VmIPv6NetworkingMixin + ''' self.loop.run_until_complete(self.testvm1.start()) self.assertEqual(self.run_cmd(self.testvm1, self.ping6_ip), 0) @@ -949,7 +1027,10 @@ def test_550_ipv6_spoof_ip(self): self.assertEquals(packets, '0', 'Some packet hit the INPUT rule') def test_710_ipv6_custom_ip_simple(self): - '''Custom AppVM IP''' + '''Custom AppVM IP + + :type self: qubes.tests.SystemTestCase | VmIPv6NetworkingMixin + ''' self.testvm1.ip6 = '2000:aaaa:bbbb::1' self.app.save() self.loop.run_until_complete(self.testvm1.start()) @@ -957,7 +1038,10 @@ def test_710_ipv6_custom_ip_simple(self): self.assertEqual(self.run_cmd(self.testvm1, self.ping6_name), 0) def test_711_ipv6_custom_ip_proxy(self): - '''Custom ProxyVM IP''' + '''Custom ProxyVM IP + + :type self: qubes.tests.SystemTestCase | VmIPv6NetworkingMixin + ''' self.proxy = self.app.add_new_vm(qubes.vm.appvm.AppVM, name=self.make_vm_name('proxy'), label='red') @@ -974,7 +1058,10 @@ def test_711_ipv6_custom_ip_proxy(self): self.assertEqual(self.run_cmd(self.testvm1, self.ping6_name), 0) def test_712_ipv6_custom_ip_firewall(self): - '''Custom VM IP and firewall''' + '''Custom VM IP and firewall + + :type self: qubes.tests.SystemTestCase | VmIPv6NetworkingMixin + ''' self.testvm1.ip6 = '2000:aaaa:bbbb::1' self.proxy = self.app.add_new_vm(qubes.vm.appvm.AppVM, @@ -1099,6 +1186,10 @@ class VmUpdatesMixin(object): 
) def run_cmd(self, vm, cmd, user="root"): + ''' + + :type self: qubes.tests.SystemTestCase | VmUpdatesMixin + ''' try: self.loop.run_until_complete(vm.run_for_stdio(cmd)) except subprocess.CalledProcessError as e: @@ -1106,6 +1197,9 @@ def run_cmd(self, vm, cmd, user="root"): return 0 def setUp(self): + ''' + :type self: qubes.tests.SystemTestCase | VmUpdatesMixin + ''' if not self.template.count('debian') and \ not self.template.count('fedora'): self.skipTest("Template {} not supported by this test".format( @@ -1142,6 +1236,9 @@ def setUp(self): self.loop.run_until_complete(self.testvm1.create_on_disk()) def test_000_simple_update(self): + ''' + :type self: qubes.tests.SystemTestCase | VmUpdatesMixin + ''' self.app.save() # reload the VM to have all the properties properly set (especially # default netvm) @@ -1155,6 +1252,9 @@ def test_000_simple_update(self): '{}: {}\n{}'.format(self.update_cmd, stdout, stderr)) def create_repo_apt(self): + ''' + :type self: qubes.tests.SystemTestCase | VmUpdatesMixin + ''' pkg_file_name = "test-pkg_1.0-1_amd64.deb" self.loop.run_until_complete(self.netvm_repo.run_for_stdio(''' mkdir /tmp/apt-repo \ @@ -1209,6 +1309,9 @@ def create_repo_apt(self): ''')) def create_repo_yum(self): + ''' + :type self: qubes.tests.SystemTestCase | VmUpdatesMixin + ''' pkg_file_name = "test-pkg-1.0-1.fc21.x86_64.rpm" self.loop.run_until_complete(self.netvm_repo.run_for_stdio(''' mkdir /tmp/yum-repo \ @@ -1221,6 +1324,9 @@ def create_repo_yum(self): 'createrepo /tmp/yum-repo')) def create_repo_and_serve(self): + ''' + :type self: qubes.tests.SystemTestCase | VmUpdatesMixin + ''' if self.template.count("debian") or self.template.count("whonix"): self.create_repo_apt() self.loop.run_until_complete(self.netvm_repo.run( @@ -1242,6 +1348,8 @@ def configure_test_repo(self): The critical part is to use "localhost" - this will work only when accessed through update proxy and this is exactly what we want to test here. + + :type self: qubes.tests.SystemTestCase | VmUpdatesMixin """ if self.template.count("debian") or self.template.count("whonix"): @@ -1266,9 +1374,11 @@ def configure_test_repo(self): self.template)) def test_010_update_via_proxy(self): - """ + ''' Test both whether updates proxy works and whether is actually used by the VM - """ + + :type self: qubes.tests.SystemTestCase | VmUpdatesMixin + ''' if self.template.count("minimal"): self.skipTest("Template {} not supported by this test".format( self.template)) From 15140255d5244e9f3b5e0e9f31c573d23ec98546 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Mon, 15 Oct 2018 00:38:38 +0200 Subject: [PATCH 02/33] tests/integ/network: few more code style improvement Remove unused imports and unused variables, add some more docstrings. 
--- qubes/tests/integ/network.py | 40 +++++++++++++++++++++++------------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/qubes/tests/integ/network.py b/qubes/tests/integ/network.py index 733c4b064..980deec09 100644 --- a/qubes/tests/integ/network.py +++ b/qubes/tests/integ/network.py @@ -22,8 +22,6 @@ from distutils import spawn import asyncio -import multiprocessing -import os import subprocess import sys import time @@ -31,13 +29,15 @@ import qubes.tests import qubes.firewall +import qubes.vm.qubesvm import qubes.vm.appvm class NcVersion: Trad = 1 Nmap = 2 -# noinspection PyAttributeOutsideInit + +# noinspection PyAttributeOutsideInit,PyPep8Naming class VmNetworkingMixin(object): test_ip = '192.168.123.45' test_name = 'test.example.com' @@ -50,8 +50,12 @@ class VmNetworkingMixin(object): template = None def run_cmd(self, vm, cmd, user="root"): - ''' + '''Run a command *cmd* in a *vm* as *user*. Return its exit code. :type self: qubes.tests.SystemTestCase | VmNetworkingMixin + :param qubes.vm.qubesvm.QubesVM vm: VM object to run command in + :param str cmd: command to execute + :param std user: user to execute command as + :return int: command exit code ''' try: self.loop.run_until_complete(vm.run_for_stdio(cmd, user=user)) @@ -62,6 +66,7 @@ def run_cmd(self, vm, cmd, user="root"): def check_nc_version(self, vm): ''' :type self: qubes.tests.SystemTestCase | VMNetworkingMixin + :param vm: VM where check ncat version in ''' if self.run_cmd(vm, 'nc -h >/dev/null 2>&1') != 0: self.skipTest('nc not installed') @@ -535,9 +540,9 @@ def test_203_fake_ip_inter_vm_allow(self): self.loop.run_until_complete(self.testvm1.start()) self.loop.run_until_complete(self.testvm2.start()) + cmd = 'iptables -I FORWARD -s {} -d {} -j ACCEPT'.format( + self.testvm2.ip, self.testvm1.ip) try: - cmd = 'iptables -I FORWARD -s {} -d {} -j ACCEPT'.format( - self.testvm2.ip, self.testvm1.ip) self.loop.run_until_complete(self.proxy.run_for_stdio( cmd, user='root')) except subprocess.CalledProcessError as e: @@ -593,7 +598,7 @@ def test_204_fake_ip_proxy(self): (output, _) = self.loop.run_until_complete( self.proxy.run_for_stdio( 'ip addr show dev eth0', user='root')) - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: self.fail('ip addr show dev eth0 failed') output = output.decode() self.assertIn('192.168.1.128', output) @@ -603,7 +608,7 @@ def test_204_fake_ip_proxy(self): (output, _) = self.loop.run_until_complete( self.proxy.run_for_stdio( 'ip route show', user='root')) - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: self.fail('ip route show failed') output = output.decode() self.assertIn('192.168.1.1', output) @@ -613,7 +618,7 @@ def test_204_fake_ip_proxy(self): (output, _) = self.loop.run_until_complete( self.testvm1.run_for_stdio( 'ip addr show dev eth0', user='root')) - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: self.fail('ip addr show dev eth0 failed') output = output.decode() self.assertNotIn('192.168.1.128', output) @@ -623,7 +628,7 @@ def test_204_fake_ip_proxy(self): (output, _) = self.loop.run_until_complete( self.testvm1.run_for_stdio( 'ip route show', user='root')) - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: self.fail('ip route show failed') output = output.decode() self.assertIn('192.168.1.128', output) @@ -712,6 +717,7 @@ def test_212_custom_ip_firewall(self): nc.terminate() self.loop.run_until_complete(nc.wait()) +# noinspection 
PyAttributeOutsideInit,PyPep8Naming class VmIPv6NetworkingMixin(VmNetworkingMixin): test_ip6 = '2000:abcd::1' @@ -903,7 +909,8 @@ def test_530_ipv6_firewallvm_firewall(self): # block all except target self.testvm1.firewall.rules = [ - qubes.firewall.Rule(None, action='accept', dsthost=self.test_ip6, + qubes.firewall.Rule(None, action='accept', + dsthost=self.test_ip6, proto='tcp', dstports=1234), ] self.testvm1.firewall.save() @@ -1109,7 +1116,7 @@ def test_712_ipv6_custom_ip_firewall(self): nc.terminate() self.loop.run_until_complete(nc.wait()) -# noinspection PyAttributeOutsideInit +# noinspection PyAttributeOutsideInit,PyPep8Naming class VmUpdatesMixin(object): """ Tests for VM updates @@ -1186,9 +1193,13 @@ class VmUpdatesMixin(object): ) def run_cmd(self, vm, cmd, user="root"): - ''' + '''Run a command *cmd* in a *vm* as *user*. Return its exit code. :type self: qubes.tests.SystemTestCase | VmUpdatesMixin + :param qubes.vm.qubesvm.QubesVM vm: VM object to run command in + :param str cmd: command to execute + :param std user: user to execute command as + :return int: command exit code ''' try: self.loop.run_until_complete(vm.run_for_stdio(cmd)) @@ -1375,7 +1386,8 @@ def configure_test_repo(self): def test_010_update_via_proxy(self): ''' - Test both whether updates proxy works and whether is actually used by the VM + Test both whether updates proxy works and whether is actually used + by the VM :type self: qubes.tests.SystemTestCase | VmUpdatesMixin ''' From 3f5618dbb0aa7004252ba4239e8073280ec16c5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Mon, 15 Oct 2018 00:39:32 +0200 Subject: [PATCH 03/33] tests/integ/network: make the tests independend of default netvm Network tests create own temporary netvm. Make it disconnected from the real netvm, to not interefere in tests. --- qubes/tests/integ/network.py | 1 + 1 file changed, 1 insertion(+) diff --git a/qubes/tests/integ/network.py b/qubes/tests/integ/network.py index 980deec09..524dd5b88 100644 --- a/qubes/tests/integ/network.py +++ b/qubes/tests/integ/network.py @@ -89,6 +89,7 @@ def setUp(self): label='red') self.loop.run_until_complete(self.testnetvm.create_on_disk()) self.testnetvm.provides_network = True + self.testnetvm.netvm = None self.testvm1 = self.app.add_new_vm(qubes.vm.appvm.AppVM, name=self.make_vm_name('vm1'), label='red') From f1621c01e907d339e8a3fbbd978d5f162fbf0b96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Mon, 15 Oct 2018 05:08:25 +0200 Subject: [PATCH 04/33] tests: add search based on window class to wait_for_window Searching based on class is used in many tests, searching by class, not only by name in wait_for_window will allow to reduce code duplication. While at it, improve it additionally: - avoid active waiting for window and use `xdotool search --sync` instead - return found window id - add wait_for_window_coro() for use where coroutine is needed - when waiting for window to disappear, check window id once and wait for that particular window to disappear (avoid xdotool race conditions on window enumeration) Besides reducing code duplication, this also move various xdotool imperfections handling into one place. 
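For illustration (this note and snippet are not part of the patch): after this change, a test deriving from qubes.tests.SystemTestCase is expected to drive windows roughly as sketched below. `self.testvm1` stands for whichever qube the test started, and the snippet assumes the `subprocess` import already used throughout these test modules.

    # Find the qube's visible xterm window by class; the helper relies on
    # `xdotool search --sync`, so it blocks until the window shows
    # (up to the default 30 s timeout) and returns its window id.
    winid = self.wait_for_window(self.testvm1.name + ':xterm',
                                 search_class=True)
    # Type into that specific window instead of whichever window has focus.
    subprocess.check_call(['xdotool', 'type', '--window', winid, 'exit\n'])
    # Wait for the same window to disappear.
    self.wait_for_window(self.testvm1.name + ':xterm',
                         search_class=True, show=False)
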
--- qubes/tests/__init__.py | 82 +++++++++++++++++++++++++++++++++++------ 1 file changed, 70 insertions(+), 12 deletions(-) diff --git a/qubes/tests/__init__.py b/qubes/tests/__init__.py index 6bdf5977c..c3f3a2fd6 100644 --- a/qubes/tests/__init__.py +++ b/qubes/tests/__init__.py @@ -977,7 +977,25 @@ def qrexec_policy(self, service, source, destination, allow=True, return _QrexecPolicyContext(service, source, destination, allow=allow, action=action) - def wait_for_window(self, title, timeout=30, show=True): + @asyncio.coroutine + def wait_for_window_hide_coro(self, title, winid, timeout=30): + """ + Wait for window do disappear + :param winid: window id + :return: + """ + wait_count = 0 + while subprocess.call(['xdotool', 'getwindowname', str(winid)], + stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) == 0: + wait_count += 1 + if wait_count > timeout * 10: + self.fail("Timeout while waiting for {}({}) window to " + "disappear".format(title, winid)) + yield from asyncio.sleep(0.1) + + @asyncio.coroutine + def wait_for_window_coro(self, title, search_class=False, timeout=30, + show=True): """ Wait for a window with a given title. Depending on show parameter, it will wait for either window to show or to disappear. @@ -986,19 +1004,59 @@ def wait_for_window(self, title, timeout=30, show=True): :param timeout: timeout of the operation, in seconds :param show: if True - wait for the window to be visible, otherwise - to not be visible - :return: None + :param search_class: search based on window class instead of title + :return: window id of found window, if show=True """ - wait_count = 0 - while subprocess.call(['xdotool', 'search', '--name', title], - stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) \ - != int(not show): - wait_count += 1 - if wait_count > timeout*10: - self.fail("Timeout while waiting for {} window to {}".format( - title, "show" if show else "hide") - ) - self.loop.run_until_complete(asyncio.sleep(0.1)) + xdotool_search = ['xdotool', 'search', '--onlyvisible'] + if search_class: + xdotool_search.append('--class') + else: + xdotool_search.append('--name') + if show: + xdotool_search.append('--sync') + if not show: + try: + winid = subprocess.check_output(xdotool_search + [title], + stderr=subprocess.DEVNULL).decode() + except subprocess.CalledProcessError: + # already gone + return + yield from self.wait_for_window_hide_coro(winid, title, + timeout=timeout) + return + + winid = None + while not winid: + p = yield from asyncio.create_subprocess_exec( + *xdotool_search, title, + stderr=subprocess.DEVNULL, stdout=subprocess.PIPE) + try: + (winid, _) = yield from asyncio.wait_for( + p.communicate(), timeout) + # don't check exit code, getting winid on stdout is enough + # indicator of success; specifically ignore xdotool failing + # with BadWindow or such - when some window appears only for a + # moment by xdotool didn't manage to get its properties + except asyncio.TimeoutError: + self.fail( + "Timeout while waiting for {} window to show".format(title)) + return winid.decode().strip() + + def wait_for_window(self, *args, **kwargs): + """ + Wait for a window with a given title. Depending on show parameter, + it will wait for either window to show or to disappear. 
+ + :param title: title of the window to wait for + :param timeout: timeout of the operation, in seconds + :param show: if True - wait for the window to be visible, + otherwise - to not be visible + :param search_class: search based on window class instead of title + :return: window id of found window, if show=True + """ + return self.loop.run_until_complete( + self.wait_for_window_coro(*args, **kwargs)) def enter_keys_in_window(self, title, keys): """ From 9e81087b252bdd194ff6fb6b1010744f39194706 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Mon, 15 Oct 2018 05:16:29 +0200 Subject: [PATCH 05/33] tests: use improved wait_for_window in various tests Replace manual `xdotool search calls` with wait_for_window(), where compatible. --- qubes/tests/integ/dispvm.py | 46 ++++++++++-------------- qubes/tests/integ/vm_qrexec_gui.py | 58 +++++------------------------- 2 files changed, 27 insertions(+), 77 deletions(-) diff --git a/qubes/tests/integ/dispvm.py b/qubes/tests/integ/dispvm.py index af6f4f7b2..77c47cd3d 100644 --- a/qubes/tests/integ/dispvm.py +++ b/qubes/tests/integ/dispvm.py @@ -255,34 +255,26 @@ def test_030_edit_file(self): p = self.loop.run_until_complete( self.testvm1.run("qvm-open-in-dvm /home/user/test.txt")) - wait_count = 0 + # if first 5 windows isn't expected editor, there is no hope winid = None - while True: - search = self.loop.run_until_complete( - asyncio.create_subprocess_exec( - 'xdotool', 'search', '--onlyvisible', '--class', - 'disp[0-9]*', - stdout=subprocess.PIPE, - stderr=subprocess.DEVNULL)) - stdout, _ = self.loop.run_until_complete(search.communicate()) - if search.returncode == 0: - winid = stdout.strip() - # get window title - (window_title, _) = subprocess.Popen( - ['xdotool', 'getwindowname', winid], stdout=subprocess.PIPE). \ - communicate() - window_title = window_title.decode().strip() - # ignore LibreOffice splash screen and window with no title - # set yet - if window_title and not window_title.startswith("LibreOffice")\ - and not window_title == 'VMapp command' \ - and 'whonixcheck' not in window_title \ - and not window_title == 'NetworkManager Applet': - break - wait_count += 1 - if wait_count > 100: - self.fail("Timeout while waiting for editor window") - self.loop.run_until_complete(asyncio.sleep(0.3)) + for _ in range(5): + winid = self.wait_for_window('disp[0-9]*', search_class=True) + # get window title + (window_title, _) = subprocess.Popen( + ['xdotool', 'getwindowname', winid], stdout=subprocess.PIPE). 
\ + communicate() + window_title = window_title.decode().strip() + # ignore LibreOffice splash screen and window with no title + # set yet + if window_title and not window_title.startswith("LibreOffice")\ + and not window_title == 'VMapp command' \ + and 'whonixcheck' not in window_title \ + and not window_title == 'NetworkManager Applet': + break + self.loop.run_until_complete(asyncio.sleep(1)) + winid = None + if winid is None: + self.fail('Timeout waiting for editor window') time.sleep(0.5) self._handle_editor(winid) diff --git a/qubes/tests/integ/vm_qrexec_gui.py b/qubes/tests/integ/vm_qrexec_gui.py index a847a71d8..7b87e7867 100644 --- a/qubes/tests/integ/vm_qrexec_gui.py +++ b/qubes/tests/integ/vm_qrexec_gui.py @@ -76,32 +76,17 @@ def test_010_run_xterm(self): self.loop.run_until_complete(self.wait_for_session(self.testvm1)) p = self.loop.run_until_complete(self.testvm1.run('xterm')) try: - wait_count = 0 title = 'user@{}'.format(self.testvm1.name) if self.template.count("whonix"): title = 'user@host' - while subprocess.call( - ['xdotool', 'search', '--name', title], - stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) > 0: - wait_count += 1 - if wait_count > 100: - self.fail("Timeout while waiting for xterm window") - self.loop.run_until_complete(asyncio.sleep(0.1)) + self.wait_for_window(title) self.loop.run_until_complete(asyncio.sleep(0.5)) subprocess.check_call( ['xdotool', 'search', '--name', title, 'windowactivate', 'type', 'exit\n']) - wait_count = 0 - while subprocess.call(['xdotool', 'search', '--name', title], - stdout=open(os.path.devnull, 'w'), - stderr=subprocess.STDOUT) == 0: - wait_count += 1 - if wait_count > 100: - self.fail("Timeout while waiting for xterm " - "termination") - self.loop.run_until_complete(asyncio.sleep(0.1)) + self.wait_for_window(title, show=False) finally: try: p.terminate() @@ -124,15 +109,7 @@ def test_011_run_gnome_terminal(self): title = 'user@{}'.format(self.testvm1.name) if self.template.count("whonix"): title = 'user@host' - wait_count = 0 - while subprocess.call( - ['xdotool', 'search', '--name', title], - stdout=open(os.path.devnull, 'w'), - stderr=subprocess.STDOUT) > 0: - wait_count += 1 - if wait_count > 100: - self.fail("Timeout while waiting for gnome-terminal window") - self.loop.run_until_complete(asyncio.sleep(0.1)) + self.wait_for_window(title) self.loop.run_until_complete(asyncio.sleep(0.5)) subprocess.check_call( @@ -178,30 +155,14 @@ def test_012_qubes_desktop_run(self): title = 'user@{}'.format(self.testvm1.name) if self.template.count("whonix"): title = 'user@host' - wait_count = 0 - while subprocess.call( - ['xdotool', 'search', '--name', title], - stdout=open(os.path.devnull, 'w'), - stderr=subprocess.STDOUT) > 0: - wait_count += 1 - if wait_count > 100: - self.fail("Timeout while waiting for xterm window") - self.loop.run_until_complete(asyncio.sleep(0.1)) + self.wait_for_window(title) self.loop.run_until_complete(asyncio.sleep(0.5)) subprocess.check_call( ['xdotool', 'search', '--name', title, 'windowactivate', '--sync', 'type', 'exit\n']) - wait_count = 0 - while subprocess.call(['xdotool', 'search', '--name', title], - stdout=open(os.path.devnull, 'w'), - stderr=subprocess.STDOUT) == 0: - wait_count += 1 - if wait_count > 100: - self.fail("Timeout while waiting for xterm " - "termination") - self.loop.run_until_complete(asyncio.sleep(0.1)) + self.wait_for_window(title, show=False) def test_050_qrexec_simple_eof(self): """Test for data and EOF transmission dom0->VM""" @@ -1111,15 +1072,12 @@ def 
_test_300_bug_1028_gui_memory_pinning(self): proc = yield from self.testvm1.run( 'xterm -maximized -e top') - # help xdotool a little... - yield from asyncio.sleep(2) if proc.returncode is not None: self.fail('xterm failed to start') # get window ID - winid = (yield from asyncio.get_event_loop().run_in_executor(None, - subprocess.check_output, - ['xdotool', 'search', '--sync', '--onlyvisible', '--class', - self.testvm1.name + ':xterm'])).decode() + winid = yield from self.wait_for_window_coro( + self.testvm1.name + ':xterm', + search_class=True) xprop = yield from asyncio.get_event_loop().run_in_executor(None, subprocess.check_output, ['xprop', '-notype', '-id', winid, '_QUBES_VMWINDOWID']) From fd9f2e2a6c7360bab0391dc2b2ba58e3006cb6b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Mon, 15 Oct 2018 05:17:19 +0200 Subject: [PATCH 06/33] tests: type commands into specific found window Make sure events are sent to specific window found with xdotool search, not the one having the focus. In case of Whonix, it can be first connection wizard or whonixcheck report. --- qubes/tests/integ/basic.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/qubes/tests/integ/basic.py b/qubes/tests/integ/basic.py index fee89ab6f..6d535e7e9 100644 --- a/qubes/tests/integ/basic.py +++ b/qubes/tests/integ/basic.py @@ -108,7 +108,7 @@ def test_120_start_standalone_with_cdrom_dom0(self): self.assertTrue(self.vm.is_running()) # Type 'poweroff' subprocess.check_call(['xdotool', 'search', '--name', self.vm.name, - 'type', 'poweroff\r']) + 'type', '--window', '%1', 'poweroff\r']) for _ in range(10): if not self.vm.is_running(): break @@ -674,7 +674,7 @@ def test_121_start_standalone_with_cdrom_vm(self): self.assertTrue(self.vm.is_running()) # Type 'poweroff' subprocess.check_call(['xdotool', 'search', '--name', self.vm.name, - 'type', 'poweroff\r']) + 'type', '--window', '%1', 'poweroff\r']) for _ in range(10): if not self.vm.is_running(): break From e8dc6cb916f7828c34e71a135e7ada7586f52ffa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Mon, 15 Oct 2018 05:24:24 +0200 Subject: [PATCH 07/33] tests: use smaller root.img in backupcompatibility tests 1GB image easily exceed available space on openQA instances. Use 100MB instead. --- qubes/tests/integ/backupcompatibility.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qubes/tests/integ/backupcompatibility.py b/qubes/tests/integ/backupcompatibility.py index c7a7f6c4e..88c3708a6 100644 --- a/qubes/tests/integ/backupcompatibility.py +++ b/qubes/tests/integ/backupcompatibility.py @@ -237,7 +237,7 @@ def create_v1_files(self, r2b2=False): self.create_sparse(self.fullpath( "vm-templates/test-template-clone/root.img"), 10*2**30) self.fill_image(self.fullpath( - "vm-templates/test-template-clone/root.img"), 1*2**30, True) + "vm-templates/test-template-clone/root.img"), 100*2**20, True) self.create_volatile_img(self.fullpath( "vm-templates/test-template-clone/volatile.img")) subprocess.check_call([ From 133219f6d390b9bb2fdf3831951d38a4ea4911c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Mon, 15 Oct 2018 06:05:05 +0200 Subject: [PATCH 08/33] Do not generate R3 compat firewall rules if R4 format is supported R3 format had limitation of ~40 rules per VM. Do not generate compat rules (possibly hitting that limitation) if new format, free of that limitation is supported. 
Fixes QubesOS/qubes-issues#1570 Fixes QubesOS/qubes-issues#4228 --- qubes/ext/r3compatibility.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/qubes/ext/r3compatibility.py b/qubes/ext/r3compatibility.py index acc1dd089..2fa8ec3ad 100644 --- a/qubes/ext/r3compatibility.py +++ b/qubes/ext/r3compatibility.py @@ -80,6 +80,9 @@ def on_firewall_changed(self, vm, event): def write_iptables_qubesdb_entry(self, firewallvm): # pylint: disable=no-self-use + # skip compatibility rules if new format support is advertised + if firewallvm.features.check_with_template('qubes-firewall', False): + return firewallvm.untrusted_qdb.rm("/qubes-iptables-domainrules/") iptables = "# Generated by Qubes Core on {0}\n".format( datetime.datetime.now().ctime()) From c01ae06feee4c933aefd7d282afd5fcfccaafad3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Wed, 17 Oct 2018 17:37:02 +0200 Subject: [PATCH 09/33] tests: add basic ServicesExtension tests --- qubes/tests/ext.py | 55 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/qubes/tests/ext.py b/qubes/tests/ext.py index bb937f68a..ef4e7252b 100644 --- a/qubes/tests/ext.py +++ b/qubes/tests/ext.py @@ -21,6 +21,7 @@ from unittest import mock import qubes.ext.core_features +import qubes.ext.services import qubes.ext.windows import qubes.tests @@ -214,3 +215,57 @@ def test_002_notify_tools_other_os(self): 'qrexec': '1', 'os': 'Linux'}) self.assertEqual(self.vm.mock_calls, []) + +class TC_20_Services(qubes.tests.QubesTestCase): + def setUp(self): + super().setUp() + self.ext = qubes.ext.services.ServicesExtension() + self.vm = mock.MagicMock() + self.features = {} + self.vm.configure_mock(**{ + 'is_running.return_value': True, + 'features.get.side_effect': self.features.get, + 'features.items.side_effect': self.features.items, + 'features.__iter__.side_effect': self.features.__iter__, + 'features.__contains__.side_effect': self.features.__contains__, + 'features.__setitem__.side_effect': self.features.__setitem__, + 'features.__delitem__.side_effect': self.features.__delitem__, + }) + + def test_000_write_to_qdb(self): + self.features['service.test1'] = '1' + self.features['service.test2'] = '' + + self.ext.on_domain_qdb_create(self.vm, 'domain-qdb-create') + self.assertEqual(sorted(self.vm.untrusted_qdb.mock_calls), [ + ('write', ('/qubes-service/test1', '1'), {}), + ('write', ('/qubes-service/test2', '0'), {}), + ]) + + def test_001_feature_set(self): + self.ext.on_domain_feature_set(self.vm, + 'feature-set:service.test_no_oldvalue', + 'service.test_no_oldvalue', '1') + self.ext.on_domain_feature_set(self.vm, + 'feature-set:service.test_oldvalue', + 'service.test_oldvalue', '1', '') + self.ext.on_domain_feature_set(self.vm, + 'feature-set:service.test_disable', + 'service.test_disable', '', '1') + self.ext.on_domain_feature_set(self.vm, + 'feature-set:service.test_disable_no_oldvalue', + 'service.test_disable_no_oldvalue', '') + + self.assertEqual(sorted(self.vm.untrusted_qdb.mock_calls), sorted([ + ('write', ('/qubes-service/test_no_oldvalue', '1'), {}), + ('write', ('/qubes-service/test_oldvalue', '1'), {}), + ('write', ('/qubes-service/test_disable', '0'), {}), + ('write', ('/qubes-service/test_disable_no_oldvalue', '0'), {}), + ])) + + def test_002_feature_delete(self): + self.ext.on_domain_feature_delete(self.vm, + 'feature-delete:service.test3', 'service.test3') + self.assertEqual(sorted(self.vm.untrusted_qdb.mock_calls), [ + ('rm', ('/qubes-service/test3',), {}), + ]) From 
ba210c41ee644a31fa605266bc06880b49301274 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Thu, 18 Oct 2018 00:01:45 +0200 Subject: [PATCH 10/33] qubesvm: don't crash VM creation if icon symlink already exists It can be leftover from previous failed attempt. Don't crash on it, and replace it instead. QubesOS/qubes-issues#3438 --- qubes/vm/qubesvm.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/qubes/vm/qubesvm.py b/qubes/vm/qubesvm.py index 7955297e4..165df53e3 100644 --- a/qubes/vm/qubesvm.py +++ b/qubes/vm/qubesvm.py @@ -1439,6 +1439,8 @@ def create_on_disk(self, pool=None, pools=None): 'creation'.format(self.dir_path)) raise + if os.path.exists(self.icon_path): + os.unlink(self.icon_path) self.log.info('Creating icon symlink: {} -> {}'.format( self.icon_path, self.label.icon_path)) if hasattr(os, "symlink"): From 58bcec2a64e5c49bf42633776f4502a5615dfee8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Thu, 18 Oct 2018 00:03:05 +0200 Subject: [PATCH 11/33] qubesvm: improve error message about same-pool requirement Make it clear that volume creation fails because it needs to be in the same pool as its parent. This message is shown in context of `qvm-create -p root=MyPool` for example and the previous message didn't make sense at all. Fixes QubesOS/qubes-issues#3438 --- qubes/vm/qubesvm.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/qubes/vm/qubesvm.py b/qubes/vm/qubesvm.py index 165df53e3..7b0237769 100644 --- a/qubes/vm/qubesvm.py +++ b/qubes/vm/qubesvm.py @@ -1990,7 +1990,9 @@ def _patch_pool_config(config, pool=None, pools=None): if not is_snapshot: config['pool'] = str(pools[name]) else: - msg = "Can't clone a snapshot volume {!s} to pool {!s} " \ + msg = "Snapshot volume {0!s} must be in the same pool as its " \ + "origin ({0!s} volume of template)," \ + "cannot move to pool {1!s} " \ .format(name, pools[name]) raise qubes.exc.QubesException(msg) return config From d1f5cb5d15d7cd39bcffec9d0cb743d75b2df470 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Thu, 18 Oct 2018 05:44:08 +0200 Subject: [PATCH 12/33] ext/services: mechanism for advertising supported services Support 'supported-service.*' features requests coming from VMs. Set such features directly (allow only value '1') and remove any not reported in given call. This way uninstalling package providing given service will automatically remove related 'supported-service...' feature. 
Fixes QubesOS/qubes-issues#4402 --- qubes/ext/services.py | 37 +++++++++++++++++++++++++++++++++++++ qubes/tests/ext.py | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+) diff --git a/qubes/ext/services.py b/qubes/ext/services.py index 77e94cdb0..b09e2fda2 100644 --- a/qubes/ext/services.py +++ b/qubes/ext/services.py @@ -62,3 +62,40 @@ def on_domain_feature_delete(self, vm, event, feature): return service = feature[len('service.'):] vm.untrusted_qdb.rm('/qubes-service/{}'.format(service)) + + @qubes.ext.handler('features-request') + def supported_services(self, vm, event, untrusted_features): + '''Handle advertisement of supported services''' + # pylint: disable=no-self-use,unused-argument + + if getattr(vm, 'template', None): + vm.log.warning( + 'Ignoring qubes.FeaturesRequest from template-based VM') + return + + new_supported_services = set() + for requested_service in untrusted_features: + if not requested_service.startswith('supported-service.'): + continue + if untrusted_features[requested_service] == '1': + # only allow to advertise service as supported, lack of entry + # means service is not supported + new_supported_services.add(requested_service) + del untrusted_features + + # if no service is supported, ignore the whole thing - do not clear + # all services in case of empty request (manual or such) + if not new_supported_services: + return + + old_supported_services = set( + feat for feat in vm.features + if feat.startswith('supported-service.') and vm.features[feat]) + + for feature in new_supported_services.difference( + old_supported_services): + vm.features[feature] = True + + for feature in old_supported_services.difference( + new_supported_services): + del vm.features[feature] diff --git a/qubes/tests/ext.py b/qubes/tests/ext.py index ef4e7252b..5acd183fc 100644 --- a/qubes/tests/ext.py +++ b/qubes/tests/ext.py @@ -223,6 +223,7 @@ def setUp(self): self.vm = mock.MagicMock() self.features = {} self.vm.configure_mock(**{ + 'template': None, 'is_running.return_value': True, 'features.get.side_effect': self.features.get, 'features.items.side_effect': self.features.items, @@ -269,3 +270,38 @@ def test_002_feature_delete(self): self.assertEqual(sorted(self.vm.untrusted_qdb.mock_calls), [ ('rm', ('/qubes-service/test3',), {}), ]) + + def test_010_supported_services(self): + self.ext.supported_services(self.vm, 'features-request', + untrusted_features={ + 'supported-service.test1': '1', # ok + 'supported-service.test2': '0', # ignored + 'supported-service.test3': 'some text', # ignored + 'no-service': '1', # ignored + }) + self.assertEqual(self.features, { + 'supported-service.test1': True, + }) + + def test_011_supported_services_add(self): + self.features['supported-service.test1'] = '1' + self.ext.supported_services(self.vm, 'features-request', + untrusted_features={ + 'supported-service.test1': '1', # ok + 'supported-service.test2': '1', # ok + }) + # also check if existing one is untouched + self.assertEqual(self.features, { + 'supported-service.test1': '1', + 'supported-service.test2': True, + }) + + def test_012_supported_services_remove(self): + self.features['supported-service.test1'] = '1' + self.ext.supported_services(self.vm, 'features-request', + untrusted_features={ + 'supported-service.test2': '1', # ok + }) + self.assertEqual(self.features, { + 'supported-service.test2': True, + }) From 295705a708b0f46fa514ccc4053dba927dd368cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Thu, 18 Oct 2018 06:30:38 +0200 
Subject: [PATCH 13/33] doc: document features, qvm-features-request and services Fixes QubesOS/qubes-issues#2829 --- doc/index.rst | 1 + doc/qubes-features.rst | 193 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 194 insertions(+) create mode 100644 doc/qubes-features.rst diff --git a/doc/index.rst b/doc/index.rst index 5d0e71d45..b375f67bd 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -16,6 +16,7 @@ manpages and API documentation. For primary user documentation, see qubes qubes-vm/index qubes-events + qubes-features qubes-storage qubes-exc qubes-ext diff --git a/doc/qubes-features.rst b/doc/qubes-features.rst new file mode 100644 index 000000000..a334cff9c --- /dev/null +++ b/doc/qubes-features.rst @@ -0,0 +1,193 @@ +:py:class:`qubes.vm.Features` - Qubes VM features, services +============================================================ + +Features are generic mechanism for storing key-value pairs attached to a +VM. The primary use case for them is data storage for extensions (you can think +of them as more flexible properties, defined by extensions), but some are also +used in the qubes core itself. There is no definite list of supported features, +each extension can set their own and there is no requirement of registration, +but :program:`qvm-features` man page contains well known ones. +In addition, there is a mechanism for VM request setting a feature. This is +useful for extensions to discover if its VM part is present. + +Features can have three distinct values: no value (not present in mapping, +which is closest thing to :py:obj:`None`), empty string (which is +interpreted as :py:obj:`False`) and non-empty string, which is +:py:obj:`True`. Anything assigned to the mapping is coerced to strings, +however if you assign instances of :py:class:`bool`, they are converted as +described above. Be aware that assigning the number `0` (which is considered +false in Python) will result in string `'0'`, which is considered true. + +:py:class:`qubes.vm.Features` inherits from :py:class:`dict`, so provide all the +standard functions to get, list and set values. Additionally provide helper +functions to check if given feature is set on the VM and default to the value +on the VM's template or netvm. This is useful for features which nature is +inherited from other VMs, like "is package X is installed" or "is VM behind a +VPN". + +Example usage of features in extension: + +.. code-block:: python + + import qubes.exc + import qubes.ext + + class ExampleExtension(qubes.ext.Extension): + @qubes.ext.handler('domain-pre-start') + def on_domain_start(self, vm, event, **kwargs): + if vm.features.get('do-not-start', False): + raise qubes.exc.QubesVMError(vm, + 'Start prohibited because of do-not-start feature') + + if vm.features.check_with_template('something-installed', False): + # do something + +The above extension does two things: + + - prevent starting a qube with ``do-not-start`` feature set + - do something when ``something-installed`` feature is set on the qube, or its template + + +qvm-features-request, qubes.PostInstall service +------------------------------------------------ + +When some package in the VM want to request feature to be set (aka advertise +support for it), it should place a shell script in ``/etc/qubes/post-install.d``. +This script should call :program:`qvm-features-request` with ``FEATURE=VALUE`` pair(s) as +arguments to request those features. It is recommended to use very simple +values here (for example ``1``). 
The script should be named in form +``XX-package-name.sh`` where ``XX`` is two-digits number below 90 and +``package-name`` is unique name specific to this package (preferably actual +package name). The script needs executable bit set. + +``qubes.PostInstall`` service will call all those scripts after any package +installation and also after initial template installation. +This way package have a chance to report to dom0 if any feature is +added/removed. + +The features flow to dom0 according to the diagram below. Important part is +that qubes core :py:class:`qubes.ext.Extension` is responsible for handling such request in +``features-request`` event handler. If no extension handles given feature request, +it will be ignored. The extension should carefuly validate requested +features (ignoring those not recognized - may be for another extension) and +only then set appropriate value on VM object +(:py:attr:`qubes.vm.BaseVM.features`). It is recommended to make the +verification code as bulletproof as possible (for example allow only specific +simple values, instead of complex structures), because feature requests +come from untrusted sources. The features actually set on the VM in some cases +may not be necessary those requested. Similar for values. + +.. graphviz:: + + digraph { + + "qubes.PostInstall"; + "/etc/qubes/post-install.d/ scripts"; + "qvm-features-request"; + "qubes.FeaturesRequest"; + "qubes core extensions"; + "VM features"; + + "qubes.PostInstall" -> "/etc/qubes/post-install.d/ scripts"; + "/etc/qubes/post-install.d/ scripts" -> "qvm-features-request" + [xlabel="each script calls"]; + "qvm-features-request" -> "qubes.FeaturesRequest" + [xlabel="last script call the service to dom0"]; + "qubes.FeaturesRequest" -> "qubes core extensions" + [xlabel="features-request event"]; + "qubes core extensions" -> "VM features" + [xlabel="verification"]; + + } + +Example ``/etc/qubes/post-install.d/20-example.sh`` file: + +.. code-block:: shell + + #!/bin/sh + + qvm-features-request example-feature=1 + +Example extension handling the above: + +.. code-block:: python + + import qubes.ext + + class ExampleExtension(qubes.ext.Extension): + # the last argument must be named untrusted_features + @qubes.ext.handler('features-request') + def on_features_request(self, vm, event, untrusted_features): + # don't allow TemplateBasedVMs to request the feature - should be + # requested by the template instead + if hasattr(vm, 'template'): + return + + untrusted_value = untrusted_features.get('example-feature', None) + # check if feature is advertised and verify its value + if untrusted_value != '1': + return + value = untrusted_value + + # and finally set the value + vm.features['example-feature'] = value + +Services +--------- + +`Qubes services `_ are implemented +as features with ``service.`` prefix. The +:py:class:`qubes.ext.services.ServicesExtension` enumerate all the features +in form of ``service.`` prefix and write them to QubesDB as +``/qubes-service/`` and value either ``0`` or ``1``. +VM startup scripts list those entries for for each with value of ``1``, create +``/var/run/qubes-service/`` file. Then, it can be conveniently +used by other scripts to check whether dom0 wishes service to be enabled or +disabled. + +VM package can advertise what services are supported. For that, it needs to +request ``supported-service.`` feature with value ``1`` according +to description above. The :py:class:`qubes.ext.services.ServicesExtension` will +handle such request and set this feature on VM object. 
``supported-service.`` +features that stop being advertised with ``qvm-features-request`` call are +removed. This way, it's enough to remove the file from +``/etc/qubes/post-install.d`` (for example by uninstalling package providing +the service) to tell dom0 the service is no longer supported. Services +advertised by TemplateBasedVMs are currently ignored (related +``supported-service.`` features are not set), but retrieving them may be added +in the future. Applications checking for specific service support should use +``vm.features.check_with_template('supported-service.', False)`` +call on desired VM object. When enumerating all supported services, application +should consider both the vm and its template (if any). + +Various tools will use this information to discover if given service is +supported. The API does not enforce service being first advertised before being +enabled (means: there can be service which is enabled, but without matching +``supported-service.`` feature). The list of well known services is in +:program:`qvm-service` man page. + +Example ``/etc/qubes/post-install.d/20-my-service.sh``: + +.. code-block:: shell + + #!/bin/sh + + qvm-features-request supported-service.my-service=1 + +Services and features can be then inspected from dom0 using +:program:`qvm-features` tool, for example: + +.. code-block:: shell + + $ qvm-features my-qube + supported-service.my-service 1 + +Module contents +--------------- + +.. autoclass:: qubes.vm.Features + :members: + :show-inheritance: + +.. vim: ts=3 sw=3 et + From 6170edb291fa016ba978fd97a4317999c2736b7d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Fri, 19 Oct 2018 01:29:03 +0200 Subject: [PATCH 14/33] storage: allow import_data and import_data_end be coroutines On some storage pools this operation can also be time consuming - for example require creating temporary volume, and volume.create() already can be a coroutine. This is also requirement for making common code used by start()/create() etc be a coroutine, otherwise neither of them can be and will block other operations. 
Related to QubesOS/qubes-issues#4283 --- qubes/api/admin.py | 2 +- qubes/api/internal.py | 3 ++- qubes/storage/__init__.py | 22 ++++++++++++++++++---- 3 files changed, 21 insertions(+), 6 deletions(-) diff --git a/qubes/api/admin.py b/qubes/api/admin.py index 60e0fa034..5a2b765e4 100644 --- a/qubes/api/admin.py +++ b/qubes/api/admin.py @@ -479,7 +479,7 @@ def vm_volume_import(self): if not self.dest.is_halted(): raise qubes.exc.QubesVMNotHaltedError(self.dest) - path = self.dest.storage.import_data(self.arg) + path = yield from self.dest.storage.import_data(self.arg) assert ' ' not in path size = self.dest.volumes[self.arg].size diff --git a/qubes/api/internal.py b/qubes/api/internal.py index 0773a98fe..3af5848fe 100644 --- a/qubes/api/internal.py +++ b/qubes/api/internal.py @@ -68,7 +68,8 @@ def vm_volume_import_end(self, untrusted_payload): success = untrusted_payload == b'ok' try: - self.dest.storage.import_data_end(self.arg, success=success) + yield from self.dest.storage.import_data_end(self.arg, + success=success) except: self.dest.fire_event('domain-volume-import-end', volume=self.arg, success=False) diff --git a/qubes/storage/__init__.py b/qubes/storage/__init__.py index c81832a94..2a6afaa37 100644 --- a/qubes/storage/__init__.py +++ b/qubes/storage/__init__.py @@ -198,6 +198,8 @@ def import_data(self): volume data require something more than just writing to a file ( for example connecting to some other domain, or converting data on the fly), the returned path may be a pipe. + + This can be implemented as a coroutine. ''' raise self._not_implemented("import") @@ -207,6 +209,8 @@ def import_data_end(self, success): This method is called regardless the operation was successful or not. + This can be implemented as a coroutine. + :param success: True if data import was successful, otherwise False ''' # by default do nothing @@ -654,24 +658,34 @@ def export(self, volume): return self.vm.volumes[volume].export() + @asyncio.coroutine def import_data(self, volume): ''' Helper function to import volume data (pool.import_data(volume))''' assert isinstance(volume, (Volume, str)), \ "You need to pass a Volume or pool name as str" if isinstance(volume, Volume): - return volume.import_data() + ret = volume.import_data() + else: + ret = self.vm.volumes[volume].import_data() - return self.vm.volumes[volume].import_data() + if asyncio.iscoroutine(ret): + ret = yield from ret + return ret + @asyncio.coroutine def import_data_end(self, volume, success): ''' Helper function to finish/cleanup data import (pool.import_data_end( volume))''' assert isinstance(volume, (Volume, str)), \ "You need to pass a Volume or pool name as str" if isinstance(volume, Volume): - return volume.import_data_end(success=success) + ret = volume.import_data_end(success=success) + else: + ret = self.vm.volumes[volume].import_data_end(success=success) - return self.vm.volumes[volume].import_data_end(success=success) + if asyncio.iscoroutine(ret): + ret = yield from ret + return ret class VolumesCollection: From 299c5146474fd654cf152bb7a913f7c97642f47e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Fri, 19 Oct 2018 02:28:09 +0200 Subject: [PATCH 15/33] tests: fix asyncio usage in storage_lvm.TC_01_ThinPool Both vm.create_on_disk() and vm.start() are coroutines. Tests in this class didn't run them, so basically didn't test anything. Wrap couroutine calls with self.loop.run_until_complete(). Additionally, don't fail if LVM pool is named differently. 
In that case, the test is rather sily, as it probably use the same pool for source and destination (operation already tested elsewhere). But it isn't a reason for failing the test. --- qubes/tests/storage_lvm.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/qubes/tests/storage_lvm.py b/qubes/tests/storage_lvm.py index f6b67d5b1..3bead18ef 100644 --- a/qubes/tests/storage_lvm.py +++ b/qubes/tests/storage_lvm.py @@ -946,26 +946,27 @@ def test_004_import(self): vm = self.app.add_new_vm(qubes.vm.templatevm.TemplateVM, name=name, label='red') vm.clone_properties(template_vm) - vm.clone_disk_files(template_vm, pool='test-lvm') + self.loop.run_until_complete( + vm.clone_disk_files(template_vm, pool=self.pool.name)) for v_name, volume in vm.volumes.items(): if volume.save_on_stop: expected = "/dev/{!s}/vm-{!s}-{!s}".format( DEFAULT_LVM_POOL.split('/')[0], vm.name, v_name) self.assertEqual(volume.path, expected) with self.assertNotRaises(qubes.exc.QubesException): - vm.start() + self.loop.run_until_complete(vm.start()) def test_005_create_appvm(self): vm = self.app.add_new_vm(cls=qubes.vm.appvm.AppVM, name=self.make_vm_name('appvm'), label='red') - vm.create_on_disk(pool='test-lvm') + self.loop.run_until_complete(vm.create_on_disk(pool=self.pool.name)) for v_name, volume in vm.volumes.items(): if volume.save_on_stop: expected = "/dev/{!s}/vm-{!s}-{!s}".format( DEFAULT_LVM_POOL.split('/')[0], vm.name, v_name) self.assertEqual(volume.path, expected) with self.assertNotRaises(qubes.exc.QubesException): - vm.start() + self.loop.run_until_complete(vm.start()) @skipUnlessLvmPoolExists class TC_02_StorageHelpers(ThinPoolBase): From b65fdf97005c2dbea3070d6e19aec5cf12b1160c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Fri, 19 Oct 2018 02:38:47 +0200 Subject: [PATCH 16/33] storage: convert lvm driver to async version LVM operations can take significant amount of time. This is especially visible when stopping a VM (`vm.storage.stop()`) - in that time the whole qubesd freeze for about 2 seconds. Fix this by making all the ThinVolume methods a coroutines (where supported). Each public coroutine is also wrapped with locking on volume._lock to avoid concurrency-related problems. This all also require changing internal helper functions to coroutines. There are two functions that still needs to be called from non-coroutine call sites: - init_cache/reset_cache (initial cache fill, ThinPool.setup()) - qubes_lvm (ThinVolume.export() So, those two functions need to live in two variants. Extract its common code to separate functions to reduce code duplications. 
Fixes QubesOS/qubes-issues#4283 --- qubes/storage/lvm.py | 240 ++++++++++++++++++++++++++----------- qubes/tests/storage_lvm.py | 156 ++++++++++++------------ 2 files changed, 245 insertions(+), 151 deletions(-) diff --git a/qubes/storage/lvm.py b/qubes/storage/lvm.py index 178019dd0..8943bb063 100644 --- a/qubes/storage/lvm.py +++ b/qubes/storage/lvm.py @@ -18,7 +18,7 @@ # ''' Driver for storing vm images in a LVM thin pool ''' - +import functools import logging import os import subprocess @@ -195,26 +195,14 @@ def usage(self): return 0 -def init_cache(log=logging.getLogger('qubes.storage.lvm')): - cmd = ['lvs', '--noheadings', '-o', - 'vg_name,pool_lv,name,lv_size,data_percent,lv_attr,origin', - '--units', 'b', '--separator', ';'] - if os.getuid() != 0: - cmd.insert(0, 'sudo') - environ = os.environ.copy() - environ['LC_ALL'] = 'C.utf8' - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - close_fds=True, env=environ) - out, err = p.communicate() - return_code = p.returncode - if return_code == 0 and err: - log.warning(err) - elif return_code != 0: - raise qubes.storage.StoragePoolException(err) +_init_cache_cmd = ['lvs', '--noheadings', '-o', + 'vg_name,pool_lv,name,lv_size,data_percent,lv_attr,origin', + '--units', 'b', '--separator', ';'] +def _parse_lvm_cache(lvm_output): result = {} - for line in out.splitlines(): + for line in lvm_output.splitlines(): line = line.decode().strip() pool_name, pool_lv, name, size, usage_percent, attr, \ origin = line.split(';', 6) @@ -228,6 +216,42 @@ def init_cache(log=logging.getLogger('qubes.storage.lvm')): return result +def init_cache(log=logging.getLogger('qubes.storage.lvm')): + cmd = _init_cache_cmd + if os.getuid() != 0: + cmd.insert(0, 'sudo') + environ = os.environ.copy() + environ['LC_ALL'] = 'C.utf8' + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, + close_fds=True, env=environ) + out, err = p.communicate() + return_code = p.returncode + if return_code == 0 and err: + log.warning(err) + elif return_code != 0: + raise qubes.storage.StoragePoolException(err) + + return _parse_lvm_cache(out) + +@asyncio.coroutine +def init_cache_coro(log=logging.getLogger('qubes.storage.lvm')): + cmd = _init_cache_cmd + if os.getuid() != 0: + cmd = ['sudo'] + cmd + environ = os.environ.copy() + environ['LC_ALL'] = 'C.utf8' + p = yield from asyncio.create_subprocess_exec(*cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=True, env=environ) + out, err = yield from p.communicate() + return_code = p.returncode + if return_code == 0 and err: + log.warning(err) + elif return_code != 0: + raise qubes.storage.StoragePoolException(err) + + return _parse_lvm_cache(out) size_cache = init_cache() @@ -243,6 +267,21 @@ def _revision_sort_key(revision): revision = revision.split('-')[0] return int(revision) +def locked(method): + '''Decorator running given Volume's coroutine under a lock. 
+ Needs to be added after wrapping with @asyncio.coroutine, for example: + + >>>@locked + >>>@asyncio.coroutine + >>>def start(self): + >>> pass + ''' + @asyncio.coroutine + @functools.wraps(method) + def wrapper(self, *args, **kwargs): + with (yield from self._lock): # pylint: disable=protected-access + return (yield from method(self, *args, **kwargs)) + return wrapper class ThinVolume(qubes.storage.Volume): ''' Default LVM thin volume implementation @@ -260,6 +299,7 @@ def __init__(self, volume_group, size=0, **kwargs): self._vid_import = self.vid + '-import' self._size = size + self._lock = asyncio.Lock() @property def path(self): @@ -307,6 +347,7 @@ def size(self, _): raise qubes.storage.StoragePoolException( "You shouldn't use lvm size setter") + @asyncio.coroutine def _reset(self): ''' Resets a volatile volume ''' assert not self.snap_on_start and not self.save_on_stop, \ @@ -314,14 +355,15 @@ def _reset(self): self.log.debug('Resetting volatile %s', self.vid) try: cmd = ['remove', self.vid] - qubes_lvm(cmd, self.log) + yield from qubes_lvm_coro(cmd, self.log) except qubes.storage.StoragePoolException: pass # pylint: disable=protected-access cmd = ['create', self.pool._pool_id, self.vid.split('/')[1], str(self.size)] - qubes_lvm(cmd, self.log) + yield from qubes_lvm_coro(cmd, self.log) + @asyncio.coroutine def _remove_revisions(self, revisions=None): '''Remove old volume revisions. @@ -342,10 +384,11 @@ def _remove_revisions(self, revisions=None): assert rev_id != self._vid_current try: cmd = ['remove', self.vid + '-' + rev_id] - qubes_lvm(cmd, self.log) + yield from qubes_lvm_coro(cmd, self.log) except qubes.storage.StoragePoolException: pass + @asyncio.coroutine def _commit(self, vid_to_commit=None, keep=False): ''' Commit temporary volume into current one. 
By default @@ -368,8 +411,7 @@ def _commit(self, vid_to_commit=None, keep=False): assert hasattr(self, '_vid_snap') vid_to_commit = self._vid_snap - # TODO: when converting this function to coroutine, this _must_ be - # under a lock + assert self._lock.locked() if not os.path.exists('/dev/' + vid_to_commit): # nothing to commit return @@ -377,21 +419,23 @@ def _commit(self, vid_to_commit=None, keep=False): if self._vid_current == self.vid: cmd = ['rename', self.vid, '{}-{}-back'.format(self.vid, int(time.time()))] - qubes_lvm(cmd, self.log) - reset_cache() + yield from qubes_lvm_coro(cmd, self.log) + yield from reset_cache_coro() cmd = ['clone' if keep else 'rename', vid_to_commit, self.vid] - qubes_lvm(cmd, self.log) - reset_cache() + yield from qubes_lvm_coro(cmd, self.log) + yield from reset_cache_coro() # make sure the one we've committed right now is properly # detected as the current one - before removing anything assert self._vid_current == self.vid # and remove old snapshots, if needed - self._remove_revisions() + yield from self._remove_revisions() + @locked + @asyncio.coroutine def create(self): assert self.vid assert self.size @@ -405,32 +449,34 @@ def create(self): self.vid.split('/', 1)[1], str(self.size) ] - qubes_lvm(cmd, self.log) - reset_cache() + yield from qubes_lvm_coro(cmd, self.log) + yield from reset_cache_coro() return self + @locked + @asyncio.coroutine def remove(self): assert self.vid try: if os.path.exists('/dev/' + self._vid_snap): cmd = ['remove', self._vid_snap] - qubes_lvm(cmd, self.log) + yield from qubes_lvm_coro(cmd, self.log) except AttributeError: pass try: if os.path.exists('/dev/' + self._vid_import): cmd = ['remove', self._vid_import] - qubes_lvm(cmd, self.log) + yield from qubes_lvm_coro(cmd, self.log) except AttributeError: pass - self._remove_revisions(self.revisions.keys()) + yield from self._remove_revisions(self.revisions.keys()) if not os.path.exists(self.path): return cmd = ['remove', self.path] - qubes_lvm(cmd, self.log) - reset_cache() + yield from qubes_lvm_coro(cmd, self.log) + yield from reset_cache_coro() # pylint: disable=protected-access self.pool._volume_objects_cache.pop(self.vid, None) @@ -441,6 +487,7 @@ def export(self): devpath = self.path return devpath + @locked @asyncio.coroutine def import_volume(self, src_volume): if not src_volume.save_on_stop: @@ -456,13 +503,13 @@ def import_volume(self, src_volume): # pylint: disable=line-too-long if isinstance(src_volume.pool, ThinPool) and \ src_volume.pool.thin_pool == self.pool.thin_pool: # NOQA - self._commit(src_volume.path[len('/dev/'):], keep=True) + yield from self._commit(src_volume.path[len('/dev/'):], keep=True) else: cmd = ['create', self.pool._pool_id, # pylint: disable=protected-access self._vid_import.split('/')[1], str(src_volume.size)] - qubes_lvm(cmd, self.log) + yield from qubes_lvm_coro(cmd, self.log) src_path = src_volume.export() cmd = ['dd', 'if=' + src_path, 'of=/dev/' + self._vid_import, 'conv=sparse', 'status=none'] @@ -474,14 +521,16 @@ def import_volume(self, src_volume): yield from p.wait() if p.returncode != 0: cmd = ['remove', self._vid_import] - qubes_lvm(cmd, self.log) + yield from qubes_lvm_coro(cmd, self.log) raise qubes.storage.StoragePoolException( 'Failed to import volume {!r}, dd exit code: {}'.format( src_volume, p.returncode)) - self._commit(self._vid_import) + yield from self._commit(self._vid_import) return self + @locked + @asyncio.coroutine def import_data(self): ''' Returns an object that can be `open()`. 
''' if self.is_dirty(): @@ -492,21 +541,23 @@ def import_data(self): # pylint: disable=protected-access cmd = ['create', self.pool._pool_id, self._vid_import.split('/')[1], str(self.size)] - qubes_lvm(cmd, self.log) - reset_cache() + yield from qubes_lvm_coro(cmd, self.log) + yield from reset_cache_coro() devpath = '/dev/' + self._vid_import return devpath + @locked + @asyncio.coroutine def import_data_end(self, success): '''Either commit imported data, or discard temporary volume''' if not os.path.exists('/dev/' + self._vid_import): raise qubes.storage.StoragePoolException( 'No import operation in progress on {}'.format(self.vid)) if success: - self._commit(self._vid_import) + yield from self._commit(self._vid_import) else: cmd = ['remove', self._vid_import] - qubes_lvm(cmd, self.log) + yield from qubes_lvm_coro(cmd, self.log) def abort_if_import_in_progress(self): try: @@ -531,6 +582,8 @@ def is_outdated(self): return (size_cache[self._vid_snap]['origin'] != self.source.path.split('/')[-1]) + @locked + @asyncio.coroutine def revert(self, revision=None): if self.is_dirty(): raise qubes.storage.StoragePoolException( @@ -547,12 +600,14 @@ def revert(self, revision=None): if self.vid in size_cache: cmd = ['remove', self.vid] - qubes_lvm(cmd, self.log) + yield from qubes_lvm_coro(cmd, self.log) cmd = ['clone', self.vid + '-' + revision, self.vid] - qubes_lvm(cmd, self.log) - reset_cache() + yield from qubes_lvm_coro(cmd, self.log) + yield from reset_cache_coro() return self + @locked + @asyncio.coroutine def resize(self, size): ''' Expands volume, throws :py:class:`qubst.storage.qubes.storage.StoragePoolException` if @@ -574,20 +629,21 @@ def resize(self, size): if self.is_dirty(): cmd = ['extend', self._vid_snap, str(size)] - qubes_lvm(cmd, self.log) + yield from qubes_lvm_coro(cmd, self.log) elif hasattr(self, '_vid_import') and \ os.path.exists('/dev/' + self._vid_import): cmd = ['extend', self._vid_import, str(size)] - qubes_lvm(cmd, self.log) + yield from qubes_lvm_coro(cmd, self.log) elif self.save_on_stop or not self.snap_on_start: cmd = ['extend', self._vid_current, str(size)] - qubes_lvm(cmd, self.log) - reset_cache() + yield from qubes_lvm_coro(cmd, self.log) + yield from reset_cache_coro() + @asyncio.coroutine def _snapshot(self): try: cmd = ['remove', self._vid_snap] - qubes_lvm(cmd, self.log) + yield from qubes_lvm_coro(cmd, self.log) except: # pylint: disable=bare-except pass @@ -595,32 +651,36 @@ def _snapshot(self): cmd = ['clone', self._vid_current, self._vid_snap] else: cmd = ['clone', self.source.path, self._vid_snap] - qubes_lvm(cmd, self.log) + yield from qubes_lvm_coro(cmd, self.log) + @locked + @asyncio.coroutine def start(self): self.abort_if_import_in_progress() try: if self.snap_on_start or self.save_on_stop: if not self.save_on_stop or not self.is_dirty(): - self._snapshot() + yield from self._snapshot() else: - self._reset() + yield from self._reset() finally: - reset_cache() + yield from reset_cache_coro() return self + @locked + @asyncio.coroutine def stop(self): try: if self.save_on_stop: - self._commit() + yield from self._commit() if self.snap_on_start and not self.save_on_stop: cmd = ['remove', self._vid_snap] - qubes_lvm(cmd, self.log) + yield from qubes_lvm_coro(cmd, self.log) elif not self.snap_on_start and not self.save_on_stop: cmd = ['remove', self.vid] - qubes_lvm(cmd, self.log) + yield from qubes_lvm_coro(cmd, self.log) finally: - reset_cache() + yield from reset_cache_coro() return self def verify(self): @@ -671,9 +731,14 @@ def pool_exists(pool_id): 
except KeyError: return False +def _get_lvm_cmdline(cmd): + ''' Build command line for :program:`lvm` call. + The purpose of this function is to keep all the detailed lvm options in + one place. -def qubes_lvm(cmd, log=logging.getLogger('qubes.storage.lvm')): - ''' Call :program:`lvm` to execute an LVM operation ''' + :param cmd: array of str, where cmd[0] is action and the rest are arguments + :return array of str appropriate for subprocess.Popen + ''' action = cmd[0] if action == 'remove': lvm_cmd = ['lvremove', '-f', cmd[1]] @@ -698,28 +763,57 @@ def qubes_lvm(cmd, log=logging.getLogger('qubes.storage.lvm')): cmd = ['sudo', 'lvm'] + lvm_cmd else: cmd = ['lvm'] + lvm_cmd - environ = os.environ.copy() - environ['LC_ALL'] = 'C.utf8' - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - close_fds=True, env=environ) - out, err = p.communicate() - err = err.decode() + + return cmd + +def _process_lvm_output(returncode, stdout, stderr, log): + '''Process output of LVM, determine if the call was successful and + possibly log warnings.''' # Filter out warning about intended over-provisioning. # Upstream discussion about missing option to silence it: # https://bugzilla.redhat.com/1347008 - err = '\n'.join(line for line in err.splitlines() + err = '\n'.join(line for line in stderr.decode().splitlines() if 'exceeds the size of thin pool' not in line) - return_code = p.returncode - if out: - log.debug(out) - if return_code == 0 and err: + if stdout: + log.debug(stdout) + if returncode == 0 and err: log.warning(err) - elif return_code != 0: + elif returncode != 0: assert err, "Command exited unsuccessful, but printed nothing to stderr" err = err.replace('%', '%%') raise qubes.storage.StoragePoolException(err) return True +def qubes_lvm(cmd, log=logging.getLogger('qubes.storage.lvm')): + ''' Call :program:`lvm` to execute an LVM operation ''' + # the only caller for this non-coroutine version is ThinVolume.export() + cmd = _get_lvm_cmdline(cmd) + environ = os.environ.copy() + environ['LC_ALL'] = 'C.utf8' + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, + close_fds=True, env=environ) + out, err = p.communicate() + return _process_lvm_output(p.returncode, out, err, log) + +@asyncio.coroutine +def qubes_lvm_coro(cmd, log=logging.getLogger('qubes.storage.lvm')): + ''' Call :program:`lvm` to execute an LVM operation + + Coroutine version of :py:func:`qubes_lvm`''' + cmd = _get_lvm_cmdline(cmd) + environ = os.environ.copy() + environ['LC_ALL'] = 'C.utf8' + p = yield from asyncio.create_subprocess_exec(*cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=True, env=environ) + out, err = yield from p.communicate() + return _process_lvm_output(p.returncode, out, err, log) + def reset_cache(): qubes.storage.lvm.size_cache = init_cache() + +@asyncio.coroutine +def reset_cache_coro(): + qubes.storage.lvm.size_cache = yield from init_cache_coro() diff --git a/qubes/tests/storage_lvm.py b/qubes/tests/storage_lvm.py index 3bead18ef..3f320790f 100644 --- a/qubes/tests/storage_lvm.py +++ b/qubes/tests/storage_lvm.py @@ -136,10 +136,10 @@ def test_001_origin_volume(self): self.assertEqual(volume.name, 'root') self.assertEqual(volume.pool, self.pool.name) self.assertEqual(volume.size, qubes.config.defaults['root_img_size']) - volume.create() + self.loop.run_until_complete(volume.create()) path = "/dev/%s" % volume.vid self.assertTrue(os.path.exists(path), path) - volume.remove() + self.loop.run_until_complete(volume.remove()) def 
test_003_read_write_volume(self): ''' Test read-write volume creation ''' @@ -156,10 +156,10 @@ def test_003_read_write_volume(self): self.assertEqual(volume.name, 'root') self.assertEqual(volume.pool, self.pool.name) self.assertEqual(volume.size, qubes.config.defaults['root_img_size']) - volume.create() + self.loop.run_until_complete(volume.create()) path = "/dev/%s" % volume.vid self.assertTrue(os.path.exists(path), path) - volume.remove() + self.loop.run_until_complete(volume.remove()) def test_004_size(self): with self.assertNotRaises(NotImplementedError): @@ -207,11 +207,11 @@ def test_006_resize(self): } vm = qubes.tests.storage.TestVM(self) volume = self.app.get_pool(self.pool.name).init_volume(vm, config) - volume.create() - self.addCleanup(volume.remove) + self.loop.run_until_complete(volume.create()) + self.addCleanup(self.loop.run_until_complete, volume.remove()) path = "/dev/%s" % volume.vid new_size = 64 * 1024 ** 2 - volume.resize(new_size) + self.loop.run_until_complete(volume.resize(new_size)) self.assertEqual(self._get_size(path), new_size) self.assertEqual(volume.size, new_size) @@ -226,17 +226,17 @@ def test_007_resize_running(self): } vm = qubes.tests.storage.TestVM(self) volume = self.app.get_pool(self.pool.name).init_volume(vm, config) - volume.create() - self.addCleanup(volume.remove) - volume.start() + self.loop.run_until_complete(volume.create()) + self.addCleanup(self.loop.run_until_complete, volume.remove()) + self.loop.run_until_complete(volume.start()) path = "/dev/%s" % volume.vid path2 = "/dev/%s" % volume._vid_snap new_size = 64 * 1024 ** 2 - volume.resize(new_size) + self.loop.run_until_complete(volume.resize(new_size)) self.assertEqual(self._get_size(path), old_size) self.assertEqual(self._get_size(path2), new_size) self.assertEqual(volume.size, new_size) - volume.stop() + self.loop.run_until_complete(volume.stop()) self.assertEqual(self._get_size(path), new_size) self.assertEqual(volume.size, new_size) @@ -271,18 +271,18 @@ def test_008_commit(self): } vm = qubes.tests.storage.TestVM(self) volume = self.app.get_pool(self.pool.name).init_volume(vm, config) - volume.create() + self.loop.run_until_complete(volume.create()) path_snap = '/dev/' + volume._vid_snap self.assertFalse(os.path.exists(path_snap), path_snap) origin_uuid = self._get_lv_uuid(volume.path) - volume.start() + self.loop.run_until_complete(volume.start()) snap_uuid = self._get_lv_uuid(path_snap) self.assertNotEqual(origin_uuid, snap_uuid) path = volume.path self.assertTrue(path.startswith('/dev/' + volume.vid), '{} does not start with /dev/{}'.format(path, volume.vid)) self.assertTrue(os.path.exists(path), path) - volume.remove() + self.loop.run_until_complete(volume.remove()) def test_009_interrupted_commit(self): ''' Test volume changes commit''' @@ -317,7 +317,7 @@ def test_009_interrupted_commit(self): revisions[1].lstrip('-'): '2018-03-14T22:18:25', } self.assertEqual(volume.revisions, expected_revisions) - volume.start() + self.loop.run_until_complete(volume.start()) self.assertEqual(volume.revisions, expected_revisions) snap_uuid = self._get_lv_uuid(path_snap) self.assertEqual(orig_uuids['-snap'], snap_uuid) @@ -326,7 +326,7 @@ def test_009_interrupted_commit(self): '/dev/' + volume.vid + revisions[1]) with unittest.mock.patch('time.time') as mock_time: mock_time.side_effect = [521065906] - volume.stop() + self.loop.run_until_complete(volume.stop()) expected_revisions = { revisions[0].lstrip('-'): '2018-03-14T22:18:24', revisions[1].lstrip('-'): '2018-03-14T22:18:25', @@ -337,7 +337,7 
@@ def test_009_interrupted_commit(self): self.assertEqual(snap_uuid, self._get_lv_uuid(volume.path)) self.assertFalse(os.path.exists(path_snap), path_snap) - volume.remove() + self.loop.run_until_complete(volume.remove()) def test_010_migration1(self): '''Start with old revisions, then start interacting using new code''' @@ -371,7 +371,7 @@ def test_010_migration1(self): self.assertEqual(volume.revisions, expected_revisions) self.assertEqual(volume.path, '/dev/' + volume.vid) - volume.start() + self.loop.run_until_complete(volume.start()) snap_uuid = self._get_lv_uuid(path_snap) self.assertNotEqual(orig_uuids[''], snap_uuid) snap_origin_uuid = self._get_lv_origin_uuid(path_snap) @@ -382,7 +382,7 @@ def test_010_migration1(self): with unittest.mock.patch('time.time') as mock_time: mock_time.side_effect = ('1521065906', '1521065907') - volume.stop() + self.loop.run_until_complete(volume.stop()) revisions.extend(['-1521065906-back']) expected_revisions = { revisions[2].lstrip('-'): '2018-03-14T22:18:25', @@ -397,7 +397,7 @@ def test_010_migration1(self): prev_path = '/dev/' + volume.vid + revisions[3] self.assertEqual(self._get_lv_uuid(prev_path), orig_uuids['']) - volume.remove() + self.loop.run_until_complete(volume.remove()) for rev in revisions: path = '/dev/' + volume.vid + rev self.assertFalse(os.path.exists(path), path) @@ -438,7 +438,7 @@ def test_011_migration2(self): with unittest.mock.patch('time.time') as mock_time: mock_time.side_effect = ('1521065906', '1521065907') - volume.stop() + self.loop.run_until_complete(volume.stop()) revisions.extend(['-1521065906-back']) expected_revisions = { revisions[2].lstrip('-'): '2018-03-14T22:18:26', @@ -452,7 +452,7 @@ def test_011_migration2(self): prev_path = '/dev/' + volume.vid + revisions[2] self.assertEqual(self._get_lv_uuid(prev_path), orig_uuids['']) - volume.remove() + self.loop.run_until_complete(volume.remove()) for rev in revisions: path = '/dev/' + volume.vid + rev self.assertFalse(os.path.exists(path), path) @@ -487,14 +487,14 @@ def test_012_migration3(self): self.assertTrue(volume.path, '/dev/' + volume.vid) self.assertTrue(volume.is_dirty()) - volume.start() + self.loop.run_until_complete(volume.start()) self.assertEqual(volume.revisions, expected_revisions) self.assertEqual(volume.path, '/dev/' + volume.vid) # -snap LV should be unchanged self.assertEqual(self._get_lv_uuid(volume._vid_snap), orig_uuids['-snap']) - volume.remove() + self.loop.run_until_complete(volume.remove()) for rev in revisions: path = '/dev/' + volume.vid + rev self.assertFalse(os.path.exists(path), path) @@ -531,12 +531,12 @@ def test_013_migration4(self): with unittest.mock.patch('time.time') as mock_time: mock_time.side_effect = ('1521065906', '1521065907') - volume.stop() + self.loop.run_until_complete(volume.stop()) expected_revisions = {} self.assertEqual(volume.revisions, expected_revisions) self.assertEqual(volume.path, '/dev/' + volume.vid) - volume.remove() + self.loop.run_until_complete(volume.remove()) for rev in revisions: path = '/dev/' + volume.vid + rev self.assertFalse(os.path.exists(path), path) @@ -555,13 +555,13 @@ def test_014_commit_keep_0(self): volume = self.app.get_pool(self.pool.name).init_volume(vm, config) # mock logging, to not interfere with time.time() mock volume.log = unittest.mock.Mock() - volume.create() + self.loop.run_until_complete(volume.create()) self.assertFalse(volume.is_dirty()) path = volume.path expected_revisions = {} self.assertEqual(volume.revisions, expected_revisions) - volume.start() + 
self.loop.run_until_complete(volume.start()) self.assertEqual(volume.revisions, expected_revisions) path_snap = '/dev/' + volume._vid_snap snap_uuid = self._get_lv_uuid(path_snap) @@ -570,14 +570,14 @@ def test_014_commit_keep_0(self): with unittest.mock.patch('time.time') as mock_time: mock_time.side_effect = [521065906] - volume.stop() + self.loop.run_until_complete(volume.stop()) self.assertFalse(volume.is_dirty()) self.assertEqual(volume.revisions, {}) self.assertEqual(volume.path, '/dev/' + volume.vid) self.assertEqual(snap_uuid, self._get_lv_uuid(volume.path)) self.assertFalse(os.path.exists(path_snap), path_snap) - volume.remove() + self.loop.run_until_complete(volume.remove()) def test_020_revert_last(self): ''' Test volume revert''' @@ -591,11 +591,11 @@ def test_020_revert_last(self): } vm = qubes.tests.storage.TestVM(self) volume = self.app.get_pool(self.pool.name).init_volume(vm, config) - volume.create() - volume.start() - volume.stop() - volume.start() - volume.stop() + self.loop.run_until_complete(volume.create()) + self.loop.run_until_complete(volume.start()) + self.loop.run_until_complete(volume.stop()) + self.loop.run_until_complete(volume.start()) + self.loop.run_until_complete(volume.stop()) self.assertEqual(len(volume.revisions), 2) revisions = volume.revisions revision_id = max(revisions.keys()) @@ -604,7 +604,7 @@ def test_020_revert_last(self): rev_uuid = self._get_lv_uuid(volume.vid + '-' + revision_id) self.assertFalse(volume.is_dirty()) self.assertNotEqual(current_uuid, rev_uuid) - volume.revert() + self.loop.run_until_complete(volume.revert()) path_snap = '/dev/' + volume._vid_snap self.assertFalse(os.path.exists(path_snap), path_snap) self.assertEqual(current_path, volume.path) @@ -612,7 +612,7 @@ def test_020_revert_last(self): self.assertEqual(new_uuid, rev_uuid) self.assertEqual(volume.revisions, revisions) - volume.remove() + self.loop.run_until_complete(volume.remove()) def test_021_revert_earlier(self): ''' Test volume revert''' @@ -626,11 +626,11 @@ def test_021_revert_earlier(self): } vm = qubes.tests.storage.TestVM(self) volume = self.app.get_pool(self.pool.name).init_volume(vm, config) - volume.create() - volume.start() - volume.stop() - volume.start() - volume.stop() + self.loop.run_until_complete(volume.create()) + self.loop.run_until_complete(volume.start()) + self.loop.run_until_complete(volume.stop()) + self.loop.run_until_complete(volume.start()) + self.loop.run_until_complete(volume.stop()) self.assertEqual(len(volume.revisions), 2) revisions = volume.revisions revision_id = min(revisions.keys()) @@ -639,7 +639,7 @@ def test_021_revert_earlier(self): rev_uuid = self._get_lv_uuid(volume.vid + '-' + revision_id) self.assertFalse(volume.is_dirty()) self.assertNotEqual(current_uuid, rev_uuid) - volume.revert(revision_id) + self.loop.run_until_complete(volume.revert(revision_id)) path_snap = '/dev/' + volume._vid_snap self.assertFalse(os.path.exists(path_snap), path_snap) self.assertEqual(current_path, volume.path) @@ -647,7 +647,7 @@ def test_021_revert_earlier(self): self.assertEqual(new_uuid, rev_uuid) self.assertEqual(volume.revisions, revisions) - volume.remove() + self.loop.run_until_complete(volume.remove()) def test_030_import_data(self): ''' Test volume import''' @@ -661,14 +661,14 @@ def test_030_import_data(self): } vm = qubes.tests.storage.TestVM(self) volume = self.app.get_pool(self.pool.name).init_volume(vm, config) - volume.create() + self.loop.run_until_complete(volume.create()) current_uuid = self._get_lv_uuid(volume.path) 
self.assertFalse(volume.is_dirty()) - import_path = volume.import_data() + import_path = self.loop.run_until_complete(volume.import_data()) import_uuid = self._get_lv_uuid(import_path) self.assertNotEqual(current_uuid, import_uuid) # success - commit data - volume.import_data_end(True) + self.loop.run_until_complete(volume.import_data_end(True)) new_current_uuid = self._get_lv_uuid(volume.path) self.assertEqual(new_current_uuid, import_uuid) revisions = volume.revisions @@ -678,7 +678,7 @@ def test_030_import_data(self): self._get_lv_uuid(volume.vid + '-' + revision)) self.assertFalse(os.path.exists(import_path), import_path) - volume.remove() + self.loop.run_until_complete(volume.remove()) def test_031_import_data_fail(self): ''' Test volume import''' @@ -692,21 +692,21 @@ def test_031_import_data_fail(self): } vm = qubes.tests.storage.TestVM(self) volume = self.app.get_pool(self.pool.name).init_volume(vm, config) - volume.create() + self.loop.run_until_complete(volume.create()) current_uuid = self._get_lv_uuid(volume.path) self.assertFalse(volume.is_dirty()) - import_path = volume.import_data() + import_path = self.loop.run_until_complete(volume.import_data()) import_uuid = self._get_lv_uuid(import_path) self.assertNotEqual(current_uuid, import_uuid) # fail - discard data - volume.import_data_end(False) + self.loop.run_until_complete(volume.import_data_end(False)) new_current_uuid = self._get_lv_uuid(volume.path) self.assertEqual(new_current_uuid, current_uuid) revisions = volume.revisions self.assertEqual(len(revisions), 0) self.assertFalse(os.path.exists(import_path), import_path) - volume.remove() + self.loop.run_until_complete(volume.remove()) def test_032_import_volume_same_pool(self): '''Import volume from the same pool''' @@ -721,7 +721,7 @@ def test_032_import_volume_same_pool(self): } vm = qubes.tests.storage.TestVM(self) source_volume = self.app.get_pool(self.pool.name).init_volume(vm, config) - source_volume.create() + self.loop.run_until_complete(source_volume.create()) source_uuid = self._get_lv_uuid(source_volume.path) @@ -738,7 +738,7 @@ def test_032_import_volume_same_pool(self): volume.log = unittest.mock.Mock() with unittest.mock.patch('time.time') as mock_time: mock_time.side_effect = [1521065905] - volume.create() + self.loop.run_until_complete(volume.create()) self.assertEqual(volume.revisions, {}) uuid_before = self._get_lv_uuid(volume.path) @@ -760,8 +760,8 @@ def test_032_import_volume_same_pool(self): } self.assertEqual(volume.revisions, expected_revisions) - volume.remove() - source_volume.remove() + self.loop.run_until_complete(volume.remove()) + self.loop.run_until_complete(source_volume.remove()) def test_033_import_volume_different_pool(self): '''Import volume from a different pool''' @@ -780,7 +780,7 @@ def test_033_import_volume_different_pool(self): volume.log = unittest.mock.Mock() with unittest.mock.patch('time.time') as mock_time: mock_time.side_effect = [1521065905] - volume.create() + self.loop.run_until_complete(volume.create()) self.assertEqual(volume.revisions, {}) uuid_before = self._get_lv_uuid(volume.path) @@ -807,7 +807,7 @@ def test_033_import_volume_different_pool(self): } self.assertEqual(volume.revisions, expected_revisions) - volume.remove() + self.loop.run_until_complete(volume.remove()) def test_040_volatile(self): '''Volatile volume test''' @@ -821,21 +821,21 @@ def test_040_volatile(self): volume = self.app.get_pool(self.pool.name).init_volume(vm, config) # volatile volume don't need any file, verify should succeed 
self.assertTrue(volume.verify()) - volume.create() + self.loop.run_until_complete(volume.create()) self.assertTrue(volume.verify()) self.assertFalse(volume.save_on_stop) self.assertFalse(volume.snap_on_start) path = volume.path self.assertEqual(path, '/dev/' + volume.vid) self.assertFalse(os.path.exists(path)) - volume.start() + self.loop.run_until_complete(volume.start()) self.assertTrue(os.path.exists(path)) vol_uuid = self._get_lv_uuid(path) - volume.start() + self.loop.run_until_complete(volume.start()) self.assertTrue(os.path.exists(path)) vol_uuid2 = self._get_lv_uuid(path) self.assertNotEqual(vol_uuid, vol_uuid2) - volume.stop() + self.loop.run_until_complete(volume.stop()) self.assertFalse(os.path.exists(path)) def test_050_snapshot_volume(self): @@ -850,7 +850,7 @@ def test_050_snapshot_volume(self): vm = qubes.tests.storage.TestVM(self) volume_origin = self.app.get_pool(self.pool.name).init_volume( vm, config_origin) - volume_origin.create() + self.loop.run_until_complete(volume_origin.create()) config_snapshot = { 'name': 'root2', 'pool': self.pool.name, @@ -868,11 +868,11 @@ def test_050_snapshot_volume(self): # only origin volume really needs to exist, verify should succeed # even before create self.assertTrue(volume.verify()) - volume.create() + self.loop.run_until_complete(volume.create()) path = volume.path self.assertEqual(path, '/dev/' + volume.vid) self.assertFalse(os.path.exists(path), path) - volume.start() + self.loop.run_until_complete(volume.start()) # snapshot volume isn't considered dirty at any time self.assertFalse(volume.is_dirty()) # not outdated yet @@ -882,13 +882,13 @@ def test_050_snapshot_volume(self): self.assertEqual(origin_uuid, snap_origin_uuid) # now make it outdated - volume_origin.start() - volume_origin.stop() + self.loop.run_until_complete(volume_origin.start()) + self.loop.run_until_complete(volume_origin.stop()) self.assertTrue(volume.is_outdated()) origin_uuid = self._get_lv_uuid(volume_origin.path) self.assertNotEqual(origin_uuid, snap_origin_uuid) - volume.stop() + self.loop.run_until_complete(volume.stop()) # stopped volume is never outdated self.assertFalse(volume.is_outdated()) path = volume.path @@ -896,8 +896,8 @@ def test_050_snapshot_volume(self): path = '/dev/' + volume._vid_snap self.assertFalse(os.path.exists(path), path) - volume.remove() - volume_origin.remove() + self.loop.run_until_complete(volume.remove()) + self.loop.run_until_complete(volume_origin.remove()) def test_100_pool_list_volumes(self): config = { @@ -911,24 +911,24 @@ def test_100_pool_list_volumes(self): config2 = config.copy() vm = qubes.tests.storage.TestVM(self) volume1 = self.app.get_pool(self.pool.name).init_volume(vm, config) - volume1.create() + self.loop.run_until_complete(volume1.create()) config2['name'] = 'private' volume2 = self.app.get_pool(self.pool.name).init_volume(vm, config2) - volume2.create() + self.loop.run_until_complete(volume2.create()) # create some revisions - volume1.start() - volume1.stop() + self.loop.run_until_complete(volume1.start()) + self.loop.run_until_complete(volume1.stop()) # and have one in dirty state - volume2.start() + self.loop.run_until_complete(volume2.start()) self.assertIn(volume1, list(self.pool.volumes)) self.assertIn(volume2, list(self.pool.volumes)) - volume1.remove() + self.loop.run_until_complete(volume1.remove()) self.assertNotIn(volume1, list(self.pool.volumes)) self.assertIn(volume2, list(self.pool.volumes)) - volume2.remove() + self.loop.run_until_complete(volume2.remove()) self.assertNotIn(volume1, 
list(self.pool.volumes)) self.assertNotIn(volume1, list(self.pool.volumes)) From e1f65bdf7b823562db25a8714ec3994e6ef547e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Sun, 21 Oct 2018 04:36:13 +0200 Subject: [PATCH 17/33] vm: add shutdown_timeout property, make vm.shutdown(wait=True) use it vm.shutdown(wait=True) waited indefinitely for the shutdown, which makes useless without some boilerplate handling the timeout. Since the timeout may depend on the operating system inside, add a per-VM property for it, with value inheritance from template and then from global default_shutdown_timeout property. When timeout is reached, the method raises exception - whether to kill it or not is left to the caller. Fixes QubesOS/qubes-issues#1696 --- qubes/app.py | 6 ++++++ qubes/exc.py | 8 ++++++++ qubes/vm/qubesvm.py | 24 +++++++++++++++++++++--- 3 files changed, 35 insertions(+), 3 deletions(-) diff --git a/qubes/app.py b/qubes/app.py index 8d0bd15e1..5e72159fb 100644 --- a/qubes/app.py +++ b/qubes/app.py @@ -725,6 +725,12 @@ class Qubes(qubes.PropertyHolder): doc='''Default time in seconds after which qrexec connection attempt is deemed failed''') + default_shutdown_timeout = qubes.property('default_shutdown_timeout', + load_stage=3, + default=60, + type=int, + doc='''Default time in seconds for VM shutdown to complete''') + stats_interval = qubes.property('stats_interval', default=3, type=int, diff --git a/qubes/exc.py b/qubes/exc.py index 427ae4fa8..3c7797d1f 100644 --- a/qubes/exc.py +++ b/qubes/exc.py @@ -101,6 +101,14 @@ def __init__(self, vm, msg=None): super(QubesVMNotHaltedError, self).__init__(vm, msg or 'Domain is not powered off: {!r}'.format(vm.name)) +class QubesVMShutdownTimeoutError(QubesVMError): + '''Domain shutdown timed out. + + ''' + def __init__(self, vm, msg=None): + super(QubesVMShutdownTimeoutError, self).__init__(vm, + msg or 'Domain shutdown timed out: {!r}'.format(vm.name)) + class QubesNoTemplateError(QubesVMError): '''Cannot start domain, because there is no template''' diff --git a/qubes/vm/qubesvm.py b/qubes/vm/qubesvm.py index 7b0237769..b2f975874 100644 --- a/qubes/vm/qubesvm.py +++ b/qubes/vm/qubesvm.py @@ -517,6 +517,14 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM): failed. Operating system inside VM should be able to boot in this time.''') + shutdown_timeout = qubes.property('shutdown_timeout', type=int, + default=_default_with_template('shutdown_timeout', + lambda self: self.app.default_shutdown_timeout), + setter=_setter_positive_int, + doc='''Time in seconds for shutdown of the VM, after which VM may be + forcefully powered off. Operating system inside VM should be + able to fully shutdown in this time.''') + autostart = qubes.property('autostart', default=False, type=bool, setter=qubes.property.bool, doc='''Setting this to `True` means that VM should be autostarted on @@ -1055,9 +1063,13 @@ def on_domain_stopped(self, _event, **_kwargs): self.name) @asyncio.coroutine - def shutdown(self, force=False, wait=False): + def shutdown(self, force=False, wait=False, timeout=None): '''Shutdown domain. + :param force: ignored + :param wait: wait for shutdown to complete + :param timeout: shutdown wait timeout (for *wait*=True), defaults to + :py:attr:`shutdown_timeout` :raises qubes.exc.QubesVMNotStartedError: \ when domain is already shut down. 
''' @@ -1070,8 +1082,14 @@ def shutdown(self, force=False, wait=False): self.libvirt_domain.shutdown() - while wait and not self.is_halted(): - yield from asyncio.sleep(0.25) + if wait: + if timeout is None: + timeout = self.shutdown_timeout + while timeout > 0 and not self.is_halted(): + yield from asyncio.sleep(0.25) + timeout -= 0.25 + if not self.is_halted(): + raise qubes.exc.QubesVMShutdownTimeoutError(self) return self From 5be003d53904fe3585dbc63e9cc4585e4dd32961 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Sun, 21 Oct 2018 04:44:47 +0200 Subject: [PATCH 18/33] vm/dispvm: fix DispVM cleanup First unregister the domain from the collection, and only then call remove_from_disk(). Removing it from the collection prevents further calls being made to it. Or, if anything else keeps a reference to it (for example as a netvm), then abort the operation. Additionally, this makes it unnecessary to take the startup lock when cleaning it up in tests. --- qubes/tests/__init__.py | 7 ------- qubes/vm/dispvm.py | 4 ++-- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/qubes/tests/__init__.py b/qubes/tests/__init__.py index c3f3a2fd6..787775995 100644 --- a/qubes/tests/__init__.py +++ b/qubes/tests/__init__.py @@ -789,13 +789,6 @@ def _remove_vm_qubes(self, vm): vmname = vm.name app = vm.app - # avoid race with DispVM.auto_cleanup=True - try: - self.loop.run_until_complete( - asyncio.wait_for(vm.startup_lock.acquire(), 10)) - except asyncio.TimeoutError: - pass - try: # XXX .is_running() may throw libvirtError if undefined if vm.is_running(): diff --git a/qubes/vm/dispvm.py b/qubes/vm/dispvm.py index 2179dd9f8..0c6389f3f 100644 --- a/qubes/vm/dispvm.py +++ b/qubes/vm/dispvm.py @@ -142,8 +142,8 @@ def on_domain_shutdown(self, _event, **_kwargs): def _auto_cleanup(self): '''Do auto cleanup if enabled''' if self.auto_cleanup and self in self.app.domains: - yield from self.remove_from_disk() del self.app.domains[self] + yield from self.remove_from_disk() self.app.save() @classmethod @@ -193,8 +193,8 @@ def cleanup(self): pass # if auto_cleanup is set, this will be done automatically if not self.auto_cleanup: - yield from self.remove_from_disk() del self.app.domains[self] + yield from self.remove_from_disk() self.app.save() @asyncio.coroutine From 2c1629da042d6495c6dae5ae1fdc411cc105f825 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Sun, 21 Oct 2018 04:52:27 +0200 Subject: [PATCH 19/33] vm: call after-shutdown cleanup also from vm.kill and vm.shutdown Cleaning up after domain shutdown (domain-stopped and domain-shutdown events) relies on libvirt events, which may be unreliable in some cases (events may be processed with some delay, or, if libvirt was restarted in the meantime, may not happen at all).
So, instead of ensuring only proper ordering between shutdown cleanup and next startup, also trigger the cleanup when we know for sure the domain isn't running: - at vm.kill() - after libvirt confirms domain was destroyed - at vm.shutdown(wait=True) - after successful shutdown - at vm.remove_from_disk() - after ensuring it isn't running but just before actually removing it This fixes various race conditions: - qvm-kill && qvm-remove: remove could happen before shutdown cleanup was done and the storage driver would be confused about that - qvm-shutdown --wait && qvm-clone: clone could happen before new content was committed to the original volume, making a copy of the previous VM state (and probably more) Previously it wasn't such a big issue on the default configuration, because the LVM driver was fully synchronous, effectively blocking the whole qubesd for the time the cleanup happened. To avoid code duplication, factor out the _ensure_shutdown_handled function calling the actual cleanup (and possibly canceling the one called with the libvirt event). Note that now the "Duplicated stopped event from libvirt received!" warning may happen in normal circumstances, not only because of some bug. It is very important that post-shutdown cleanup happens when the domain is not running. To ensure that, take startup_lock and, under it, 1) ensure it's halted and only then 2) execute the cleanup. This isn't necessary when removing it from disk, because it's already removed from the collection at that time, which also avoids other calls to it (see also "vm/dispvm: fix DispVM cleanup" commit). Actually, taking the startup_lock in the remove_from_disk function would cause a deadlock in DispVM auto cleanup code: - vm.kill (or other trigger for the cleanup) - vm.startup_lock acquire <==== - vm._ensure_shutdown_handled - domain-shutdown event - vm._auto_cleanup (in DispVM class) - vm.remove_from_disk - cannot take vm.startup_lock again --- qubes/vm/qubesvm.py | 90 ++++++++++++++++++++++++++++----------------- 1 file changed, 56 insertions(+), 34 deletions(-) diff --git a/qubes/vm/qubesvm.py b/qubes/vm/qubesvm.py index b2f975874..faa7467b1 100644 --- a/qubes/vm/qubesvm.py +++ b/qubes/vm/qubesvm.py @@ -878,6 +878,36 @@ def on_property_pre_del_autostart(self, event, name, oldvalue=None): # methods for changing domain state # + @asyncio.coroutine + def _ensure_shutdown_handled(self): + '''Make sure previous shutdown is fully handled. + MUST NOT be called when domain is running. + ''' + with (yield from self._domain_stopped_lock): + # Don't accept any new stopped event's till a new VM has been + # created. If we didn't received any stopped event or it wasn't + # handled yet we will handle this in the next lines. + self._domain_stopped_event_received = True + + if self._domain_stopped_future is not None: + # Libvirt stopped event was already received, so cancel the + # future. If it didn't generate the Qubes events yet we + # will do it below. + self._domain_stopped_future.cancel() + self._domain_stopped_future = None + + if not self._domain_stopped_event_handled: + # No Qubes domain-stopped events have been generated yet. + # So do this now. + + # Set this immediately such that we don't generate events + # twice if an exception gets thrown.
+ self._domain_stopped_event_handled = True + + yield from self.fire_event_async('domain-stopped') + yield from self.fire_event_async('domain-shutdown') + + @asyncio.coroutine def start(self, start_guid=True, notify_function=None, mem_required=None): @@ -894,29 +924,7 @@ def start(self, start_guid=True, notify_function=None, if self.get_power_state() != 'Halted': return self - with (yield from self._domain_stopped_lock): - # Don't accept any new stopped event's till a new VM has been - # created. If we didn't received any stopped event or it wasn't - # handled yet we will handle this in the next lines. - self._domain_stopped_event_received = True - - if self._domain_stopped_future is not None: - # Libvirt stopped event was already received, so cancel the - # future. If it didn't generate the Qubes events yet we - # will do it below. - self._domain_stopped_future.cancel() - self._domain_stopped_future = None - - if not self._domain_stopped_event_handled: - # No Qubes domain-stopped events have been generated yet. - # So do this now. - - # Set this immediately such that we don't generate events - # twice if an exception gets thrown. - self._domain_stopped_event_handled = True - - yield from self.fire_event_async('domain-stopped') - yield from self.fire_event_async('domain-shutdown') + yield from self._ensure_shutdown_handled() self.log.info('Starting {}'.format(self.name)) @@ -1030,8 +1038,8 @@ def on_libvirt_domain_stopped(self): return if self._domain_stopped_event_received: - self.log.warning('Duplicated stopped event from libvirt received!') - # ignore this unexpected event + # ignore this event - already triggered by shutdown(), kill(), + # or subsequent start() return self._domain_stopped_event_received = True @@ -1088,8 +1096,12 @@ def shutdown(self, force=False, wait=False, timeout=None): while timeout > 0 and not self.is_halted(): yield from asyncio.sleep(0.25) timeout -= 0.25 - if not self.is_halted(): - raise qubes.exc.QubesVMShutdownTimeoutError(self) + with (yield from self.startup_lock): + if self.is_halted(): + # make sure all shutdown tasks are completed + yield from self._ensure_shutdown_handled() + else: + raise qubes.exc.QubesVMShutdownTimeoutError(self) return self @@ -1104,13 +1116,17 @@ def kill(self): if not self.is_running() and not self.is_paused(): raise qubes.exc.QubesVMNotStartedError(self) - try: - self.libvirt_domain.destroy() - except libvirt.libvirtError as e: - if e.get_error_code() == libvirt.VIR_ERR_OPERATION_INVALID: - raise qubes.exc.QubesVMNotStartedError(self) - else: - raise + with (yield from self.startup_lock): + try: + self.libvirt_domain.destroy() + except libvirt.libvirtError as e: + if e.get_error_code() == libvirt.VIR_ERR_OPERATION_INVALID: + raise qubes.exc.QubesVMNotStartedError(self) + else: + raise + + # make sure all shutdown tasks are completed + yield from self._ensure_shutdown_handled() return self @@ -1477,6 +1493,12 @@ def remove_from_disk(self): "Can't remove VM {!s}, beacuse it's in state {!r}.".format( self, self.get_power_state())) + # make sure shutdown is handled before removing anything, but only if + # handling is pending; if not, we may be called from within + # domain-shutdown event (DispVM._auto_cleanup), which would deadlock + if not self._domain_stopped_event_handled: + yield from self._ensure_shutdown_handled() + yield from self.fire_event_async('domain-remove-from-disk') try: # TODO: make it async? 
From cf8b6219a9f6c13246894ce45dc6c74e56a4250b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Sun, 21 Oct 2018 05:11:24 +0200 Subject: [PATCH 20/33] tests: make use of vm.shutdown(wait=True) --- qubes/tests/__init__.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/qubes/tests/__init__.py b/qubes/tests/__init__.py index 787775995..2940c7c79 100644 --- a/qubes/tests/__init__.py +++ b/qubes/tests/__init__.py @@ -1070,15 +1070,12 @@ def enter_keys_in_window(self, title, keys): subprocess.check_call(command) def shutdown_and_wait(self, vm, timeout=60): - self.loop.run_until_complete(vm.shutdown()) - while timeout > 0: - if not vm.is_running(): - return - self.loop.run_until_complete(asyncio.sleep(1)) - timeout -= 1 - name = vm.name - del vm - self.fail("Timeout while waiting for VM {} shutdown".format(name)) + try: + self.loop.run_until_complete(vm.shutdown(wait=True, timeout=timeout)) + except qubes.exc.QubesException: + name = vm.name + del vm + self.fail("Timeout while waiting for VM {} shutdown".format(name)) def prepare_hvm_system_linux(self, vm, init_script, extra_files=None): if not os.path.exists('/usr/lib/grub/i386-pc'): From 4e762788a9b574d93c5a77f6d34ba5af3b6f14b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Sun, 21 Oct 2018 05:12:27 +0200 Subject: [PATCH 21/33] tests: check if qubes-vm@ service is disabled on domain removal Test for QubesOS/qubes-issues#4014 --- qubes/tests/integ/basic.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/qubes/tests/integ/basic.py b/qubes/tests/integ/basic.py index 6d535e7e9..dfb57e391 100644 --- a/qubes/tests/integ/basic.py +++ b/qubes/tests/integ/basic.py @@ -115,6 +115,26 @@ def test_120_start_standalone_with_cdrom_dom0(self): self.loop.run_until_complete(asyncio.sleep(1)) self.assertFalse(self.vm.is_running()) + def test_130_autostart_disable_on_remove(self): + vm = self.app.add_new_vm(qubes.vm.appvm.AppVM, + name=self.make_vm_name('vm'), + template=self.app.default_template, + label='red') + + self.assertIsNotNone(vm) + self.loop.run_until_complete(vm.create_on_disk()) + vm.autostart = True + self.assertTrue(os.path.exists( + '/etc/systemd/system/multi-user.target.wants/' + 'qubes-vm@{}.service'.format(vm.name)), + "systemd service not enabled by autostart=True") + del self.app.domains[vm] + self.loop.run_until_complete(vm.remove_from_disk()) + self.assertFalse(os.path.exists( + '/etc/systemd/system/multi-user.target.wants/' + 'qubes-vm@{}.service'.format(vm.name)), + "systemd service not disabled on domain remove") + def _test_200_on_domain_start(self, vm, event, **_kwargs): '''Simulate domain crash just after startup''' vm.libvirt_domain.destroy() From 08ddeee9fbb55768fe930d22947991fce60472fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Sun, 21 Oct 2018 05:19:07 +0200 Subject: [PATCH 22/33] tests: improve VMs cleanup wrt custom templates Clean up VMs in template reverse topological order, not network order. Network can be set to None to break the dependency, but template can't. For the netvm to be changed, kill VMs first (kill doesn't check the network dependency), so the netvm change will not trigger side effects (a runtime change, which could fail). This fixes cleanup for tests creating custom templates - previously the order was undefined, and if a template was removed before its child VMs, the removal failed. All the relevant files were removed later anyway, but it led to Python object leaks.
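The removal order described above can be illustrated with a small standalone sketch. This is a simplified view of the test helper changed in this patch (the real code also kills the VMs and clears netvm/default_dispvm references first); the names are illustrative:

def remove_in_template_order(vms, remove):
    # naive reverse-topological removal: a template goes away only after
    # every VM based on it has been removed
    vms = list(vms)
    while vms:
        vm = vms.pop(0)
        child_vms = list(getattr(vm, 'appvms', []))
        if any(child in vms for child in child_vms):
            # something still uses this template - retry it later
            vms.append(vm)
            continue
        remove(vm)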
--- qubes/tests/__init__.py | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/qubes/tests/__init__.py b/qubes/tests/__init__.py index 2940c7c79..b66a4ee75 100644 --- a/qubes/tests/__init__.py +++ b/qubes/tests/__init__.py @@ -789,13 +789,6 @@ def _remove_vm_qubes(self, vm): vmname = vm.name app = vm.app - try: - # XXX .is_running() may throw libvirtError if undefined - if vm.is_running(): - self.loop.run_until_complete(vm.kill()) - except: # pylint: disable=bare-except - pass - try: self.loop.run_until_complete(vm.remove_from_disk()) except: # pylint: disable=bare-except @@ -876,18 +869,36 @@ def remove_vms(self, vms): vms = list(vms) if not vms: return + # first kill all the domains, to avoid side effects of changing netvm + for vm in vms: + try: + # XXX .is_running() may throw libvirtError if undefined + if vm.is_running(): + self.loop.run_until_complete(vm.kill()) + except: # pylint: disable=bare-except + pass # break dependencies for vm in vms: vm.default_dispvm = None - # then remove in reverse topological order (wrt netvm), using naive + vm.netvm = None + # take app instance from any VM to be removed + app = vms[0].app + if app.default_dispvm in vms: + app.default_dispvm = None + if app.default_netvm in vms: + app.default_netvm = None + del app + # then remove in reverse topological order (wrt template), using naive # algorithm - # this heavily depends on lack of netvm loops + # this heavily depends on lack of template loops, but those are + # impossible while vms: vm = vms.pop(0) # make sure that all connected VMs are going to be removed, # otherwise this will loop forever - assert all(x in vms for x in vm.connected_vms) - if list(vm.connected_vms): + child_vms = list(getattr(vm, 'appvms', [])) + assert all(x in vms for x in child_vms) + if child_vms: # if still something use this VM, put it at the end of queue # and try next one vms.append(vm) From a972c61914e0a5c12a2e91d9b7f6e5157fc49cfc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Sun, 21 Oct 2018 16:26:39 +0200 Subject: [PATCH 23/33] tests: use socat instead of nc socat have only one variant, so one command line syntax to handle. It's also installed by default in Qubes VMs. 
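For reference, the socat invocations the tests now rely on look roughly like this (a standalone sketch; the IP values are placeholders for the test attributes used above):

# listener in the network VM: answer each TCP connection with hostname output
server_cmd = 'socat TCP-LISTEN:1234,fork EXEC:/bin/hostname'

# client in the tested VM (IPv4 and IPv6 forms used in the tests)
test_ip = '192.168.1.100'    # placeholder for self.test_ip
test_ip6 = '2000:abcd::1'    # placeholder for self.test_ip6
client_cmd = 'socat TCP:{}:1234 -'.format(test_ip)
client6_cmd = 'socat TCP:[{}]:1234 -'.format(test_ip6)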
--- qubes/tests/integ/network.py | 125 +++++++++++------------------------ 1 file changed, 39 insertions(+), 86 deletions(-) diff --git a/qubes/tests/integ/network.py b/qubes/tests/integ/network.py index 524dd5b88..1b4c9e040 100644 --- a/qubes/tests/integ/network.py +++ b/qubes/tests/integ/network.py @@ -32,10 +32,6 @@ import qubes.vm.qubesvm import qubes.vm.appvm -class NcVersion: - Trad = 1 - Nmap = 2 - # noinspection PyAttributeOutsideInit,PyPep8Naming class VmNetworkingMixin(object): @@ -63,18 +59,6 @@ def run_cmd(self, vm, cmd, user="root"): return e.returncode return 0 - def check_nc_version(self, vm): - ''' - :type self: qubes.tests.SystemTestCase | VMNetworkingMixin - :param vm: VM where check ncat version in - ''' - if self.run_cmd(vm, 'nc -h >/dev/null 2>&1') != 0: - self.skipTest('nc not installed') - if self.run_cmd(vm, 'nc -h 2>&1|grep -q nmap.org') == 0: - return NcVersion.Nmap - else: - return NcVersion.Trad - def setUp(self): ''' :type self: qubes.tests.SystemTestCase | VMNetworkingMixin @@ -228,8 +212,6 @@ def test_030_firewallvm_firewall(self): self.testvm1.netvm = self.proxy self.app.save() - nc_version = self.check_nc_version(self.testnetvm) - # block all for first self.testvm1.firewall.rules = [qubes.firewall.Rule(action='drop')] @@ -237,10 +219,8 @@ def test_030_firewallvm_firewall(self): self.loop.run_until_complete(self.testvm1.start()) self.assertTrue(self.proxy.is_running()) - nc = self.loop.run_until_complete(self.testnetvm.run( - 'nc -l --send-only -e /bin/hostname -k 1234' - if nc_version == NcVersion.Nmap - else 'while nc -l -e /bin/hostname -p 1234; do true; done')) + server = self.loop.run_until_complete(self.testnetvm.run( + 'socat TCP-LISTEN:1234,fork EXEC:/bin/hostname')) try: self.assertEqual(self.run_cmd(self.proxy, self.ping_ip), 0, @@ -250,11 +230,8 @@ def test_030_firewallvm_firewall(self): self.assertNotEqual(self.run_cmd(self.testvm1, self.ping_ip), 0, "Ping by IP should be blocked") - if nc_version == NcVersion.Nmap: - nc_cmd = "nc -w 1 --recv-only {} 1234".format(self.test_ip) - else: - nc_cmd = "nc -w 1 {} 1234".format(self.test_ip) - self.assertNotEqual(self.run_cmd(self.testvm1, nc_cmd), 0, + client_cmd = "socat TCP:{}:1234 -".format(self.test_ip) + self.assertNotEqual(self.run_cmd(self.testvm1, client_cmd), 0, "TCP connection should be blocked") # block all except ICMP @@ -283,7 +260,7 @@ def test_030_firewallvm_firewall(self): time.sleep(3) self.assertEqual(self.run_cmd(self.testvm1, self.ping_name), 0, "Ping by name failed (should be allowed now)") - self.assertNotEqual(self.run_cmd(self.testvm1, nc_cmd), 0, + self.assertNotEqual(self.run_cmd(self.testvm1, client_cmd), 0, "TCP connection should be blocked") # block all except target @@ -297,7 +274,7 @@ def test_030_firewallvm_firewall(self): # Ugly hack b/c there is no feedback when the rules are actually # applied time.sleep(3) - self.assertEqual(self.run_cmd(self.testvm1, nc_cmd), 0, + self.assertEqual(self.run_cmd(self.testvm1, client_cmd), 0, "TCP connection failed (should be allowed now)") # allow all except target @@ -312,11 +289,11 @@ def test_030_firewallvm_firewall(self): # Ugly hack b/c there is no feedback when the rules are actually # applied time.sleep(3) - self.assertNotEqual(self.run_cmd(self.testvm1, nc_cmd), 0, + self.assertNotEqual(self.run_cmd(self.testvm1, client_cmd), 0, "TCP connection should be blocked") finally: - nc.terminate() - self.loop.run_until_complete(nc.wait()) + server.terminate() + self.loop.run_until_complete(server.wait()) def test_040_inter_vm(self): @@ 
-479,8 +456,6 @@ def test_202_fake_ip_firewall(self): self.testvm1.netvm = self.proxy self.app.save() - nc_version = self.check_nc_version(self.testnetvm) - # block all but ICMP and DNS self.testvm1.firewall.rules = [ @@ -491,10 +466,8 @@ def test_202_fake_ip_firewall(self): self.loop.run_until_complete(self.testvm1.start()) self.assertTrue(self.proxy.is_running()) - nc = self.loop.run_until_complete(self.testnetvm.run( - 'nc -l --send-only -e /bin/hostname -k 1234' - if nc_version == NcVersion.Nmap - else 'while nc -l -e /bin/hostname -p 1234; do true; done')) + server = self.loop.run_until_complete(self.testnetvm.run( + 'socat TCP-LISTEN:1234,fork EXEC:/bin/hostname')) try: self.assertEqual(self.run_cmd(self.proxy, self.ping_ip), 0, @@ -505,15 +478,12 @@ def test_202_fake_ip_firewall(self): "Ping by IP should be allowed") self.assertEqual(self.run_cmd(self.testvm1, self.ping_name), 0, "Ping by name should be allowed") - if nc_version == NcVersion.Nmap: - nc_cmd = "nc -w 1 --recv-only {} 1234".format(self.test_ip) - else: - nc_cmd = "nc -w 1 {} 1234".format(self.test_ip) - self.assertNotEqual(self.run_cmd(self.testvm1, nc_cmd), 0, + client_cmd = "socat TCP:{}:1234 -".format(self.test_ip) + self.assertNotEqual(self.run_cmd(self.testvm1, client_cmd), 0, "TCP connection should be blocked") finally: - nc.terminate() - self.loop.run_until_complete(nc.wait()) + server.terminate() + self.loop.run_until_complete(server.wait()) def test_203_fake_ip_inter_vm_allow(self): '''Access VM with "fake IP" from other VM (when firewall allows) @@ -682,8 +652,6 @@ def test_212_custom_ip_firewall(self): self.testvm1.netvm = self.proxy self.app.save() - nc_version = self.check_nc_version(self.testnetvm) - # block all but ICMP and DNS self.testvm1.firewall.rules = [ @@ -694,10 +662,8 @@ def test_212_custom_ip_firewall(self): self.loop.run_until_complete(self.testvm1.start()) self.assertTrue(self.proxy.is_running()) - nc = self.loop.run_until_complete(self.testnetvm.run( - 'nc -l --send-only -e /bin/hostname -k 1234' - if nc_version == NcVersion.Nmap - else 'while nc -l -e /bin/hostname -p 1234; do true; done')) + server = self.loop.run_until_complete(self.testnetvm.run( + 'socat TCP-LISTEN:1234,fork EXEC:/bin/hostname')) try: self.assertEqual(self.run_cmd(self.proxy, self.ping_ip), 0, @@ -708,15 +674,12 @@ def test_212_custom_ip_firewall(self): "Ping by IP should be allowed") self.assertEqual(self.run_cmd(self.testvm1, self.ping_name), 0, "Ping by name should be allowed") - if nc_version == NcVersion.Nmap: - nc_cmd = "nc -w 1 --recv-only {} 1234".format(self.test_ip) - else: - nc_cmd = "nc -w 1 {} 1234".format(self.test_ip) - self.assertNotEqual(self.run_cmd(self.testvm1, nc_cmd), 0, + client_cmd = "socat TCP:{}:1234 -".format(self.test_ip) + self.assertNotEqual(self.run_cmd(self.testvm1, client_cmd), 0, "TCP connection should be blocked") finally: - nc.terminate() - self.loop.run_until_complete(nc.wait()) + server.terminate() + self.loop.run_until_complete(server.wait()) # noinspection PyAttributeOutsideInit,PyPep8Naming class VmIPv6NetworkingMixin(VmNetworkingMixin): @@ -852,9 +815,6 @@ def test_530_ipv6_firewallvm_firewall(self): self.testvm1.netvm = self.proxy self.app.save() - if self.run_cmd(self.testnetvm, 'ncat -h') != 0: - self.skipTest('nmap ncat not installed') - # block all for first self.testvm1.firewall.rules = [qubes.firewall.Rule(action='drop')] @@ -862,8 +822,8 @@ def test_530_ipv6_firewallvm_firewall(self): self.loop.run_until_complete(self.testvm1.start()) 
self.assertTrue(self.proxy.is_running()) - nc = self.loop.run_until_complete(self.testnetvm.run( - 'ncat -l --send-only -e /bin/hostname -k 1234')) + server = self.loop.run_until_complete(self.testnetvm.run( + 'socat TCP6-LISTEN:1234,fork EXEC:/bin/hostname')) try: self.assertEqual(self.run_cmd(self.proxy, self.ping6_ip), 0, @@ -873,8 +833,9 @@ def test_530_ipv6_firewallvm_firewall(self): self.assertNotEqual(self.run_cmd(self.testvm1, self.ping6_ip), 0, "Ping by IP should be blocked") - nc_cmd = "ncat -w 1 --recv-only {} 1234".format(self.test_ip6) - self.assertNotEqual(self.run_cmd(self.testvm1, nc_cmd), 0, + client6_cmd = "socat TCP:[{}]:1234 -".format(self.test_ip6) + client4_cmd = "socat TCP:{}:1234 -".format(self.test_ip) + self.assertNotEqual(self.run_cmd(self.testvm1, client6_cmd), 0, "TCP connection should be blocked") # block all except ICMP @@ -904,7 +865,7 @@ def test_530_ipv6_firewallvm_firewall(self): time.sleep(3) self.assertEqual(self.run_cmd(self.testvm1, self.ping6_name), 0, "Ping by name failed (should be allowed now)") - self.assertNotEqual(self.run_cmd(self.testvm1, nc_cmd), 0, + self.assertNotEqual(self.run_cmd(self.testvm1, client6_cmd), 0, "TCP connection should be blocked") # block all except target @@ -919,7 +880,7 @@ def test_530_ipv6_firewallvm_firewall(self): # Ugly hack b/c there is no feedback when the rules are actually # applied time.sleep(3) - self.assertEqual(self.run_cmd(self.testvm1, nc_cmd), 0, + self.assertEqual(self.run_cmd(self.testvm1, client6_cmd), 0, "TCP connection failed (should be allowed now)") # block all except target - by name @@ -934,10 +895,9 @@ def test_530_ipv6_firewallvm_firewall(self): # Ugly hack b/c there is no feedback when the rules are actually # applied time.sleep(3) - self.assertEqual(self.run_cmd(self.testvm1, nc_cmd), 0, + self.assertEqual(self.run_cmd(self.testvm1, client6_cmd), 0, "TCP (IPv6) connection failed (should be allowed now)") - self.assertEqual(self.run_cmd(self.testvm1, - nc_cmd.replace(self.test_ip6, self.test_ip)), + self.assertEqual(self.run_cmd(self.testvm1, client4_cmd), 0, "TCP (IPv4) connection failed (should be allowed now)") @@ -953,11 +913,11 @@ def test_530_ipv6_firewallvm_firewall(self): # Ugly hack b/c there is no feedback when the rules are actually # applied time.sleep(3) - self.assertNotEqual(self.run_cmd(self.testvm1, nc_cmd), 0, + self.assertNotEqual(self.run_cmd(self.testvm1, client6_cmd), 0, "TCP connection should be blocked") finally: - nc.terminate() - self.loop.run_until_complete(nc.wait()) + server.terminate() + self.loop.run_until_complete(server.wait()) def test_540_ipv6_inter_vm(self): @@ -1081,8 +1041,6 @@ def test_712_ipv6_custom_ip_firewall(self): self.testvm1.netvm = self.proxy self.app.save() - nc_version = self.check_nc_version(self.testnetvm) - # block all but ICMP and DNS self.testvm1.firewall.rules = [ @@ -1093,10 +1051,8 @@ def test_712_ipv6_custom_ip_firewall(self): self.loop.run_until_complete(self.testvm1.start()) self.assertTrue(self.proxy.is_running()) - nc = self.loop.run_until_complete(self.testnetvm.run( - 'nc -l --send-only -e /bin/hostname -k 1234' - if nc_version == NcVersion.Nmap - else 'while nc -l -e /bin/hostname -p 1234; do true; done')) + server = self.loop.run_until_complete(self.testnetvm.run( + 'socat TCP6-LISTEN:1234,fork EXEC:/bin/hostname')) try: self.assertEqual(self.run_cmd(self.proxy, self.ping6_ip), 0, @@ -1107,15 +1063,12 @@ def test_712_ipv6_custom_ip_firewall(self): "Ping by IP should be allowed") self.assertEqual(self.run_cmd(self.testvm1, 
                                                self.ping6_name), 0,
                             "Ping by name should be allowed")
-            if nc_version == NcVersion.Nmap:
-                nc_cmd = "nc -w 1 --recv-only {} 1234".format(self.test_ip6)
-            else:
-                nc_cmd = "nc -w 1 {} 1234".format(self.test_ip6)
-            self.assertNotEqual(self.run_cmd(self.testvm1, nc_cmd), 0,
+            client_cmd = "socat TCP:[{}]:1234 -".format(self.test_ip6)
+            self.assertNotEqual(self.run_cmd(self.testvm1, client_cmd), 0,
                 "TCP connection should be blocked")
         finally:
-            nc.terminate()
-            self.loop.run_until_complete(nc.wait())
+            server.terminate()
+            self.loop.run_until_complete(server.wait())


 # noinspection PyAttributeOutsideInit,PyPep8Naming
 class VmUpdatesMixin(object):

From 0b7aa546c6a7cf3b744860e7763f05d1b01a8360 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
Date: Sun, 21 Oct 2018 16:49:20 +0200
Subject: [PATCH 24/33] tests: remove VM reference from QubesVMError

Yet another place where object references are leaked.

---
 qubes/tests/__init__.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/qubes/tests/__init__.py b/qubes/tests/__init__.py
index b66a4ee75..991692bba 100644
--- a/qubes/tests/__init__.py
+++ b/qubes/tests/__init__.py
@@ -395,6 +395,8 @@ def cleanup_traceback(self):
                 continue
             ex = exc_info[1]
             while ex is not None:
+                if isinstance(ex, qubes.exc.QubesVMError):
+                    ex.vm = None
                 traceback.clear_frames(ex.__traceback__)
                 ex = ex.__context__

From f13029219b7b1312bb33cebc5299657c5056b5e5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
Date: Sun, 21 Oct 2018 18:19:36 +0200
Subject: [PATCH 25/33] vm: disable/enable qubes-vm@ service when domain is removed/created

If a domain is set to autostart, the qubes-vm@ systemd service is used
to start it at boot. Clean up the service when the domain is removed,
and similarly enable the service when a domain that already has
autostart=True is created.

Fixes QubesOS/qubes-issues#4014

---
 qubes/vm/qubesvm.py | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/qubes/vm/qubesvm.py b/qubes/vm/qubesvm.py
index faa7467b1..bb4c1e039 100644
--- a/qubes/vm/qubesvm.py
+++ b/qubes/vm/qubesvm.py
@@ -874,6 +874,22 @@ def on_property_pre_del_autostart(self, event, name, oldvalue=None):
             raise qubes.exc.QubesException(
                 'Failed to reset autostart for VM in systemd')

+    @qubes.events.handler('domain-remove-from-disk')
+    def on_remove_from_disk(self, event, **kwargs):
+        # pylint: disable=unused-argument
+        if self.autostart:
+            subprocess.call(
+                ['sudo', 'systemctl', 'disable',
+                 'qubes-vm@{}.service'.format(self.name)])
+
+    @qubes.events.handler('domain-create-on-disk')
+    def on_create_on_disk(self, event, **kwargs):
+        # pylint: disable=unused-argument
+        if self.autostart:
+            subprocess.call(
+                ['sudo', 'systemctl', 'enable',
+                 'qubes-vm@{}.service'.format(self.name)])
+
     #
     # methods for changing domain state
     #

From e244c192ae1f87e4d252747eae66f8efb8c2a2f9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
Date: Sun, 21 Oct 2018 19:38:21 +0200
Subject: [PATCH 26/33] tests: use /bin/uname instead of /bin/hostname as dummy output generator

Use something from coreutils, which is installed everywhere.
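
The binary is only a trivial responder for the socat listener used by
the firewall tests; any program that prints a single line would do.
A condensed sketch of the pattern, taken from the tests themselves (the
real tests also assert the blocked cases and tear the listener down in
a finally block):

    # listener in the net VM: serve one line of output per connection
    server = self.loop.run_until_complete(self.testnetvm.run(
        'socat TCP-LISTEN:1234,fork EXEC:/bin/uname'))
    # probe from the tested VM: exit code 0 means the connection was allowed
    client_cmd = 'socat TCP:{}:1234 -'.format(self.test_ip)
    self.assertEqual(self.run_cmd(self.testvm1, client_cmd), 0)
    server.terminate()
    self.loop.run_until_complete(server.wait())
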
---
 qubes/tests/integ/network.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/qubes/tests/integ/network.py b/qubes/tests/integ/network.py
index 1b4c9e040..12a196b45 100644
--- a/qubes/tests/integ/network.py
+++ b/qubes/tests/integ/network.py
@@ -220,7 +220,7 @@ def test_030_firewallvm_firewall(self):
         self.assertTrue(self.proxy.is_running())

         server = self.loop.run_until_complete(self.testnetvm.run(
-            'socat TCP-LISTEN:1234,fork EXEC:/bin/hostname'))
+            'socat TCP-LISTEN:1234,fork EXEC:/bin/uname'))

         try:
             self.assertEqual(self.run_cmd(self.proxy, self.ping_ip), 0,
@@ -467,7 +467,7 @@ def test_202_fake_ip_firewall(self):
         self.assertTrue(self.proxy.is_running())

         server = self.loop.run_until_complete(self.testnetvm.run(
-            'socat TCP-LISTEN:1234,fork EXEC:/bin/hostname'))
+            'socat TCP-LISTEN:1234,fork EXEC:/bin/uname'))

         try:
             self.assertEqual(self.run_cmd(self.proxy, self.ping_ip), 0,
@@ -663,7 +663,7 @@ def test_212_custom_ip_firewall(self):
         self.assertTrue(self.proxy.is_running())

         server = self.loop.run_until_complete(self.testnetvm.run(
-            'socat TCP-LISTEN:1234,fork EXEC:/bin/hostname'))
+            'socat TCP-LISTEN:1234,fork EXEC:/bin/uname'))

         try:
             self.assertEqual(self.run_cmd(self.proxy, self.ping_ip), 0,
@@ -823,7 +823,7 @@ def test_530_ipv6_firewallvm_firewall(self):
         self.assertTrue(self.proxy.is_running())

         server = self.loop.run_until_complete(self.testnetvm.run(
-            'socat TCP6-LISTEN:1234,fork EXEC:/bin/hostname'))
+            'socat TCP6-LISTEN:1234,fork EXEC:/bin/uname'))

         try:
             self.assertEqual(self.run_cmd(self.proxy, self.ping6_ip), 0,
@@ -1052,7 +1052,7 @@ def test_712_ipv6_custom_ip_firewall(self):
         self.assertTrue(self.proxy.is_running())

         server = self.loop.run_until_complete(self.testnetvm.run(
-            'socat TCP6-LISTEN:1234,fork EXEC:/bin/hostname'))
+            'socat TCP6-LISTEN:1234,fork EXEC:/bin/uname'))

         try:
             self.assertEqual(self.run_cmd(self.proxy, self.ping6_ip), 0,

From 8be70c9e4d22e901fb41d21b55a3828306de7f90 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
Date: Tue, 23 Oct 2018 23:29:23 +0200
Subject: [PATCH 27/33] ext/services: allow for os=Linux feature request from VM

It's inconsistent to accept this for Windows but not for Linux.

---
 qubes/ext/windows.py | 2 +-
 qubes/tests/ext.py   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/qubes/ext/windows.py b/qubes/ext/windows.py
index 75420975f..0b417886f 100644
--- a/qubes/ext/windows.py
+++ b/qubes/ext/windows.py
@@ -34,7 +34,7 @@ def qubes_features_request(self, vm, event, untrusted_features):

         guest_os = None
         if 'os' in untrusted_features:
-            if untrusted_features['os'] in ['Windows']:
+            if untrusted_features['os'] in ['Windows', 'Linux']:
                 guest_os = untrusted_features['os']

         qrexec = None
diff --git a/qubes/tests/ext.py b/qubes/tests/ext.py
index 5acd183fc..6f59132fc 100644
--- a/qubes/tests/ext.py
+++ b/qubes/tests/ext.py
@@ -213,7 +213,7 @@ def test_002_notify_tools_other_os(self):
             'version': '1',
             'default-user': 'user',
             'qrexec': '1',
-            'os': 'Linux'})
+            'os': 'other'})
         self.assertEqual(self.vm.mock_calls, [])

 class TC_20_Services(qubes.tests.QubesTestCase):

From 2f3a9847425e1dbbf13ac5172518d6f2d05228c4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
Date: Wed, 24 Oct 2018 14:13:07 +0200
Subject: [PATCH 28/33] exc: Make QubesMemoryError inherit from QubesVMError

Same as the other VM-related errors. This lets
QubesTestCase.cleanup_traceback() clean up the VM reference.
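
For context, cleanup_traceback() (added earlier in this series) only
strips the reference from QubesVMError instances while walking the
exception chain, roughly:

    ex = exc_info[1]
    while ex is not None:
        if isinstance(ex, qubes.exc.QubesVMError):
            ex.vm = None  # drop the reference to the domain object
        traceback.clear_frames(ex.__traceback__)
        ex = ex.__context__

With QubesMemoryError outside the QubesVMError hierarchy, that check
did not apply to it.
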
---
 qubes/exc.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/qubes/exc.py b/qubes/exc.py
index 3c7797d1f..df31f60ea 100644
--- a/qubes/exc.py
+++ b/qubes/exc.py
@@ -159,7 +159,7 @@ def __init__(self, msg=None):
             msg or 'Backup cancelled')


-class QubesMemoryError(QubesException, MemoryError):
+class QubesMemoryError(QubesVMError, MemoryError):
     '''Cannot start domain, because not enough memory is available'''
     def __init__(self, vm, msg=None):
         super(QubesMemoryError, self).__init__(

From 4742a630f21ae0506c59ed60da98f62a711469f4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
Date: Wed, 24 Oct 2018 14:15:18 +0200
Subject: [PATCH 29/33] tests: use iptables --wait

QubesOS/qubes-issues#3665 also affects the tests...

---
 qubes/tests/integ/network.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/qubes/tests/integ/network.py b/qubes/tests/integ/network.py
index 12a196b45..8869c28bc 100644
--- a/qubes/tests/integ/network.py
+++ b/qubes/tests/integ/network.py
@@ -104,7 +104,8 @@ def run_netvm_cmd(cmd):
         run_netvm_cmd("ip link add test0 type dummy")
         run_netvm_cmd("ip link set test0 up")
         run_netvm_cmd("ip addr add {}/24 dev test0".format(self.test_ip))
-        run_netvm_cmd("iptables -I INPUT -d {} -j ACCEPT".format(self.test_ip))
+        run_netvm_cmd("iptables -I INPUT -d {} -j ACCEPT --wait".format(
+            self.test_ip))
         # ignore failure
         self.run_cmd(self.testnetvm, "killall --wait dnsmasq")
         run_netvm_cmd("dnsmasq -a {ip} -A /{name}/{ip} -i test0 -z".format(

From fb14f589cb0c449e32141893ebea8f508387ce7b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
Date: Sat, 27 Oct 2018 01:41:30 +0200
Subject: [PATCH 30/33] tests: wait for full user session before doing rest of the test

Clean VM shutdown may time out if it is initiated before full startup,
so make sure the full startup is completed first.
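
The fix is simply a call to the existing helper right after start(),
e.g.:

    self.loop.run_until_complete(self.testvm1.start())
    self.loop.run_until_complete(self.wait_for_session(self.testvm1))
    # only now is it safe to request a clean shutdown
    self.loop.run_until_complete(self.testvm1.shutdown(wait=True))

wait_for_session() runs the qubes.WaitForSession service for the VM's
default user, so control returns only once the user session is fully
up.
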
---
 qubes/tests/integ/basic.py         | 2 ++
 qubes/tests/integ/storage.py       | 6 ++++++
 qubes/tests/integ/vm_qrexec_gui.py | 1 +
 3 files changed, 9 insertions(+)

diff --git a/qubes/tests/integ/basic.py b/qubes/tests/integ/basic.py
index dfb57e391..e11c63ee7 100644
--- a/qubes/tests/integ/basic.py
+++ b/qubes/tests/integ/basic.py
@@ -254,6 +254,7 @@ def assertVolumesExcludedFromUdev(self, vm):
         try:
             # first boot, mkfs private volume
             self.loop.run_until_complete(vm.start())
+            self.loop.run_until_complete(self.wait_for_session(vm))
             # get private volume UUID
             private_uuid, _ = self.loop.run_until_complete(
                 vm.run_for_stdio('blkid -o value /dev/xvdb', user='root'))
@@ -482,6 +483,7 @@ def get_rootimg_checksum(self):
     def _do_test(self):
         checksum_before = self.get_rootimg_checksum()
         self.loop.run_until_complete(self.test_template.start())
+        self.loop.run_until_complete(self.wait_for_session(self.test_template))
         self.shutdown_and_wait(self.test_template)
         checksum_changed = self.get_rootimg_checksum()
         if checksum_before == checksum_changed:
diff --git a/qubes/tests/integ/storage.py b/qubes/tests/integ/storage.py
index 253d8e728..a4766e185 100644
--- a/qubes/tests/integ/storage.py
+++ b/qubes/tests/integ/storage.py
@@ -77,6 +77,7 @@ def _test_000_volatile(self):
         del coro_maybe
         self.app.save()
         yield from (self.vm1.start())
+        yield from self.wait_for_session(self.vm1)

         # volatile image not clean
         yield from (self.vm1.run_for_stdio(
@@ -112,6 +113,7 @@ def _test_001_non_volatile(self):
         del coro_maybe
         self.app.save()
         yield from self.vm1.start()
+        yield from self.wait_for_session(self.vm1)
         # non-volatile image not clean
         yield from self.vm1.run_for_stdio(
             'head -c {} /dev/zero 2>&1 | diff -q /dev/xvde - 2>&1'.format(size),
@@ -197,6 +199,9 @@ def _test_003_snapshot(self):
         self.app.save()
         yield from self.vm1.start()
         yield from self.vm2.start()
+        yield from asyncio.wait(
+            [self.wait_for_session(self.vm1), self.wait_for_session(self.vm2)])
+

         try:
             yield from self.vm1.run_for_stdio(
@@ -285,6 +290,7 @@ def _test_004_snapshot_non_persistent(self):
         del coro_maybe
         self.app.save()
         yield from self.vm2.start()
+        yield from self.wait_for_session(self.vm2)

         # snapshot image not clean
         yield from self.vm2.run_for_stdio(
diff --git a/qubes/tests/integ/vm_qrexec_gui.py b/qubes/tests/integ/vm_qrexec_gui.py
index 7b87e7867..a6e532a14 100644
--- a/qubes/tests/integ/vm_qrexec_gui.py
+++ b/qubes/tests/integ/vm_qrexec_gui.py
@@ -64,6 +64,7 @@ def test_000_start_shutdown(self):
         # TODO: wait_for, timeout
         self.loop.run_until_complete(self.testvm1.start())
         self.assertEqual(self.testvm1.get_power_state(), "Running")
+        self.loop.run_until_complete(self.wait_for_session(self.testvm1))
         self.loop.run_until_complete(self.testvm1.shutdown(wait=True))
         self.assertEqual(self.testvm1.get_power_state(), "Halted")

From 84d3547f0943d910e4aaffad0c7fa7b8eb6376a6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
Date: Sat, 27 Oct 2018 16:21:27 +0200
Subject: [PATCH 31/33] tests: adjust extra tests loader to work with nose2

The nose2 loader does not provide loader.loadTestsFromTestCase(); use
loader.loadTestsFromNames() instead.
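
loadTestsFromNames() takes dotted names instead of classes, so the test
case class is passed as 'module.ClassName', which both the stock
unittest loader and nose2's loader should resolve:

    name = '{}.{}'.format(test_case.__module__, test_case.__name__)
    tests.addTests(loader.loadTestsFromNames([name]))
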
---
 qubes/tests/extra.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/qubes/tests/extra.py b/qubes/tests/extra.py
index 9eb55c927..d4ee2f34e 100644
--- a/qubes/tests/extra.py
+++ b/qubes/tests/extra.py
@@ -195,7 +195,8 @@ def load_tests(loader, tests, pattern):
     for entry in pkg_resources.iter_entry_points('qubes.tests.extra'):
         try:
             for test_case in entry.load()():
-                tests.addTests(loader.loadTestsFromTestCase(test_case))
+                tests.addTests(loader.loadTestsFromNames([
+                    '{}.{}'.format(test_case.__module__, test_case.__name__)]))
         except Exception as err: # pylint: disable=broad-except
             def runTest(self):
                 raise err

From 84c321b923f215f064867e1dfcc3eabfde74847d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
Date: Sat, 27 Oct 2018 16:22:10 +0200
Subject: [PATCH 32/33] tests: increase session startup timeout for whonix-ws based VMs

The first boot of a whonix-ws based VM takes an extended period of
time, because a lot of files need to be copied to the private volume.
This takes even more time when verbose logging through the console is
enabled. Extend the timeout for that case.

---
 qubes/tests/__init__.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/qubes/tests/__init__.py b/qubes/tests/__init__.py
index 991692bba..cd44bef58 100644
--- a/qubes/tests/__init__.py
+++ b/qubes/tests/__init__.py
@@ -1204,10 +1204,15 @@ def create_remote_file(self, vm, filename, content):

     @asyncio.coroutine
     def wait_for_session(self, vm):
+        timeout = 30
+        if getattr(vm, 'template', None) and 'whonix-ws' in vm.template.name:
+            # first boot of whonix-ws takes more time because of /home
+            # initialization, including Tor Browser copying
+            timeout = 120
         yield from asyncio.wait_for(
             vm.run_service_for_stdio(
                 'qubes.WaitForSession', input=vm.default_user.encode()),
-            timeout=30)
+            timeout=timeout)

     _templates = None

From 42061cb1947669c93a583ecfd06991368a14d817 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
Date: Mon, 29 Oct 2018 01:20:57 +0100
Subject: [PATCH 33/33] tests: try to collect qvm-open-in-dvm output if no editor window is shown

Try to collect more details about why the test failed. This will help
only if qvm-open-in-dvm exits early. On the other hand, if it hangs, or
the remote side fails to find the right editor (which results in a GUI
error message), this change will not provide any more details.
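
The idea, sketched briefly (the full change follows): run
qvm-open-in-dvm with its output captured, and when the expected editor
window never shows up, check whether the process already exited and
report its combined stdout/stderr:

    p = self.loop.run_until_complete(
        self.testvm1.run("qvm-open-in-dvm /home/user/test.txt",
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
    # ... if wait_for_window() fails, poll the process briefly:
    try:
        self.loop.run_until_complete(asyncio.wait_for(p.wait(), 1))
    except asyncio.TimeoutError:
        pass  # still running - the original failure stands
    else:
        stdout = self.loop.run_until_complete(p.stdout.read())
        self.fail('qvm-open-in-dvm exited prematurely with {}: {}'.format(
            p.returncode, stdout))
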
--- qubes/tests/integ/dispvm.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/qubes/tests/integ/dispvm.py b/qubes/tests/integ/dispvm.py index 77c47cd3d..c9e2f6972 100644 --- a/qubes/tests/integ/dispvm.py +++ b/qubes/tests/integ/dispvm.py @@ -253,12 +253,24 @@ def test_030_edit_file(self): self.testvm1.run_for_stdio("echo test1 > /home/user/test.txt")) p = self.loop.run_until_complete( - self.testvm1.run("qvm-open-in-dvm /home/user/test.txt")) + self.testvm1.run("qvm-open-in-dvm /home/user/test.txt", + stdout=subprocess.PIPE, stderr=subprocess.STDOUT)) # if first 5 windows isn't expected editor, there is no hope winid = None for _ in range(5): - winid = self.wait_for_window('disp[0-9]*', search_class=True) + try: + winid = self.wait_for_window('disp[0-9]*', search_class=True) + except Exception as e: + try: + self.loop.run_until_complete(asyncio.wait_for(p.wait(), 1)) + except asyncio.TimeoutError: + raise e + else: + stdout = self.loop.run_until_complete(p.stdout.read()) + self.fail( + 'qvm-open-in-dvm exited prematurely with {}: {}'.format( + p.returncode, stdout)) # get window title (window_title, _) = subprocess.Popen( ['xdotool', 'getwindowname', winid], stdout=subprocess.PIPE). \ @@ -278,7 +290,7 @@ def test_030_edit_file(self): time.sleep(0.5) self._handle_editor(winid) - self.loop.run_until_complete(p.wait()) + self.loop.run_until_complete(p.communicate()) (test_txt_content, _) = self.loop.run_until_complete( self.testvm1.run_for_stdio("cat /home/user/test.txt")) # Drop BOM if added by editor