From 55916cd852740296ddf08aa0a80b9ddee968acb3 Mon Sep 17 00:00:00 2001 From: Andrey Golovizin Date: Thu, 28 Dec 2017 15:32:26 +0100 Subject: [PATCH 01/12] Upload disk image via libvirt API --- nix/libvirtd.nix | 8 +-- nixops/backends/libvirtd.py | 127 +++++++++++++++++++++++++++--------- 2 files changed, 99 insertions(+), 36 deletions(-) diff --git a/nix/libvirtd.nix b/nix/libvirtd.nix index dfa4f0f99..e79f4e870 100644 --- a/nix/libvirtd.nix +++ b/nix/libvirtd.nix @@ -45,11 +45,11 @@ in ###### interface options = { - deployment.libvirtd.imageDir = mkOption { - type = types.path; - default = "/var/lib/libvirt/images"; + deployment.libvirtd.storagePool = mkOption { + type = types.str; + default = "default"; description = '' - Directory to store VM image files. Note that it should be writable both by you and by libvirtd daemon. + The name of the storage pool where the virtual disk is to be created. ''; }; diff --git a/nixops/backends/libvirtd.py b/nixops/backends/libvirtd.py index 0e4540b40..4d0a2fe42 100644 --- a/nixops/backends/libvirtd.py +++ b/nixops/backends/libvirtd.py @@ -3,6 +3,7 @@ from distutils import spawn import os import copy +import json import random import shutil import string @@ -34,12 +35,11 @@ def __init__(self, xml, config): self.extra_devices = x.find("attr[@name='extraDevicesXML']/string").get("value") self.extra_domain = x.find("attr[@name='extraDomainXML']/string").get("value") self.headless = x.find("attr[@name='headless']/bool").get("value") == 'true' - self.image_dir = x.find("attr[@name='imageDir']/string").get("value") - assert self.image_dir is not None self.domain_type = x.find("attr[@name='domainType']/string").get("value") self.kernel = x.find("attr[@name='kernel']/string").get("value") self.initrd = x.find("attr[@name='initrd']/string").get("value") self.cmdline = x.find("attr[@name='cmdline']/string").get("value") + self.storage_pool_name = x.find("attr[@name='storagePool']/string").get("value") self.networks = [ k.get("value") @@ -55,6 +55,8 @@ class LibvirtdState(MachineState): primary_mac = nixops.util.attr_property("libvirtd.primaryMAC", None) domain_xml = nixops.util.attr_property("libvirtd.domainXML", None) disk_path = nixops.util.attr_property("libvirtd.diskPath", None) + storage_volume_name = nixops.util.attr_property("libvirtd.storageVolume", None) + storage_pool_name = nixops.util.attr_property("libvirtd.storagePool", None) vcpu = nixops.util.attr_property("libvirtd.vcpu", None) @classmethod @@ -69,6 +71,8 @@ def __init__(self, depl, name, id): self.log('Failed to open connection to the hypervisor') sys.exit(1) self._dom = None + self._pool = None + self._vol = None @property def dom(self): @@ -79,6 +83,18 @@ def dom(self): self.log("Warning: %s" % e) return self._dom + @property + def pool(self): + if self._pool is None: + self._pool = self.conn.storagePoolLookupByName(self.storage_pool_name) + return self._pool + + @property + def vol(self): + if self._vol is None: + self._vol = self.pool.storageVolLookupByName(self.storage_volume_name) + return self._vol + def get_console_output(self): # TODO update with self.uri when https://github.com/NixOS/nixops/pull/824 gets merged import sys @@ -115,13 +131,19 @@ def create(self, defn, check, allow_reboot, allow_recreate): assert isinstance(defn, LibvirtdDefinition) self.set_common_state(defn) self.primary_net = defn.networks[0] + self.storage_pool_name = defn.storage_pool_name + if not self.primary_mac: self._generate_primary_mac() - self.domain_xml = self._make_domain_xml(defn) - if not 
self.client_public_key: (self.client_private_key, self.client_public_key) = nixops.util.create_key_pair() + if self.storage_volume_name is None: + self._prepare_storage_volume() + self.storage_volume_name = self.vol.name() + + self.domain_xml = self._make_domain_xml(defn) + if self.vm_id is None: # By using "define" we ensure that the domain is # "persistent", as opposed to "transient" (i.e. removed on reboot). @@ -130,32 +152,67 @@ def create(self, defn, check, allow_reboot, allow_recreate): self.log('Failed to register domain XML with the hypervisor') return False - newEnv = copy.deepcopy(os.environ) - newEnv["NIXOPS_LIBVIRTD_PUBKEY"] = self.client_public_key - base_image = self._logged_exec( - ["nix-build"] + self.depl._eval_flags(self.depl.nix_exprs) + - ["--arg", "checkConfigurationOptions", "false", - "-A", "nodes.{0}.config.deployment.libvirtd.baseImage".format(self.name), - "-o", "{0}/libvirtd-image-{1}".format(self.depl.tempdir, self.name)], - capture_stdout=True, env=newEnv).rstrip() - - if not os.access(defn.image_dir, os.W_OK): - raise Exception('{} is not writable by this user or it does not exist'.format(defn.image_dir)) - - self.disk_path = self._disk_path(defn) - shutil.copyfile(base_image + "/disk.qcow2", self.disk_path) - # Rebase onto empty backing file to prevent breaking the disk image - # when the backing file gets garbage collected. - self._logged_exec(["qemu-img", "rebase", "-f", "qcow2", "-b", - "", self.disk_path]) - os.chmod(self.disk_path, 0660) self.vm_id = self._vm_id() self.start() return True - def _disk_path(self, defn): - return "{0}/{1}.img".format(defn.image_dir, self._vm_id()) + def _prepare_storage_volume(self): + self.logger.log("preparing disk image...") + newEnv = copy.deepcopy(os.environ) + newEnv["NIXOPS_LIBVIRTD_PUBKEY"] = self.client_public_key + base_image = self._logged_exec( + ["nix-build"] + self.depl._eval_flags(self.depl.nix_exprs) + + ["--arg", "checkConfigurationOptions", "false", + "-A", "nodes.{0}.config.deployment.libvirtd.baseImage".format(self.name), + "-o", "{0}/libvirtd-image-{1}".format(self.depl.tempdir, self.name)], + capture_stdout=True, env=newEnv).rstrip() + + temp_disk_path = os.path.join(self.depl.tempdir, 'disk.qcow2') + shutil.copyfile(base_image + "/disk.qcow2", temp_disk_path) + # Rebase onto empty backing file to prevent breaking the disk image + # when the backing file gets garbage collected. 
+ self._logged_exec(["qemu-img", "rebase", "-f", "qcow2", "-b", + "", temp_disk_path]) + + self.logger.log("uploading disk image...") + image_info = self._get_image_info(temp_disk_path) + self._vol = self._create_volume(image_info['virtual-size'], image_info['actual-size']) + self._upload_volume(temp_disk_path, image_info['actual-size']) + + def _get_image_info(self, filename): + output = self._logged_exec(["qemu-img", "info", "--output", "json", filename], capture_stdout=True) + return json.loads(output) + + def _create_volume(self, virtual_size, actual_size): + xml = ''' + + {name} + {virtual_size} + {actual_size} + + + + + '''.format( + name="{}.qcow2".format(self._vm_id()), + virtual_size=virtual_size, + actual_size=actual_size, + ) + vol = self.pool.createXML(xml) + self._vol = vol + return vol + + def _upload_volume(self, filename, actual_size): + stream = self.conn.newStream() + self.vol.upload(stream, offset=0, length=actual_size) + + def read_file(stream, nbytes, f): + return f.read(nbytes) + + with open(filename, 'rb') as f: + stream.sendAll(read_file, f) + stream.finish() def _make_domain_xml(self, defn): qemu_executable = "qemu-system-x86_64" @@ -213,7 +270,7 @@ def _make_os(defn): self._vm_id(), defn.memory_size, qemu, - self._disk_path(defn), + self.vol.path(), defn.vcpu, defn.domain_type ) @@ -278,14 +335,20 @@ def stop(self): self.state = self.STOPPED def destroy(self, wipe=False): - if not self.vm_id: - return True self.log_start("destroying... ") - self.stop() - if self.dom.undefine() != 0: - self.log("Failed undefining domain") - return False + + if self.vm_id is not None: + self.stop() + if self.dom.undefine() != 0: + self.log("Failed undefining domain") + return False if (self.disk_path and os.path.exists(self.disk_path)): + # the deployment was created by an older NixOps version that did + # not use the libvirtd API for uploading disk images os.unlink(self.disk_path) + + if self.storage_volume_name is not None: + self.vol.delete() + return True From ece0040662db9e0e07f468b5b8c8a3d8c9ab4e22 Mon Sep 17 00:00:00 2001 From: Andrey Golovizin Date: Thu, 28 Dec 2017 15:39:51 +0100 Subject: [PATCH 02/12] Add deployment.libvirtd.URI option --- nix/libvirtd.nix | 8 ++++++++ nixops/backends/libvirtd.py | 24 ++++++++++++++++++------ 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/nix/libvirtd.nix b/nix/libvirtd.nix index e79f4e870..dad19a194 100644 --- a/nix/libvirtd.nix +++ b/nix/libvirtd.nix @@ -53,6 +53,14 @@ in ''; }; + deployment.libvirtd.URI = mkOption { + type = types.str; + default = "qemu:///system"; + description = '' + Connection URI. 
+ ''; + }; + deployment.libvirtd.vcpu = mkOption { default = 1; type = types.int; diff --git a/nixops/backends/libvirtd.py b/nixops/backends/libvirtd.py index 4d0a2fe42..070edc09f 100644 --- a/nixops/backends/libvirtd.py +++ b/nixops/backends/libvirtd.py @@ -40,6 +40,7 @@ def __init__(self, xml, config): self.initrd = x.find("attr[@name='initrd']/string").get("value") self.cmdline = x.find("attr[@name='cmdline']/string").get("value") self.storage_pool_name = x.find("attr[@name='storagePool']/string").get("value") + self.uri = x.find("attr[@name='URI']/string").get("value") self.networks = [ k.get("value") @@ -59,6 +60,10 @@ class LibvirtdState(MachineState): storage_pool_name = nixops.util.attr_property("libvirtd.storagePool", None) vcpu = nixops.util.attr_property("libvirtd.vcpu", None) + # older deployments may not have a libvirtd.URI attribute in the state file + # using qemu:///system in such case + uri = nixops.util.attr_property("libvirtd.URI", "qemu:///system") + @classmethod def get_type(cls): return "libvirtd" @@ -66,14 +71,21 @@ def get_type(cls): def __init__(self, depl, name, id): MachineState.__init__(self, depl, name, id) - self.conn = libvirt.open('qemu:///system') - if self.conn is None: - self.log('Failed to open connection to the hypervisor') - sys.exit(1) + self._conn = None self._dom = None self._pool = None self._vol = None + @property + def conn(self): + if self._conn is None: + self.logger.log('connecting to {}...'.format(self.uri)) + self._conn = libvirt.open(self.uri) + if self._conn is None: + self.log('failed to connect to {}'.format(self.uri)) + sys.exit(1) + return self._conn + @property def dom(self): if self._dom is None: @@ -96,9 +108,8 @@ def vol(self): return self._vol def get_console_output(self): - # TODO update with self.uri when https://github.com/NixOS/nixops/pull/824 gets merged import sys - return self._logged_exec(["virsh", "-c", "qemu:///system", 'console', self.vm_id.decode()], + return self._logged_exec(["virsh", "-c", self.uri, 'console', self.vm_id.decode()], stdin=sys.stdin) def get_ssh_private_key_file(self): @@ -132,6 +143,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): self.set_common_state(defn) self.primary_net = defn.networks[0] self.storage_pool_name = defn.storage_pool_name + self.uri = defn.uri if not self.primary_mac: self._generate_primary_mac() From 84a7aa79880c03f6d195847953473fc4a7e48b63 Mon Sep 17 00:00:00 2001 From: Andrey Golovizin Date: Thu, 28 Dec 2017 15:40:21 +0100 Subject: [PATCH 03/12] Determine QEMU executable via libvirt API --- nixops/backends/libvirtd.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/nixops/backends/libvirtd.py b/nixops/backends/libvirtd.py index 070edc09f..5c60c0d53 100644 --- a/nixops/backends/libvirtd.py +++ b/nixops/backends/libvirtd.py @@ -9,6 +9,8 @@ import string import subprocess import time +from xml.etree import ElementTree + import libvirt from nixops.backends import MachineDefinition, MachineState @@ -145,6 +147,11 @@ def create(self, defn, check, allow_reboot, allow_recreate): self.storage_pool_name = defn.storage_pool_name self.uri = defn.uri + # required for virConnectGetDomainCapabilities() + # https://libvirt.org/formatdomaincaps.html + if self.conn.getLibVersion() < 1002007: + raise Exception('libvirt 1.2.7 or newer is required at the target host') + if not self.primary_mac: self._generate_primary_mac() if not self.client_public_key: @@ -226,10 +233,15 @@ def read_file(stream, nbytes, f): stream.sendAll(read_file, f) 
stream.finish() + def _get_qemu_executable(self): + domaincaps_xml = self.conn.getDomainCapabilities( + emulatorbin=None, arch='x86_64', machine=None, virttype='kvm', + ) + domaincaps = ElementTree.fromstring(domaincaps_xml) + return domaincaps.find('./path').text.strip() + def _make_domain_xml(self, defn): - qemu_executable = "qemu-system-x86_64" - qemu = spawn.find_executable(qemu_executable) - assert qemu is not None, "{} executable not found. Please install QEMU first.".format(qemu_executable) + qemu = self._get_qemu_executable() def maybe_mac(n): if n == self.primary_net: From 7145c225fa9eff8dc1d46138f52f5f0ed898059a Mon Sep 17 00:00:00 2001 From: Florian Klink Date: Fri, 2 Feb 2018 00:37:37 +0100 Subject: [PATCH 04/12] libvirtd: remove unused imports --- nixops/backends/libvirtd.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/nixops/backends/libvirtd.py b/nixops/backends/libvirtd.py index 5c60c0d53..5f4db076b 100644 --- a/nixops/backends/libvirtd.py +++ b/nixops/backends/libvirtd.py @@ -1,21 +1,19 @@ # -*- coding: utf-8 -*- -from distutils import spawn -import os import copy import json +import os import random import shutil -import string -import subprocess import time from xml.etree import ElementTree import libvirt -from nixops.backends import MachineDefinition, MachineState import nixops.known_hosts import nixops.util +from nixops.backends import MachineDefinition, MachineState + # to prevent libvirt errors from appearing on screen, see # https://www.redhat.com/archives/libvirt-users/2017-August/msg00011.html @@ -263,10 +261,9 @@ def _make_os(defn): ' hvm', " %s" % defn.kernel, " %s" % defn.initrd if len(defn.kernel) > 0 else "", - " %s"% defn.cmdline if len(defn.kernel) > 0 else "", + " %s" % defn.cmdline if len(defn.kernel) > 0 else "", ''] - domain_fmt = "\n".join([ '', ' {0}', From 7940d93be262700e93edc15f770025a0ed039eee Mon Sep 17 00:00:00 2001 From: Florian Klink Date: Fri, 2 Feb 2018 12:36:38 +0100 Subject: [PATCH 05/12] backends.libvirtd: use machine name for temp_image_path Otherwise, deployments with multiple VMs try to write to the same image. Also, do the temp_image_path calculation only once. --- nixops/backends/libvirtd.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/nixops/backends/libvirtd.py b/nixops/backends/libvirtd.py index 5f4db076b..350b73799 100644 --- a/nixops/backends/libvirtd.py +++ b/nixops/backends/libvirtd.py @@ -178,15 +178,17 @@ def _prepare_storage_volume(self): self.logger.log("preparing disk image...") newEnv = copy.deepcopy(os.environ) newEnv["NIXOPS_LIBVIRTD_PUBKEY"] = self.client_public_key + + temp_image_path = os.path.join(self.depl.tempdir, 'libvirtd-image-{}'.format(self.name)) base_image = self._logged_exec( ["nix-build"] + self.depl._eval_flags(self.depl.nix_exprs) + ["--arg", "checkConfigurationOptions", "false", "-A", "nodes.{0}.config.deployment.libvirtd.baseImage".format(self.name), - "-o", "{0}/libvirtd-image-{1}".format(self.depl.tempdir, self.name)], + "-o", temp_image_path], capture_stdout=True, env=newEnv).rstrip() - temp_disk_path = os.path.join(self.depl.tempdir, 'disk.qcow2') - shutil.copyfile(base_image + "/disk.qcow2", temp_disk_path) + temp_disk_path = os.path.join(self.depl.tempdir, 'disk-{}.qcow2'.format(self.name)) + shutil.copyfile(os.path.join(temp_image_path, 'disk.qcow2'), temp_disk_path) # Rebase onto empty backing file to prevent breaking the disk image # when the backing file gets garbage collected. 
self._logged_exec(["qemu-img", "rebase", "-f", "qcow2", "-b", @@ -302,7 +304,7 @@ def _parse_ip(self): """ # alternative is VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE if qemu agent is available ifaces = self.dom.interfaceAddresses(libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE, 0) - if (ifaces == None): + if ifaces is None: self.log("Failed to get domain interfaces") return From 36f426d1b4da9ae75e880b991986778b4ceb1fc2 Mon Sep 17 00:00:00 2001 From: Andrey Golovizin Date: Mon, 26 Feb 2018 20:13:02 +0100 Subject: [PATCH 06/12] Update documentation --- doc/manual/overview.xml | 104 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 95 insertions(+), 9 deletions(-) diff --git a/doc/manual/overview.xml b/doc/manual/overview.xml index 133c2cbbd..4d395a09b 100644 --- a/doc/manual/overview.xml +++ b/doc/manual/overview.xml @@ -1525,15 +1525,6 @@ add your user to libvirtd group and change firewall not to filter DHCP packets. -Next we have to make sure our user has access to create images by - executing: - - $ sudo mkdir /var/lib/libvirt/images - $ sudo chgrp libvirtd /var/lib/libvirt/images - $ sudo chmod g+w /var/lib/libvirt/images - - - We're ready to create the deployment, start by creating example.nix: @@ -1602,6 +1593,101 @@ deployment.libvirtd.extraDevicesXML = '' +
+Remote libvirtd server + + +By default, NixOps uses the local libvirtd daemon (qemu:///system). It is also possible to +deploy to a +remote libvirtd server. +Remote deployment requires a couple of things: + + + + Pointing deployment.libvirtd.URI to the + remote libvirtd server + instead of qemu:///system. + + + + Configuring the network to ensure the VM running on the remote server is + reachable from the local machine. This is required so that NixOps can reach the + newly created VM by SSH to finish the deployment. + + + + + +Example: suppose the remote libvirtd server is located at 10.2.0.15. + + +First, create a new routed +virtual network on the libvirtd server. In this example we'll use the +192.168.122.0/24 network named routed. + + + +Next, add a route to the virtual network via the remote libvirtd server. This +can be done by running this command on the local machine: + + +# ip route add to 192.168.122.0/24 via 10.2.0.15 + + + + +Now, create a NixOps configuration file remote-libvirtd.nix: + +{ + example = { + deployment.targetEnv = "libvirtd"; + deployment.libvirtd.URI = "qemu+ssh://10.2.0.15/system"; + deployment.libvirtd.networks = [ "routed" ]; + }; +} + + + + +Finally, deploy it with NixOps: + + +$ nixops create -d remote-libvirtd ./remote-libvirtd.nix +$ nixops deploy -d remote-libvirtd + + + +
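The backend talks to the remote daemon through the libvirt Python bindings and resolves the QEMU binary via virConnectGetDomainCapabilities(), which is why it requires libvirt 1.2.7 or newer on the target host. Before deploying, the connection URI can be sanity-checked from a Python prompt; a minimal sketch, assuming the example address 10.2.0.15 above and locally installed libvirt Python bindings:

import libvirt

uri = "qemu+ssh://10.2.0.15/system"   # same value as deployment.libvirtd.URI
conn = libvirt.open(uri)              # raises libvirt.libvirtError if the connection fails
print(conn.getHostname())             # hostname reported by the remote libvirtd
print(conn.getLibVersion())           # must be >= 1002007, i.e. libvirt 1.2.7

If this fails with VIR_ERR_NO_CONNECT, QEMU is most likely not installed on the target host, which is the same situation the later connection-error-handling patch reports explicitly.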
+ +
+Libvirtd storage pools + + +By default, NixOps uses the default +storage pool which +usually corresponds to the /var/lib/libvirt/images +directory. You can choose another storage pool with the +deployment.libvirtd.storagePool option: + + +{ + example = { + deployment.targetEnv = "libvirtd"; + deployment.libvirtd.storagePool = "mystoragepool"; + }; +} + + + + + NixOps has only been tested with storage pools of type dir (filesystem directory). + Attempting to use a storage pool of any other type with NixOps may not work as expected. + + + +
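With the storage-pool model introduced in this series, the backend no longer copies images into an images directory; it builds the qcow2 locally, creates a volume in the configured pool, and streams the image into it over the libvirt API. A minimal stand-alone sketch of that create-and-upload flow, assuming the default pool and a placeholder image file disk.qcow2 (the backend itself takes the capacity and allocation from qemu-img info --output json):

import os
import libvirt

path = "disk.qcow2"                              # placeholder; NixOps uses the nix-build result
size = os.path.getsize(path)                     # the backend uses the image's virtual size here

conn = libvirt.open("qemu:///system")
pool = conn.storagePoolLookupByName("default")   # value of deployment.libvirtd.storagePool

vol_xml = """
<volume>
  <name>example.qcow2</name>
  <capacity>{size}</capacity>
  <target>
    <format type='qcow2'/>
  </target>
</volume>""".format(size=size)

vol = pool.createXML(vol_xml, 0)                 # create an empty volume in the pool
stream = conn.newStream()
vol.upload(stream, offset=0, length=size)        # then stream the image into it
with open(path, "rb") as f:
    stream.sendAll(lambda st, nbytes, fobj: fobj.read(nbytes), f)
stream.finish()
print(vol.path())                                # the path the domain XML points its disk at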
+
Deploying Datadog resources From aa69640b05bf48db21ea20dcc1cde508b6f61d75 Mon Sep 17 00:00:00 2001 From: Andrey Golovizin Date: Sun, 11 Mar 2018 23:12:34 +0100 Subject: [PATCH 07/12] Handle libvirt connection errors --- nixops/backends/libvirtd.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/nixops/backends/libvirtd.py b/nixops/backends/libvirtd.py index 350b73799..644d75ed5 100644 --- a/nixops/backends/libvirtd.py +++ b/nixops/backends/libvirtd.py @@ -80,10 +80,14 @@ def __init__(self, depl, name, id): def conn(self): if self._conn is None: self.logger.log('connecting to {}...'.format(self.uri)) - self._conn = libvirt.open(self.uri) - if self._conn is None: - self.log('failed to connect to {}'.format(self.uri)) - sys.exit(1) + try: + self._conn = libvirt.open(self.uri) + except libvirt.libvirtError as error: + self.logger.error(error.get_error_message()) + if error.get_error_code() == libvirt.VIR_ERR_NO_CONNECT: + # this error code usually means "no connection driver available for qemu:///..." + self.logger.error('make sure qemu-system-x86_64 is installed on the target host') + raise Exception('failed to connect to {}'.format(self.uri)) return self._conn @property From 574ba398fee39010428afafe880ff07415a3a561 Mon Sep 17 00:00:00 2001 From: Marius Bergmann Date: Fri, 13 Apr 2018 18:55:16 +0200 Subject: [PATCH 08/12] Eliminate duplicate SSH flags `MachineState.run_command()` passes SSH flags to `self.ssh.run_command()`. However, `self.get_ssh_flags()` is already registered as a `ssh_flag_fun` in the class `__init__()` function, so `ssh_util.SSH` already uses it to get the flags when initiating a connection. This lead to the SSH flags being duplicated, which causes an error for some flags (e.g. the `-J` flag, which can only be specified once). --- nixops/backends/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nixops/backends/__init__.py b/nixops/backends/__init__.py index cbfd4734d..9162b84bf 100644 --- a/nixops/backends/__init__.py +++ b/nixops/backends/__init__.py @@ -334,7 +334,8 @@ def run_command(self, command, **kwargs): # mainly operating in a chroot environment. if self.state == self.RESCUE: command = "export LANG= LC_ALL= LC_TIME=; " + command - return self.ssh.run_command(command, self.get_ssh_flags(), **kwargs) + + return self.ssh.run_command(command, **kwargs) def switch_to_configuration(self, method, sync, command=None): """ From deafeb422bebdcccbd66f1cda094a3d471c097c5 Mon Sep 17 00:00:00 2001 From: Marius Bergmann Date: Fri, 13 Apr 2018 19:03:45 +0200 Subject: [PATCH 09/12] libvirtd: move files to separate directory Offers better separation, especially when additional features will be added. 
--- nix/{libvirtd.nix => libvirtd/default.nix} | 2 +- nix/{libvirtd-image.nix => libvirtd/image.nix} | 0 nix/options.nix | 2 +- 3 files changed, 2 insertions(+), 2 deletions(-) rename nix/{libvirtd.nix => libvirtd/default.nix} (98%) rename nix/{libvirtd-image.nix => libvirtd/image.nix} (100%) diff --git a/nix/libvirtd.nix b/nix/libvirtd/default.nix similarity index 98% rename from nix/libvirtd.nix rename to nix/libvirtd/default.nix index dad19a194..84216e6a5 100644 --- a/nix/libvirtd.nix +++ b/nix/libvirtd/default.nix @@ -4,7 +4,7 @@ with lib; let sz = toString config.deployment.libvirtd.baseImageSize; - base_image = import ./libvirtd-image.nix { size = sz; }; + base_image = import ./image.nix { size = sz; }; the_key = builtins.getEnv "NIXOPS_LIBVIRTD_PUBKEY"; ssh_image = pkgs.vmTools.runInLinuxVM ( pkgs.runCommand "libvirtd-ssh-image" diff --git a/nix/libvirtd-image.nix b/nix/libvirtd/image.nix similarity index 100% rename from nix/libvirtd-image.nix rename to nix/libvirtd/image.nix diff --git a/nix/options.nix b/nix/options.nix index 0866c3ab8..89a719e26 100644 --- a/nix/options.nix +++ b/nix/options.nix @@ -23,7 +23,7 @@ in ./gce.nix ./hetzner.nix ./container.nix - ./libvirtd.nix + ./libvirtd ]; From d193ef803b60e2630013b46357627e9bc1f90121 Mon Sep 17 00:00:00 2001 From: Marius Bergmann Date: Fri, 13 Apr 2018 19:08:24 +0200 Subject: [PATCH 10/12] libvirtd: connect to guest using the hypervisor as a jumphost This helps in situations when there's no network connectivity to the guest, e.g. when the hypervisor host can be reached via a VPN, but the guest host cannot. --- nixops/backends/libvirtd.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/nixops/backends/libvirtd.py b/nixops/backends/libvirtd.py index 644d75ed5..a4cab1ddb 100644 --- a/nixops/backends/libvirtd.py +++ b/nixops/backends/libvirtd.py @@ -121,8 +121,12 @@ def get_ssh_private_key_file(self): def get_ssh_flags(self, *args, **kwargs): super_flags = super(LibvirtdState, self).get_ssh_flags(*args, **kwargs) + from urlparse import urlparse + url = urlparse(self.uri) + jumphost = url.netloc.encode('utf-8') return super_flags + ["-o", "StrictHostKeyChecking=no", - "-i", self.get_ssh_private_key_file()] + "-i", self.get_ssh_private_key_file(), + "-J", jumphost] def get_physical_spec(self): return {('users', 'extraUsers', 'root', 'openssh', 'authorizedKeys', 'keys'): [self.client_public_key]} From c6c435134bd124fd8c10fda41c86fa1d0de725c6 Mon Sep 17 00:00:00 2001 From: Marius Bergmann Date: Fri, 13 Apr 2018 19:11:23 +0200 Subject: [PATCH 11/12] libvirtd: add qemu-guest profile to the initial image This mainly adds driver support for virtio drivers to the initial image of the guest provisioning. --- nix/libvirtd/image.nix | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/nix/libvirtd/image.nix b/nix/libvirtd/image.nix index 0b6e3472a..517dfdb9b 100644 --- a/nix/libvirtd/image.nix +++ b/nix/libvirtd/image.nix @@ -4,6 +4,11 @@ let config = (import { inherit system; modules = [ { + + imports = [ + + ]; + fileSystems."/".device = "/dev/disk/by-label/nixos"; boot.loader.grub.version = 2; From 6a6d9e3df233d4f6d7366647d4c9bd954dd0fea9 Mon Sep 17 00:00:00 2001 From: Marius Bergmann Date: Fri, 13 Apr 2018 19:26:06 +0200 Subject: [PATCH 12/12] libvirtd: add bridged networking to qemu guests This is a WIP! - Replaced `deployment.libvirtd.networks` option with a submodule to allow not only (libvirt) network names, but other networking types as well. 
- Domain XML was adjusted accordingly to incorporate the parameters from the new `networks` submodule. - Added the qemu guest agent to guests to allow for out-of-band communication (no need for network connectivity) with the hypervisor. - Guest IP (for provisioning after guest has started) is no longer determined by waiting for the guest to get a DHCP lease in the hypervisor libvirt network. If the guest has a static IP, it won't ask for a DHCP lease. Also, for bridged networking, we probably will not have access to the DHCP server. - Instead, the address of the first interface is retrieved from libvirt using the `VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT` method, which can now be done because of the newly added qemu guest agent. --- nix/libvirtd/default.nix | 23 +++++++-- nix/libvirtd/network-options.nix | 23 +++++++++ nixops/backends/libvirtd.py | 84 ++++++++++++++++++++------------ 3 files changed, 95 insertions(+), 35 deletions(-) create mode 100644 nix/libvirtd/network-options.nix diff --git a/nix/libvirtd/default.nix b/nix/libvirtd/default.nix index 84216e6a5..de9ba2946 100644 --- a/nix/libvirtd/default.nix +++ b/nix/libvirtd/default.nix @@ -104,9 +104,11 @@ in }; deployment.libvirtd.networks = mkOption { - default = [ "default" ]; - type = types.listOf types.str; - description = "Names of libvirt networks to attach the VM to."; + type = types.listOf (types.submodule (import ./network-options.nix { + inherit lib; + })); + default = []; + description = "Networks to attach the VM to."; }; deployment.libvirtd.extraDevicesXML = mkOption { @@ -164,6 +166,21 @@ in services.openssh.extraConfig = "UseDNS no"; deployment.hasFastConnection = true; + + services.udev.extraRules = '' + SUBSYSTEM=="virtio-ports", ATTR{name}=="org.qemu.guest_agent.0", TAG+="systemd" ENV{SYSTEMD_WANTS}="qemu-guest-agent.service" + ''; + + systemd.services.qemu-guest-agent = { + description = "QEMU Guest Agent"; + bindsTo = [ "dev-virtio\\x2dports-org.qemu.guest_agent.0.device" ]; + after = [ "dev-virtio\\x2dports-org.qemu.guest_agent.0.device" ]; + serviceConfig = { + ExecStart = "-${pkgs.kvm}/bin/qemu-ga"; + Restart = "always"; + RestartSec = 0; + }; + }; }; } diff --git a/nix/libvirtd/network-options.nix b/nix/libvirtd/network-options.nix new file mode 100644 index 000000000..d72da5962 --- /dev/null +++ b/nix/libvirtd/network-options.nix @@ -0,0 +1,23 @@ +{ lib } : + +with lib; +{ + options = { + + source = mkOption { + type = types.str; + default = "default"; + description = '' + ''; + }; + + type = mkOption { + type = types.enum [ "bridge" "virtual" ]; + default = "virtual"; + description = '' + ''; + }; + + }; + +} diff --git a/nixops/backends/libvirtd.py b/nixops/backends/libvirtd.py index a4cab1ddb..a25cd4c3b 100644 --- a/nixops/backends/libvirtd.py +++ b/nixops/backends/libvirtd.py @@ -18,6 +18,29 @@ # to prevent libvirt errors from appearing on screen, see # https://www.redhat.com/archives/libvirt-users/2017-August/msg00011.html + +class LibvirtdNetwork: + + INTERFACE_TYPES = { + 'virtual': 'network', + 'bridge': 'bridge', + } + + def __init__(self, **kwargs): + self.type = kwargs['type'] + self.source = kwargs['source'] + + @property + def interface_type(self): + return self.INTERFACE_TYPES[self.type] + + @classmethod + def from_xml(cls, x): + type = x.find("attr[@name='type']/string").get("value") + source = x.find("attr[@name='source']/string").get("value") + return cls(type=type, source=source) + + class LibvirtdDefinition(MachineDefinition): """Definition of a trivial machine.""" @@ -43,8 +66,8 @@ def 
__init__(self, xml, config): self.uri = x.find("attr[@name='URI']/string").get("value") self.networks = [ - k.get("value") - for k in x.findall("attr[@name='networks']/list/string")] + LibvirtdNetwork.from_xml(n) + for n in x.findall("attr[@name='networks']/list/*")] assert len(self.networks) > 0 @@ -52,8 +75,6 @@ class LibvirtdState(MachineState): private_ipv4 = nixops.util.attr_property("privateIpv4", None) client_public_key = nixops.util.attr_property("libvirtd.clientPublicKey", None) client_private_key = nixops.util.attr_property("libvirtd.clientPrivateKey", None) - primary_net = nixops.util.attr_property("libvirtd.primaryNet", None) - primary_mac = nixops.util.attr_property("libvirtd.primaryMAC", None) domain_xml = nixops.util.attr_property("libvirtd.domainXML", None) disk_path = nixops.util.attr_property("libvirtd.diskPath", None) storage_volume_name = nixops.util.attr_property("libvirtd.storageVolume", None) @@ -139,17 +160,9 @@ def address_to(self, m): def _vm_id(self): return "nixops-{0}-{1}".format(self.depl.uuid, self.name) - def _generate_primary_mac(self): - mac = [0x52, 0x54, 0x00, - random.randint(0x00, 0x7f), - random.randint(0x00, 0xff), - random.randint(0x00, 0xff)] - self.primary_mac = ':'.join(map(lambda x: "%02x" % x, mac)) - def create(self, defn, check, allow_reboot, allow_recreate): assert isinstance(defn, LibvirtdDefinition) self.set_common_state(defn) - self.primary_net = defn.networks[0] self.storage_pool_name = defn.storage_pool_name self.uri = defn.uri @@ -158,8 +171,6 @@ def create(self, defn, check, allow_reboot, allow_recreate): if self.conn.getLibVersion() < 1002007: raise Exception('libvirt 1.2.7 or newer is required at the target host') - if not self.primary_mac: - self._generate_primary_mac() if not self.client_public_key: (self.client_private_key, self.client_public_key) = nixops.util.create_key_pair() @@ -251,19 +262,15 @@ def _get_qemu_executable(self): def _make_domain_xml(self, defn): qemu = self._get_qemu_executable() - def maybe_mac(n): - if n == self.primary_net: - return '' - else: - return "" - def iface(n): return "\n".join([ - ' ', - maybe_mac(n), - ' ', + ' ', + ' ', ' ', - ]).format(n) + ]).format( + interface_type=n.interface_type, + source=n.source, + ) def _make_os(defn): return [ @@ -291,6 +298,10 @@ def _make_os(defn): ' ' if not defn.headless else "", ' ', ' ', + ' ', + ' ', + '
', + ' ', defn.extra_devices, ' ', defn.extra_domain, @@ -310,19 +321,29 @@ def _parse_ip(self): """ return an ip v4 """ - # alternative is VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE if qemu agent is available - ifaces = self.dom.interfaceAddresses(libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE, 0) + + dom_xml_str = self.dom.XMLDesc(0) + xml = ElementTree.fromstring(dom_xml_str) + first_iface_mac = xml.find('.//interface[1]/mac').get('address') + + try: + ifaces = self.dom.interfaceAddresses(libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT, 0) + except libvirt.libvirtError: + return + if ifaces is None: self.log("Failed to get domain interfaces") return - for (name, val) in ifaces.iteritems(): - if val['addrs']: - for ipaddr in val['addrs']: - return ipaddr['addr'] + first_iface = next(v for k, v in ifaces.iteritems() + if v.get('hwaddr', None) == first_iface_mac) + + addrs = first_iface.get('addrs', []) + + return addrs[0]['addr'] + def _wait_for_ip(self, prev_time): - self.log_start("waiting for IP address to appear in DHCP leases...") while True: ip = self._parse_ip() if ip: @@ -342,7 +363,6 @@ def _is_running(self): def start(self): assert self.vm_id assert self.domain_xml - assert self.primary_net if self._is_running(): self.log("connecting...") self.private_ipv4 = self._parse_ip()
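With the guest agent in place, the final patch obtains the machine's address with VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT instead of waiting for a DHCP lease, which also works for bridged networks and static addressing. A minimal sketch of the same query outside NixOps, assuming a running domain with a responsive qemu-guest-agent (the domain name is a placeholder following the nixops-<uuid>-<name> pattern):

import libvirt

conn = libvirt.open("qemu:///system")
dom = conn.lookupByName("nixops-<uuid>-example")   # placeholder domain name

# Ask the guest agent inside the VM for its interface addresses; this raises
# libvirt.libvirtError if the agent is not (yet) responding.
ifaces = dom.interfaceAddresses(libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT, 0)
for name, val in ifaces.items():
    for addr in val.get("addrs") or []:
        print(name, val.get("hwaddr"), addr["addr"])

The backend matches the returned entries against the MAC address of the first interface in the domain XML, so the reported address stays stable even when the guest has several interfaces.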