diff --git a/doc/manual/overview.xml b/doc/manual/overview.xml
index 133c2cbbd..4d395a09b 100644
--- a/doc/manual/overview.xml
+++ b/doc/manual/overview.xml
@@ -1525,15 +1525,6 @@ add your user to libvirtd group and change firewall not to filter DHCP packets.
-Next we have to make sure our user has access to create images by
- executing:
-
- $ sudo mkdir /var/lib/libvirt/images
- $ sudo chgrp libvirtd /var/lib/libvirt/images
- $ sudo chmod g+w /var/lib/libvirt/images
-
-
-
We're ready to create the deployment, start by creating
example.nix:
@@ -1602,6 +1593,101 @@ deployment.libvirtd.extraDevicesXML = ''
+
+Remote libvirtd server
+
+
+By default, NixOps uses the local libvirtd daemon (qemu:///system). It is
+also possible to deploy to a remote libvirtd server. Remote deployment
+requires two things:
+
+
+
+ Pointing deployment.libvirtd.URI to the
+ remote libvirtd server instead of qemu:///system.
+
+
+
+ Configuring the network so that the VM running on the remote server is
+ reachable from the local machine. This is required so that NixOps can
+ reach the newly created VM over SSH to finish the deployment.
+
+
+
+
+
+Example: suppose the remote libvirtd server is located at 10.2.0.15.
+
+
+First, create a new routed
+virtual network on the libvirtd server. In this example we'll use the
+192.168.122.0/24 network named routed.
+
+
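+For illustration, such a network could be defined with virsh roughly as
+follows; this is only a sketch, and the eth0 forward interface and the
+DHCP range are assumptions to adjust to your server:
+
+$ cat > routed.xml <<'EOF'
+<network>
+  <name>routed</name>
+  <forward mode="route" dev="eth0"/>
+  <ip address="192.168.122.1" netmask="255.255.255.0">
+    <dhcp>
+      <range start="192.168.122.2" end="192.168.122.254"/>
+    </dhcp>
+  </ip>
+</network>
+EOF
+$ virsh -c qemu+ssh://10.2.0.15/system net-define routed.xml
+$ virsh -c qemu+ssh://10.2.0.15/system net-start routed
+$ virsh -c qemu+ssh://10.2.0.15/system net-autostart routed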
+
+Next, add a route to the virtual network via the remote libvirtd server. This
+can be done by running this command on the local machine:
+
+
+# ip route add to 192.168.122.0/24 via 10.2.0.15
+
+
+
+
+Now, create a NixOps configuration file remote-libvirtd.nix:
+
+{
+ example = {
+ deployment.targetEnv = "libvirtd";
+ deployment.libvirtd.URI = "qemu+ssh://10.2.0.15/system";
+ deployment.libvirtd.networks = [ { source = "routed"; } ];
+ };
+}
+
+
+
+
+Finally, deploy it with NixOps:
+
+
+$ nixops create -d remote-libvirtd ./remote-libvirtd.nix
+$ nixops deploy -d remote-libvirtd
+
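+Once the deployment finishes, you can verify that the VM is reachable
+through the routed network:
+
+$ nixops ssh -d remote-libvirtd example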
+
+
+
+
+
+Libvirtd storage pools
+
+
+By default, NixOps uses the default storage pool, which usually
+corresponds to the /var/lib/libvirt/images directory. You can choose
+another storage pool with the deployment.libvirtd.storagePool option:
+
+
+{
+ example = {
+ deployment.targetEnv = "libvirtd";
+ deployment.libvirtd.storagePool = "mystoragepool";
+ };
+}
+
+
+
+
+ NixOps has only been tested with storage pools of type dir
+ (filesystem directory). Storage pools of any other type may not
+ work as expected.
+
+
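+For example, a dir-type pool could be created with virsh along these
+lines (the /srv/libvirt/images target path is only an illustration):
+
+$ virsh pool-define-as mystoragepool dir --target /srv/libvirt/images
+$ virsh pool-build mystoragepool
+$ virsh pool-start mystoragepool
+$ virsh pool-autostart mystoragepool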
+
+
+
Deploying Datadog resources
diff --git a/nix/libvirtd.nix b/nix/libvirtd/default.nix
similarity index 78%
rename from nix/libvirtd.nix
rename to nix/libvirtd/default.nix
index dfa4f0f99..de9ba2946 100644
--- a/nix/libvirtd.nix
+++ b/nix/libvirtd/default.nix
@@ -4,7 +4,7 @@ with lib;
let
sz = toString config.deployment.libvirtd.baseImageSize;
- base_image = import ./libvirtd-image.nix { size = sz; };
+ base_image = import ./image.nix { size = sz; };
the_key = builtins.getEnv "NIXOPS_LIBVIRTD_PUBKEY";
ssh_image = pkgs.vmTools.runInLinuxVM (
pkgs.runCommand "libvirtd-ssh-image"
@@ -45,11 +45,19 @@ in
###### interface
options = {
- deployment.libvirtd.imageDir = mkOption {
- type = types.path;
- default = "/var/lib/libvirt/images";
+ deployment.libvirtd.storagePool = mkOption {
+ type = types.str;
+ default = "default";
+ description = ''
+ The name of the storage pool where the virtual disk is to be created.
+ '';
+ };
+
+ deployment.libvirtd.URI = mkOption {
+ type = types.str;
+ default = "qemu:///system";
description = ''
- Directory to store VM image files. Note that it should be writable both by you and by libvirtd daemon.
+ The libvirt connection URI, e.g. qemu:///system for the local
+ hypervisor or qemu+ssh://10.2.0.15/system for a remote one.
'';
};
@@ -96,9 +104,11 @@ in
};
deployment.libvirtd.networks = mkOption {
- default = [ "default" ];
- type = types.listOf types.str;
- description = "Names of libvirt networks to attach the VM to.";
+ type = types.listOf (types.submodule (import ./network-options.nix {
+ inherit lib;
+ }));
+ default = [];
+ description = "Networks to attach the VM to.";
};
deployment.libvirtd.extraDevicesXML = mkOption {
@@ -156,6 +166,21 @@ in
services.openssh.extraConfig = "UseDNS no";
deployment.hasFastConnection = true;
+
+ # start the QEMU guest agent as soon as the virtio serial port provided
+ # by the hypervisor shows up (see the channel device in the domain XML)
+ services.udev.extraRules = ''
+   SUBSYSTEM=="virtio-ports", ATTR{name}=="org.qemu.guest_agent.0", TAG+="systemd", ENV{SYSTEMD_WANTS}="qemu-guest-agent.service"
+ '';
+
+ systemd.services.qemu-guest-agent = {
+ description = "QEMU Guest Agent";
+ bindsTo = [ "dev-virtio\\x2dports-org.qemu.guest_agent.0.device" ];
+ after = [ "dev-virtio\\x2dports-org.qemu.guest_agent.0.device" ];
+ serviceConfig = {
+ ExecStart = "-${pkgs.kvm}/bin/qemu-ga";
+ Restart = "always";
+ RestartSec = 0;
+ };
+ };
};
}
diff --git a/nix/libvirtd-image.nix b/nix/libvirtd/image.nix
similarity index 97%
rename from nix/libvirtd-image.nix
rename to nix/libvirtd/image.nix
index 0b6e3472a..517dfdb9b 100644
--- a/nix/libvirtd-image.nix
+++ b/nix/libvirtd/image.nix
@@ -4,6 +4,11 @@ let
config = (import <nixpkgs/nixos/lib/eval-config.nix> {
inherit system;
modules = [ {
+
+ imports = [
+   <nixpkgs/nixos/modules/profiles/qemu-guest.nix>
+ ];
+
fileSystems."/".device = "/dev/disk/by-label/nixos";
boot.loader.grub.version = 2;
diff --git a/nix/libvirtd/network-options.nix b/nix/libvirtd/network-options.nix
new file mode 100644
index 000000000..d72da5962
--- /dev/null
+++ b/nix/libvirtd/network-options.nix
@@ -0,0 +1,23 @@
+{ lib } :
+
+with lib;
+{
+ options = {
+
+ source = mkOption {
+ type = types.str;
+ default = "default";
+ description = ''
+   The name of the libvirt network (for type "virtual") or of the
+   host bridge device (for type "bridge") that the VM interface is
+   attached to.
+ '';
+ };
+
+ type = mkOption {
+ type = types.enum [ "bridge" "virtual" ];
+ default = "virtual";
+ description = ''
+   The kind of network: "virtual" for a libvirt-managed virtual
+   network, or "bridge" for an existing bridge device on the host.
+ '';
+ };
+
+ };
+
+}
diff --git a/nix/options.nix b/nix/options.nix
index 0866c3ab8..89a719e26 100644
--- a/nix/options.nix
+++ b/nix/options.nix
@@ -23,7 +23,7 @@ in
./gce.nix
./hetzner.nix
./container.nix
- ./libvirtd.nix
+ ./libvirtd
];
diff --git a/nixops/backends/__init__.py b/nixops/backends/__init__.py
index cbfd4734d..9162b84bf 100644
--- a/nixops/backends/__init__.py
+++ b/nixops/backends/__init__.py
@@ -334,7 +334,8 @@ def run_command(self, command, **kwargs):
# mainly operating in a chroot environment.
if self.state == self.RESCUE:
command = "export LANG= LC_ALL= LC_TIME=; " + command
- return self.ssh.run_command(command, self.get_ssh_flags(), **kwargs)
+
+ return self.ssh.run_command(command, **kwargs)
def switch_to_configuration(self, method, sync, command=None):
"""
diff --git a/nixops/backends/libvirtd.py b/nixops/backends/libvirtd.py
index 0e4540b40..a25cd4c3b 100644
--- a/nixops/backends/libvirtd.py
+++ b/nixops/backends/libvirtd.py
@@ -1,22 +1,46 @@
# -*- coding: utf-8 -*-
-from distutils import spawn
-import os
import copy
+import json
+import os
import random
import shutil
-import string
-import subprocess
import time
+from xml.etree import ElementTree
+
import libvirt
-from nixops.backends import MachineDefinition, MachineState
import nixops.known_hosts
import nixops.util
+from nixops.backends import MachineDefinition, MachineState
+
# to prevent libvirt errors from appearing on screen, see
# https://www.redhat.com/archives/libvirt-users/2017-August/msg00011.html
libvirt.registerErrorHandler(lambda userdata, err: None, ctx=None)
+
+class LibvirtdNetwork:
+
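+    # map the NixOps network type (deployment.libvirtd.networks.*.type)
+    # to the value expected in libvirt's <interface type="..."> attribute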
+ INTERFACE_TYPES = {
+ 'virtual': 'network',
+ 'bridge': 'bridge',
+ }
+
+ def __init__(self, **kwargs):
+ self.type = kwargs['type']
+ self.source = kwargs['source']
+
+ @property
+ def interface_type(self):
+ return self.INTERFACE_TYPES[self.type]
+
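+    # build a LibvirtdNetwork from one element of the networks list in
+    # the machine definition XML produced by evaluating the Nix options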
+ @classmethod
+ def from_xml(cls, x):
+ type = x.find("attr[@name='type']/string").get("value")
+ source = x.find("attr[@name='source']/string").get("value")
+ return cls(type=type, source=source)
+
+
class LibvirtdDefinition(MachineDefinition):
"""Definition of a trivial machine."""
@@ -34,16 +58,16 @@ def __init__(self, xml, config):
self.extra_devices = x.find("attr[@name='extraDevicesXML']/string").get("value")
self.extra_domain = x.find("attr[@name='extraDomainXML']/string").get("value")
self.headless = x.find("attr[@name='headless']/bool").get("value") == 'true'
- self.image_dir = x.find("attr[@name='imageDir']/string").get("value")
- assert self.image_dir is not None
self.domain_type = x.find("attr[@name='domainType']/string").get("value")
self.kernel = x.find("attr[@name='kernel']/string").get("value")
self.initrd = x.find("attr[@name='initrd']/string").get("value")
self.cmdline = x.find("attr[@name='cmdline']/string").get("value")
+ self.storage_pool_name = x.find("attr[@name='storagePool']/string").get("value")
+ self.uri = x.find("attr[@name='URI']/string").get("value")
self.networks = [
- k.get("value")
- for k in x.findall("attr[@name='networks']/list/string")]
+ LibvirtdNetwork.from_xml(n)
+ for n in x.findall("attr[@name='networks']/list/*")]
assert len(self.networks) > 0
@@ -51,12 +75,16 @@ class LibvirtdState(MachineState):
private_ipv4 = nixops.util.attr_property("privateIpv4", None)
client_public_key = nixops.util.attr_property("libvirtd.clientPublicKey", None)
client_private_key = nixops.util.attr_property("libvirtd.clientPrivateKey", None)
- primary_net = nixops.util.attr_property("libvirtd.primaryNet", None)
- primary_mac = nixops.util.attr_property("libvirtd.primaryMAC", None)
domain_xml = nixops.util.attr_property("libvirtd.domainXML", None)
disk_path = nixops.util.attr_property("libvirtd.diskPath", None)
+ storage_volume_name = nixops.util.attr_property("libvirtd.storageVolume", None)
+ storage_pool_name = nixops.util.attr_property("libvirtd.storagePool", None)
vcpu = nixops.util.attr_property("libvirtd.vcpu", None)
+ # older deployments may not have a libvirtd.URI attribute in the state file
+ # using qemu:///system in such case
+ uri = nixops.util.attr_property("libvirtd.URI", "qemu:///system")
+
@classmethod
def get_type(cls):
return "libvirtd"
@@ -64,11 +92,24 @@ def get_type(cls):
def __init__(self, depl, name, id):
MachineState.__init__(self, depl, name, id)
- self.conn = libvirt.open('qemu:///system')
- if self.conn is None:
- self.log('Failed to open connection to the hypervisor')
- sys.exit(1)
+ self._conn = None
self._dom = None
+ self._pool = None
+ self._vol = None
+
+ @property
+ def conn(self):
+ if self._conn is None:
+ self.logger.log('connecting to {}...'.format(self.uri))
+ try:
+ self._conn = libvirt.open(self.uri)
+ except libvirt.libvirtError as error:
+ self.logger.error(error.get_error_message())
+ if error.get_error_code() == libvirt.VIR_ERR_NO_CONNECT:
+ # this error code usually means "no connection driver available for qemu:///..."
+ self.logger.error('make sure qemu-system-x86_64 is installed on the target host')
+ raise Exception('failed to connect to {}'.format(self.uri))
+ return self._conn
@property
def dom(self):
@@ -79,10 +120,21 @@ def dom(self):
self.log("Warning: %s" % e)
return self._dom
+ @property
+ def pool(self):
+ if self._pool is None:
+ self._pool = self.conn.storagePoolLookupByName(self.storage_pool_name)
+ return self._pool
+
+ @property
+ def vol(self):
+ if self._vol is None:
+ self._vol = self.pool.storageVolLookupByName(self.storage_volume_name)
+ return self._vol
+
def get_console_output(self):
- # TODO update with self.uri when https://github.com/NixOS/nixops/pull/824 gets merged
import sys
- return self._logged_exec(["virsh", "-c", "qemu:///system", 'console', self.vm_id.decode()],
+ return self._logged_exec(["virsh", "-c", self.uri, 'console', self.vm_id.decode()],
stdin=sys.stdin)
def get_ssh_private_key_file(self):
@@ -90,8 +142,12 @@ def get_ssh_private_key_file(self):
def get_ssh_flags(self, *args, **kwargs):
super_flags = super(LibvirtdState, self).get_ssh_flags(*args, **kwargs)
+ from urlparse import urlparse
+ url = urlparse(self.uri)
- return super_flags + ["-o", "StrictHostKeyChecking=no",
- "-i", self.get_ssh_private_key_file()]
+ flags = super_flags + ["-o", "StrictHostKeyChecking=no",
+                        "-i", self.get_ssh_private_key_file()]
+ # when the URI points at a remote libvirtd (e.g. qemu+ssh://host/system),
+ # use that host as an SSH jump host to reach the guest; a local
+ # qemu:///system URI has an empty netloc and must not get a -J flag
+ if url.netloc:
+     flags += ["-J", url.netloc.encode('utf-8')]
+ return flags
def get_physical_spec(self):
return {('users', 'extraUsers', 'root', 'openssh', 'authorizedKeys', 'keys'): [self.client_public_key]}
@@ -104,24 +160,26 @@ def address_to(self, m):
def _vm_id(self):
return "nixops-{0}-{1}".format(self.depl.uuid, self.name)
- def _generate_primary_mac(self):
- mac = [0x52, 0x54, 0x00,
- random.randint(0x00, 0x7f),
- random.randint(0x00, 0xff),
- random.randint(0x00, 0xff)]
- self.primary_mac = ':'.join(map(lambda x: "%02x" % x, mac))
-
def create(self, defn, check, allow_reboot, allow_recreate):
assert isinstance(defn, LibvirtdDefinition)
self.set_common_state(defn)
- self.primary_net = defn.networks[0]
- if not self.primary_mac:
- self._generate_primary_mac()
- self.domain_xml = self._make_domain_xml(defn)
+ self.storage_pool_name = defn.storage_pool_name
+ self.uri = defn.uri
+
+ # required for virConnectGetDomainCapabilities()
+ # https://libvirt.org/formatdomaincaps.html
+ if self.conn.getLibVersion() < 1002007:
+ raise Exception('libvirt 1.2.7 or newer is required on the target host')
if not self.client_public_key:
(self.client_private_key, self.client_public_key) = nixops.util.create_key_pair()
+ if self.storage_volume_name is None:
+ self._prepare_storage_volume()
+ self.storage_volume_name = self.vol.name()
+
+ self.domain_xml = self._make_domain_xml(defn)
+
if self.vm_id is None:
# By using "define" we ensure that the domain is
# "persistent", as opposed to "transient" (i.e. removed on reboot).
@@ -130,51 +188,89 @@ def create(self, defn, check, allow_reboot, allow_recreate):
self.log('Failed to register domain XML with the hypervisor')
return False
- newEnv = copy.deepcopy(os.environ)
- newEnv["NIXOPS_LIBVIRTD_PUBKEY"] = self.client_public_key
- base_image = self._logged_exec(
- ["nix-build"] + self.depl._eval_flags(self.depl.nix_exprs) +
- ["--arg", "checkConfigurationOptions", "false",
- "-A", "nodes.{0}.config.deployment.libvirtd.baseImage".format(self.name),
- "-o", "{0}/libvirtd-image-{1}".format(self.depl.tempdir, self.name)],
- capture_stdout=True, env=newEnv).rstrip()
-
- if not os.access(defn.image_dir, os.W_OK):
- raise Exception('{} is not writable by this user or it does not exist'.format(defn.image_dir))
-
- self.disk_path = self._disk_path(defn)
- shutil.copyfile(base_image + "/disk.qcow2", self.disk_path)
- # Rebase onto empty backing file to prevent breaking the disk image
- # when the backing file gets garbage collected.
- self._logged_exec(["qemu-img", "rebase", "-f", "qcow2", "-b",
- "", self.disk_path])
- os.chmod(self.disk_path, 0660)
self.vm_id = self._vm_id()
self.start()
return True
- def _disk_path(self, defn):
- return "{0}/{1}.img".format(defn.image_dir, self._vm_id())
+ def _prepare_storage_volume(self):
+ self.logger.log("preparing disk image...")
+ newEnv = copy.deepcopy(os.environ)
+ newEnv["NIXOPS_LIBVIRTD_PUBKEY"] = self.client_public_key
+
+ temp_image_path = os.path.join(self.depl.tempdir, 'libvirtd-image-{}'.format(self.name))
+ base_image = self._logged_exec(
+ ["nix-build"] + self.depl._eval_flags(self.depl.nix_exprs) +
+ ["--arg", "checkConfigurationOptions", "false",
+ "-A", "nodes.{0}.config.deployment.libvirtd.baseImage".format(self.name),
+ "-o", temp_image_path],
+ capture_stdout=True, env=newEnv).rstrip()
+
+ temp_disk_path = os.path.join(self.depl.tempdir, 'disk-{}.qcow2'.format(self.name))
+ shutil.copyfile(os.path.join(temp_image_path, 'disk.qcow2'), temp_disk_path)
+ # Rebase onto empty backing file to prevent breaking the disk image
+ # when the backing file gets garbage collected.
+ self._logged_exec(["qemu-img", "rebase", "-f", "qcow2", "-b",
+ "", temp_disk_path])
+
+ self.logger.log("uploading disk image...")
+ image_info = self._get_image_info(temp_disk_path)
+ self._vol = self._create_volume(image_info['virtual-size'], image_info['actual-size'])
+ self._upload_volume(temp_disk_path, image_info['actual-size'])
+
+ def _get_image_info(self, filename):
+ output = self._logged_exec(["qemu-img", "info", "--output", "json", filename], capture_stdout=True)
+ return json.loads(output)
+
+ def _create_volume(self, virtual_size, actual_size):
+ xml = '''
+ <volume>
+   <name>{name}</name>
+   <capacity>{virtual_size}</capacity>
+   <allocation>{actual_size}</allocation>
+   <target>
+     <format type="qcow2"/>
+   </target>
+ </volume>
+ '''.format(
+ name="{}.qcow2".format(self._vm_id()),
+ virtual_size=virtual_size,
+ actual_size=actual_size,
+ )
+ vol = self.pool.createXML(xml)
+ self._vol = vol
+ return vol
- def _make_domain_xml(self, defn):
- qemu_executable = "qemu-system-x86_64"
- qemu = spawn.find_executable(qemu_executable)
- assert qemu is not None, "{} executable not found. Please install QEMU first.".format(qemu_executable)
+ def _upload_volume(self, filename, actual_size):
+ stream = self.conn.newStream()
+ self.vol.upload(stream, offset=0, length=actual_size)
+
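+ # sendAll() drives the upload: it repeatedly calls the callback below
+ # to fetch the next chunk of the local image until EOF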
+ def read_file(stream, nbytes, f):
+ return f.read(nbytes)
- def maybe_mac(n):
- if n == self.primary_net:
- return '<mac address="' + self.primary_mac + '" />'
- else:
- return ""
+ with open(filename, 'rb') as f:
+ stream.sendAll(read_file, f)
+ stream.finish()
+
+ def _get_qemu_executable(self):
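+ # ask the (possibly remote) libvirtd for its domain capabilities and
+ # use the emulator path reported there, instead of searching the
+ # local $PATH for qemu-system-x86_64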
+ domaincaps_xml = self.conn.getDomainCapabilities(
+ emulatorbin=None, arch='x86_64', machine=None, virttype='kvm',
+ )
+ domaincaps = ElementTree.fromstring(domaincaps_xml)
+ return domaincaps.find('./path').text.strip()
+
+ def _make_domain_xml(self, defn):
+ qemu = self._get_qemu_executable()
def iface(n):
return "\n".join([
- '    <interface type="network">',
- maybe_mac(n),
- '      <source network="{0}"/>',
+ '    <interface type="{interface_type}">',
+ '      <source {interface_type}="{source}"/>',
'    </interface>',
- ]).format(n)
+ ]).format(
+ interface_type=n.interface_type,
+ source=n.source,
+ )
def _make_os(defn):
return [
@@ -182,10 +278,9 @@ def _make_os(defn):
'    <type arch="x86_64">hvm</type>',
"    <kernel>%s</kernel>" % defn.kernel,
"    <initrd>%s</initrd>" % defn.initrd if len(defn.kernel) > 0 else "",
- "    <cmdline>%s</cmdline>"% defn.cmdline if len(defn.kernel) > 0 else "",
+ "    <cmdline>%s</cmdline>" % defn.cmdline if len(defn.kernel) > 0 else "",
'</os>']
-
domain_fmt = "\n".join([
'<domain type="{5}">',
'  <name>{0}</name>',
@@ -203,6 +298,10 @@ def _make_os(defn):
'    <graphics type="sdl" display=":0.0"/>' if not defn.headless else "",
'    <input type="keyboard" bus="usb"/>',
'    <input type="mouse" bus="usb"/>',
+ '    <channel type="unix">',
+ '      <source mode="bind"/>',
+ '      <target type="virtio" name="org.qemu.guest_agent.0"/>',
+ '    </channel>',
defn.extra_devices,
' ',
defn.extra_domain,
@@ -213,7 +312,7 @@ def _make_os(defn):
self._vm_id(),
defn.memory_size,
qemu,
- self._disk_path(defn),
+ self.vol.path(),
defn.vcpu,
defn.domain_type
)
@@ -222,19 +321,29 @@ def _parse_ip(self):
"""
return an ip v4
"""
- # alternative is VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT if the qemu guest agent is available
- ifaces = self.dom.interfaceAddresses(libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE, 0)
- if (ifaces == None):
+
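+ # the QEMU guest agent reports guest-side interface info; match the
+ # first interface defined in the domain XML by its MAC address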
+ dom_xml_str = self.dom.XMLDesc(0)
+ xml = ElementTree.fromstring(dom_xml_str)
+ first_iface_mac = xml.find('.//interface[1]/mac').get('address')
+
+ try:
+ ifaces = self.dom.interfaceAddresses(libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT, 0)
+ except libvirt.libvirtError:
+ return
+
+ if ifaces is None:
self.log("Failed to get domain interfaces")
return
- for (name, val) in ifaces.iteritems():
- if val['addrs']:
- for ipaddr in val['addrs']:
- return ipaddr['addr']
+ try:
+     first_iface = next(v for k, v in ifaces.iteritems()
+                        if v.get('hwaddr', None) == first_iface_mac)
+ except StopIteration:
+     # no interface with the expected MAC reported (yet)
+     return
+
+ addrs = first_iface.get('addrs', [])
+ if not addrs:
+     return
+
+ return addrs[0]['addr']
+
def _wait_for_ip(self, prev_time):
- self.log_start("waiting for IP address to appear in DHCP leases...")
while True:
ip = self._parse_ip()
if ip:
@@ -254,7 +363,6 @@ def _is_running(self):
def start(self):
assert self.vm_id
assert self.domain_xml
- assert self.primary_net
if self._is_running():
self.log("connecting...")
self.private_ipv4 = self._parse_ip()
@@ -278,14 +386,20 @@ def stop(self):
self.state = self.STOPPED
def destroy(self, wipe=False):
- if not self.vm_id:
- return True
self.log_start("destroying... ")
- self.stop()
- if self.dom.undefine() != 0:
- self.log("Failed undefining domain")
- return False
+
+ if self.vm_id is not None:
+ self.stop()
+ if self.dom.undefine() != 0:
+ self.log("Failed undefining domain")
+ return False
if (self.disk_path and os.path.exists(self.disk_path)):
+ # the deployment was created by an older NixOps version that did
+ # not use the libvirtd API for uploading disk images
os.unlink(self.disk_path)
+
+ if self.storage_volume_name is not None:
+ self.vol.delete()
+
return True