From 6705655f0d4a57313cd18792e55f307722a23e19 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 3 Jul 2022 09:53:20 +0200 Subject: [PATCH 01/38] Begin experiments for docker_container rewrite. --- plugins/module_utils/module_container.py | 606 +++++++++++++++++++++++ 1 file changed, 606 insertions(+) create mode 100644 plugins/module_utils/module_container.py diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container.py new file mode 100644 index 000000000..c0442e283 --- /dev/null +++ b/plugins/module_utils/module_container.py @@ -0,0 +1,606 @@ +# Copyright (c) 2022 Felix Fontein +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +import shlex + +from functools import partial + +from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.module_utils.common.text.formatters import human_to_bytes + +from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion + +from ansible_collections.community.docker.plugins.module_utils.util import ( + parse_healthcheck, +) + + +def _get_ansible_type(type): + if type == 'set': + return 'list' + if type not in ('list', 'dict', 'int', 'float', 'str'): + raise Exception('Invalid type "%s"' % (type, )) + return type + + +class Option(object): + def __init__(self, name, type, ansible_type=None, elements=None, ansible_elements=None, ansible_suboptions=None, ansible_aliases=None): + self.name = name + self.type = type + self.ansible_type = ansible_type or _get_ansible_type(type) + needs_elements = self.type in ('list', 'set') + needs_ansible_elements = self.ansible_type in ('list', ) + if elements is not None and not needs_elements: + raise Exception('elements only allowed for lists/sets') + if elements is None and needs_elements: + raise Exception('elements required for lists/sets') + if ansible_elements is not None and not needs_ansible_elements: + raise Exception('Ansible elements only 
allowed for Ansible lists') + if (elements is None and ansible_elements is None) and needs_ansible_elements: + raise Exception('Ansible elements required for Ansible lists') + self.elements = elements if needs_elements else None + self.ansible_elements = (ansible_elements or _get_ansible_type(elements)) if needs_ansible_elements else None + needs_suboptions = (self.type in ('list', 'set') and elements == 'dict') or (self.type == 'dict') + if ansible_suboptions is not None and not needs_suboptions: + raise Exception('suboptions only allowed for Ansible lists with dicts, or Ansible dicts') + if ansible_suboptions is None and needs_suboptions: + raise Exception('suboptions required for Ansible lists with dicts, or Ansible dicts') + self.ansible_suboptions = ansible_suboptions if needs_suboptions else None + self.ansible_aliases = ansible_aliases + + +class OptionGroup(object): + def __init__(self, preprocess=None): + if preprocess is None: + def preprocess(module, values): + return values + self.preprocess = preprocess + self.options = [] + self.engines = {} + + def add_option(self, *args, **kwargs): + self.options.append(Option(*args, **kwargs)) + return self + + def add_docker_api(self, docker_api): + self.engines['docker_api'] = docker_api + return self + + +_SENTRY = object() + + +def DockerAPIEngine(object): + def __init__(self, get_value, preprocess_value, set_value=None, update_value=None, can_set_value=None, can_update_value=None, min_docker_api=None): + self.min_docker_api = min_docker_api + self.min_docker_api_obj = None if min_docker_api is None else LooseVersion(min_docker_api) + self.get_value = get_value + self.set_value = set_value + self.preprocess_value = preprocess_value + self.update_value = update_value + self.can_set_value = can_set_value or (lambda api_version: set_value is not None) + self.can_update_value = can_update_value or (lambda api_version: update_value is not None) + + @classmethod + def config_value(cls, host_config_name, 
postprocess_for_get=None, preprocess_for_set=None, min_docker_api=None, preprocess_value=None): + def preprocess_value_(module, api_version, options, values): + if len(options) != 1: + raise AssertionError('config_value can only be used for a single option') + if preprocess_value is not None and options[0].name in values: + values[options[0].name] = preprocess_value(module, api_version, values[options[0].name]) + return values + + def get_value(module, container, api_version, options): + if len(options) != 1: + raise AssertionError('config_value can only be used for a single option') + value = container.get(host_config_name, _SENTRY) + if postprocess_for_get: + value = postprocess_for_get(module, api_version, value, _SENTRY) + if value is _SENTRY: + return {} + return {options[0].name: value} + + def set_value(module, data, api_version, options, values): + if len(options) != 1: + raise AssertionError('config_value can only be used for a single option') + if options[0].name not in values: + return + value = values[options[0].name] + if preprocess_for_set: + value = preprocess_for_set(module, api_version, value) + data[host_config_name] = value + + return cls(get_value=get_value, preprocess_value=preprocess_value_, set_value=set_value, min_docker_api=min_docker_api) + + @classmethod + def host_config_value(cls, host_config_name, postprocess_for_get=None, preprocess_for_set=None, min_docker_api=None, preprocess_value=None): + def preprocess_value_(module, api_version, options, values): + if len(options) != 1: + raise AssertionError('host_config_value can only be used for a single option') + if preprocess_value is not None and options[0].name in values: + values[options[0].name] = preprocess_value(module, api_version, values[options[0].name]) + return values + + def get_value(module, container, api_version, options): + if len(options) != 1: + raise AssertionError('host_config_value can only be used for a single option') + value = 
container['HostConfig'].get(host_config_name, _SENTRY) + if postprocess_for_get: + value = postprocess_for_get(module, api_version, value, _SENTRY) + if value is _SENTRY: + return {} + return {options[0].name: value} + + def set_value(module, data, api_version, options, values): + if len(options) != 1: + raise AssertionError('host_config_value can only be used for a single option') + if options[0].name not in values: + return + if 'HostConfig' not in data: + data['HostConfig'] = {} + value = values[options[0].name] + if preprocess_for_set: + value = preprocess_for_set(module, api_version, value) + data['HostConfig'][host_config_name] = value + + return cls(get_value=get_value, preprocess_value=preprocess_value_, set_value=set_value, min_docker_api=min_docker_api) + + +def _preprocess_command(module, api_version, value): + if module.params['command_handling'] == 'correct': + if value is not None: + if not isinstance(value, list): + # convert from str to list + value = shlex.split(to_text(value, errors='surrogate_or_strict')) + value = [to_text(x, errors='surrogate_or_strict') for x in value] + elif value: + # convert from list to str + if isinstance(value, list): + value = shlex.split(' '.join([to_text(x, errors='surrogate_or_strict') for x in value])) + value = [to_text(x, errors='surrogate_or_strict') for x in value] + else: + value = shlex.split(to_text(value, errors='surrogate_or_strict')) + value = [to_text(x, errors='surrogate_or_strict') for x in value] + return value + + +def _preprocess_entrypoint(module, api_version, value): + if module.params['command_handling'] == 'correct': + if value is not None: + value = [to_text(x, errors='surrogate_or_strict') for x in value] + elif value: + # convert from list to str. 
+ value = shlex.split(' '.join([to_text(x, errors='surrogate_or_strict') for x in value])) + value = [to_text(x, errors='surrogate_or_strict') for x in value] + return value + + +def _preprocess_cpus(module, api_version, value): + if value is not None: + value = int(round(value * 1E9)) + return value + + +def _preprocess_devices(module, api_version, value): + if not value: + return value + expected_devices = [] + for device in value: + parts = device.split(':') + if len(parts) == 1: + expected_devices.append( + dict( + CgroupPermissions='rwm', + PathInContainer=parts[0], + PathOnHost=parts[0] + )) + elif len(parts) == 2: + parts = device.split(':') + expected_devices.append( + dict( + CgroupPermissions='rwm', + PathInContainer=parts[1], + PathOnHost=parts[0] + ) + ) + else: + expected_devices.append( + dict( + CgroupPermissions=parts[2], + PathInContainer=parts[1], + PathOnHost=parts[0] + )) + return expected_devices + + +def _preprocess_rate_bps(module, api_version, value, name): + if not value: + return value + devices = [] + for device in value: + devices.append({ + 'Path': device['path'], + 'Rate': human_to_bytes(device['rate']), + }) + return devices + + +def _preprocess_rate_iops(module, api_version, value, name): + if not value: + return value + devices = [] + for device in value: + devices.append({ + 'Path': device['path'], + 'Rate': device['rate'], + }) + return devices + + +def _preprocess_device_requests(module, api_version, value): + if not value: + return value + device_requests = [] + for dr in value: + device_requests.append({ + 'Driver': dr['driver'], + 'Count': dr['count'], + 'DeviceIDs': dr['device_ids'], + 'Capabilities': dr['capabilities'], + 'Options': dr['options'], + }) + return device_requests + + +def _preprocess_etc_hosts(module, api_version, value): + if value is None: + return value + results = [] + for key, value in value.items(): + results.append("%s%s%s" % (key, ':', value)) + return results + + +def _preprocess_healthcheck(module, 
api_version, value): + if value is None: + return value + healthcheck, disable_healthcheck = parse_healthcheck(value) + if disable_healthcheck: + healthcheck = {'test': ['NONE']} + if not healthcheck: + return None + return { + 'Test': healthcheck.get('test'), + 'Interval': healthcheck.get('interval'), + 'Timeout': healthcheck.get('timeout'), + 'StartPeriod': healthcheck.get('start_period'), + 'Retries': healthcheck.get('retries'), + } + + +def _postprocess_healthcheck_get_value(module, api_version, value, sentry): + if value is None or value is sentry or value.get('Test') == ['NONE']: + return {'Test': ['NONE']} + return value + + +def _preprocess_convert_to_bytes(module, values, name, unlimited_value=None): + if name not in values: + return + try: + value = values[name] + if unlimited_value is not None and value == 'unlimited': + value = unlimited_value + else: + value = human_to_bytes(value) + values[name] = + except ValueError as exc: + self.fail("Failed to convert %s to bytes: %s" % (name, to_native(exc))) + + +def _preprocess_links(module, api_version, value): + if value is None: + return None + + result = [] + for link in value: + parsed_link = link.split(':', 1) + if len(parsed_link) == 2: + link, alias = parsed_link + else: + link, alias = parsed_link[0], parsed_link[0] + result.append("/%s:/%s/%s" % (link, module.params['name'], alias)) + + return result + + +OPTIONS = [ + OptionGroup() + .add_option('auto_remove', type='bool') + .add_docker_api(DockerAPIEngine.host_config_value('AutoRemove', min_docker_api='1.25')), + + OptionGroup() + .add_option('capabilities', type='set', elements='str') + .add_docker_api(DockerAPIEngine.host_config_value('CapAdd')), + + OptionGroup() + .add_option('cap_drop', type='set', elements='str') + .add_docker_api(DockerAPIEngine.host_config_value('CapDrop')), + + OptionGroup() + .add_option('cgroup_parent', type='str') + .add_docker_api(DockerAPIEngine.host_config_value('CgroupParent')), + + OptionGroup() + 
.add_option('command', type='list', elements='str', ansible_type='raw') + .add_docker_api(DockerAPIEngine.config_value('Cmd', preprocess_value=_preprocess_command)), + + OptionGroup() + .add_option('entrypoint', type='list', elements='str') + .add_docker_api(DockerAPIEngine.config_value('Entrypoint', preprocess_value=_preprocess_command)), + + OptionGroup() + .add_option('cpus', type='int', ansible_type='float') + .add_docker_api(DockerAPIEngine.host_config_value('NanoCpus', min_docker_api='1.25', preprocess_value=_preprocess_cpus)), + + OptionGroup() + .add_option('devices', type='set', elements='str') + .add_docker_api(DockerAPIEngine.host_config_value('Devices', preprocess_value=_preprocess_devices)), + + OptionGroup() + .add_option('device_read_bps', type='set', elements='dict', ansible_suboptions=dict( + path=dict(required=True, type='str'), + rate=dict(required=True, type='str'), + )) + .add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceReadBps', min_docker_api='1.22', preprocess_value=partial(_preprocess_rate_bps, name='device_read_bps'))), + + OptionGroup() + .add_option('device_write_bps', type='set', elements='dict', ansible_suboptions=dict( + path=dict(required=True, type='str'), + rate=dict(required=True, type='str'), + )) + .add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceWriteBps', min_docker_api='1.22', preprocess_value=partial(_preprocess_rate_bps, name='device_write_bps'))), + + OptionGroup() + .add_option('device_read_iops', type='set', elements='dict', ansible_suboptions=dict( + path=dict(required=True, type='str'), + rate=dict(required=True, type='int'), + )) + .add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceReadIOps', min_docker_api='1.22', preprocess_value=partial(_preprocess_rate_iops, name='device_read_iops'))), + + OptionGroup() + .add_option('device_write_iops', type='set', elements='dict', ansible_suboptions=dict( + path=dict(required=True, type='str'), + rate=dict(required=True, type='int'), + )) + 
.add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceWriteIOps', min_docker_api='1.22', preprocess_value=partial(_preprocess_rate_iops, name='device_write_iops'))), + + OptionGroup() + .add_option('device_requests', type='set', elements='dict', ansible_suboptions=dict( + capabilities=dict(type='list', elements='list'), + count=dict(type='int'), + device_ids=dict(type='list', elements='str'), + driver=dict(type='str'), + options=dict(type='dict'), + )) + .add_docker_api(DockerAPIEngine.host_config_value('DeviceRequests', min_docker_api='1.40', preprocess_value=_preprocess_device_requests)), + + OptionGroup() + .add_option('dns_servers', type='list', elements='str') + .add_docker_api(DockerAPIEngine.host_config_value('Dns')), + + OptionGroup() + .add_option('dns_opts', type='set', elements='str') + .add_docker_api(DockerAPIEngine.host_config_value('DnsOptions', min_docker_api='1.21')), + + OptionGroup() + .add_option('dns_search_domains', type='list', elements='str') + .add_docker_api(DockerAPIEngine.host_config_value('DnsSearch')), + + OptionGroup() + .add_option('domainname', type='str') + .add_docker_api(DockerAPIEngine.config_value('Domainname')), + + OptionGroup() + .add_option('etc_hosts', type='set', ansible_type='dict', elements='str') + .add_docker_api(DockerAPIEngine.host_config_value('ExtraHosts', preprocess_value=_preprocess_etc_hosts)), + + OptionGroup() + .add_option('groups', type='set', elements='str') + .add_docker_api(DockerAPIEngine.config_value('GroupAdd')), + + OptionGroup() + .add_option('healthcheck', type='dict', ansible_suboptions=dict( + test=dict(type='raw'), + interval=dict(type='str'), + timeout=dict(type='str'), + start_period=dict(type='str'), + retries=dict(type='int'), + )) + .add_docker_api(DockerAPIEngine.config_value('GroupAdd', min_docker_api='1.24', preprocess_value=_preprocess_healthcheck, postprocess_for_get=_postprocess_healthcheck_get_value)), + + OptionGroup() + .add_option('hostname', type='str') + 
.add_docker_api(DockerAPIEngine.config_value('Hostname')), + + OptionGroup() + .add_option('init', type='bool') + .add_docker_api(DockerAPIEngine.host_config_value('Init', min_docker_api='1.25')), + + OptionGroup() + .add_option('interactive', type='bool') + .add_docker_api(DockerAPIEngine.config_value('OpenStdin')), + + OptionGroup() + .add_option('links', type='set', elements='list', ansible_elements='str') + .add_docker_api(DockerAPIEngine.config_value('Links', preprocess_values=_preprocess_links)), + + OptionGroup() + .add_option('memory_swappiness', type='int') + .add_docker_api(DockerAPIEngine.host_config_value('MemorySwappiness')), +] + +# Regular module options: +# cleanup=dict(type='bool', default=False), +# comparisons=dict(type='dict'), +# container_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']), +# command_handling=dict(type='str', choices=['compatibility', 'correct']), +# default_host_ip=dict(type='str'), +# force_kill=dict(type='bool', default=False, aliases=['forcekill']), +# ignore_image=dict(type='bool', default=False), +# image=dict(type='str'), +# image_label_mismatch=dict(type='str', choices=['ignore', 'fail'], default='ignore'), +# keep_volumes=dict(type='bool', default=True), +# kill_signal=dict(type='str'), +# name=dict(type='str', required=True), + +# Options that can be updated: +# blkio_weight=dict(type='int'), +# cpu_period=dict(type='int'), +# cpu_quota=dict(type='int'), +# cpuset_cpus=dict(type='str'), +# cpuset_mems=dict(type='str'), +# cpu_shares=dict(type='int'), + +# OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='kernel_memory')) +# .add_option('kernel_memory', type='int', ansible_type='str') +# .add_docker_api(DockerAPIEngine.host_config_value('KernelMemory', min_docker_api='1.21')), + + +# OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory')) +# .add_option('memory', type='int', ansible_type='str') +# 
.add_docker_api(DockerAPIEngine.host_config_value('Memory')), + +# OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory_reservation')) +# .add_option('memory_reservation', type='int', ansible_type='str') +# .add_docker_api(DockerAPIEngine.host_config_value('MemoryReservation', min_docker_api='1.21')), + +# OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory_swap', unlimited_value=-1)) +# .add_option('memory_swap', type='int', ansible_type='str') +# .add_docker_api(DockerAPIEngine.host_config_value('MemorySwap')), + +# Options / option groups that are more complex: +# detach=dict(type='bool'), +# env=dict(type='dict'), +# env_file=dict(type='path'), +# exposed_ports=dict(type='list', elements='str', aliases=['exposed', 'expose']), +# ipc_mode=dict(type='str'), +# labels=dict(type='dict'), + + +# OptionGroup() +# .add_option('log_driver', type='str') +# .add_option('log_options', type='dict', ansible_aliases=['log_opt']) +# .add_docker_api(...) + +# if self.mac_address: +# # Ensure the MAC address uses colons instead of hyphens for later comparison +# self.mac_address = self.mac_address.replace('-', ':') +# +# mac_address=config.get('MacAddress', network.get('MacAddress')), +# +# mac_address=dict(type='str'), +# mac_address=dict(docker_api_version='1.25'), + +# REQUIRES_CONVERSION_TO_BYTES = [ +# 'shm_size' +# ] + +# Options to convert / triage: +# mounts=dict(type='list', elements='dict', options=dict( +# target=dict(type='str', required=True), +# source=dict(type='str'), +# type=dict(type='str', choices=['bind', 'volume', 'tmpfs', 'npipe'], default='volume'), +# read_only=dict(type='bool'), +# consistency=dict(type='str', choices=['default', 'consistent', 'cached', 'delegated']), +# propagation=dict(type='str', choices=['private', 'rprivate', 'shared', 'rshared', 'slave', 'rslave']), +# no_copy=dict(type='bool'), +# labels=dict(type='dict'), +# volume_driver=dict(type='str'), +# volume_options=dict(type='dict'), +# 
tmpfs_size=dict(type='str'), +# tmpfs_mode=dict(type='str'), +# )), +# network_mode=dict(type='str'), +# networks=dict(type='list', elements='dict', options=dict( +# name=dict(type='str', required=True), +# ipv4_address=dict(type='str'), +# ipv6_address=dict(type='str'), +# aliases=dict(type='list', elements='str'), +# links=dict(type='list', elements='str'), +# )), +# networks_cli_compatible=dict(type='bool', default=True), +# oom_killer=dict(type='bool'), +# oom_score_adj=dict(type='int'), +# output_logs=dict(type='bool', default=False), +# paused=dict(type='bool'), +# pid_mode=dict(type='str'), +# pids_limit=dict(type='int'), +# privileged=dict(type='bool'), +# publish_all_ports=dict(type='bool'), +# published_ports=dict(type='list', elements='str', aliases=['ports']), +# pull=dict(type='bool', default=False), +# purge_networks=dict(type='bool', default=False), +# read_only=dict(type='bool'), +# recreate=dict(type='bool', default=False), +# removal_wait_timeout=dict(type='float'), +# restart=dict(type='bool', default=False), +# restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']), +# restart_retries=dict(type='int'), +# runtime=dict(type='str'), +# security_opts=dict(type='list', elements='str'), +# shm_size=dict(type='str'), +# state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']), +# stop_signal=dict(type='str'), +# stop_timeout=dict(type='int'), +# storage_opts=dict(type='dict'), +# sysctls=dict(type='dict'), +# tmpfs=dict(type='list', elements='str'), +# tty=dict(type='bool'), +# ulimits=dict(type='list', elements='str'), +# user=dict(type='str'), +# userns_mode=dict(type='str'), +# uts=dict(type='str'), +# volume_driver=dict(type='str'), +# volumes=dict(type='list', elements='str'), +# volumes_from=dict(type='list', elements='str'), +# working_dir=dict(type='str'), +# +# # normal options +# ipc_mode=dict(docker_api_version='1.25'), +# oom_score_adj=dict(docker_api_version='1.22'), 
+# shm_size=dict(docker_api_version='1.22'), +# stop_signal=dict(docker_api_version='1.21'), +# tmpfs=dict(docker_api_version='1.22'), +# volume_driver=dict(docker_api_version='1.21'), +# runtime=dict(docker_py_version='2.4.0', docker_api_version='1.25'), +# sysctls=dict(docker_py_version='1.10.0', docker_api_version='1.24'), +# userns_mode=dict(docker_py_version='1.10.0', docker_api_version='1.23'), +# uts=dict(docker_py_version='3.5.0', docker_api_version='1.25'), +# pids_limit=dict(docker_py_version='1.10.0', docker_api_version='1.23'), +# mounts=dict(docker_py_version='2.6.0', docker_api_version='1.25'), +# storage_opts=dict(docker_py_version='2.1.0', docker_api_version='1.24'), +# # specials +# ipvX_address_supported=dict(docker_py_version='1.9.0', docker_api_version='1.22', +# detect_usage=detect_ipvX_address_usage, +# usage_msg='ipv4_address or ipv6_address in networks'), +# stop_timeout_supported = self.docker_api_version >= LooseVersion('1.25') +# stop_timeout_needed_for_update = self.module.params.get("stop_timeout") is not None and self.module.params.get('state') != 'absent' +# if not stop_timeout_supported: +# if stop_timeout_needed_for_update and not stop_timeout_supported: +# # We warn (instead of fail) since in older versions, stop_timeout was not used +# # to update the container's configuration, but only when stopping a container. +# self.module.warn("Docker API version is %s. Minimum version required is 1.25 to set or " +# "update the container's stop_timeout configuration." % (self.docker_api_version_str,)) +# self.option_minimal_versions['stop_timeout']['supported'] = stop_timeout_supported +# +# explicit_types = dict( +# env='set', +# mounts='set(dict)', +# networks='set(dict)', +# ulimits='set(dict)', +# ) +# +# default_values = dict( +# stop_timeout='ignore', +# } From 0a249c9597820fad12db5511c97bcbd71cfd87cb Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 3 Jul 2022 13:53:29 +0200 Subject: [PATCH 02/38] Continued. 
--- plugins/module_utils/module_container.py | 97 +++++++++++++++++------- 1 file changed, 68 insertions(+), 29 deletions(-) diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container.py index c0442e283..029efdc49 100644 --- a/plugins/module_utils/module_container.py +++ b/plugins/module_utils/module_container.py @@ -82,7 +82,7 @@ def __init__(self, get_value, preprocess_value, set_value=None, update_value=Non self.can_update_value = can_update_value or (lambda api_version: update_value is not None) @classmethod - def config_value(cls, host_config_name, postprocess_for_get=None, preprocess_for_set=None, min_docker_api=None, preprocess_value=None): + def config_value(cls, host_config_name, postprocess_for_get=None, preprocess_for_set=None, min_docker_api=None, preprocess_value=None, update_parameter=None): def preprocess_value_(module, api_version, options, values): if len(options) != 1: raise AssertionError('config_value can only be used for a single option') @@ -110,10 +110,22 @@ def set_value(module, data, api_version, options, values): value = preprocess_for_set(module, api_version, value) data[host_config_name] = value - return cls(get_value=get_value, preprocess_value=preprocess_value_, set_value=set_value, min_docker_api=min_docker_api) + update_value = None + if update_parameter: + def update_value(module, data, api_version, options, values): + if len(options) != 1: + raise AssertionError('update_parameter can only be used for a single option') + if options[0].name not in values: + return + value = values[options[0].name] + if preprocess_for_set: + value = preprocess_for_set(module, api_version, value) + data[update_paramete] = value + + return cls(get_value=get_value, preprocess_value=preprocess_value_, set_value=set_value, min_docker_api=min_docker_api, update_value=update_value) @classmethod - def host_config_value(cls, host_config_name, postprocess_for_get=None, preprocess_for_set=None, min_docker_api=None, 
preprocess_value=None): + def host_config_value(cls, host_config_name, postprocess_for_get=None, preprocess_for_set=None, min_docker_api=None, preprocess_value=None, update_parameter=None): def preprocess_value_(module, api_version, options, values): if len(options) != 1: raise AssertionError('host_config_value can only be used for a single option') @@ -143,7 +155,19 @@ def set_value(module, data, api_version, options, values): value = preprocess_for_set(module, api_version, value) data['HostConfig'][host_config_name] = value - return cls(get_value=get_value, preprocess_value=preprocess_value_, set_value=set_value, min_docker_api=min_docker_api) + update_value = None + if update_parameter: + def update_value(module, data, api_version, options, values): + if len(options) != 1: + raise AssertionError('update_parameter can only be used for a single option') + if options[0].name not in values: + return + value = values[options[0].name] + if preprocess_for_set: + value = preprocess_for_set(module, api_version, value) + data[update_paramete] = value + + return cls(get_value=get_value, preprocess_value=preprocess_value_, set_value=set_value, min_docker_api=min_docker_api, update_value=update_value) def _preprocess_command(module, api_version, value): @@ -319,6 +343,10 @@ def _preprocess_links(module, api_version, value): .add_option('auto_remove', type='bool') .add_docker_api(DockerAPIEngine.host_config_value('AutoRemove', min_docker_api='1.25')), + OptionGroup() + .add_option('blkio_weight', type='int') + .add_docker_api(DockerAPIEngine.config_value('BlkioWeight')), + OptionGroup() .add_option('capabilities', type='set', elements='str') .add_docker_api(DockerAPIEngine.host_config_value('CapAdd')), @@ -335,6 +363,26 @@ def _preprocess_links(module, api_version, value): .add_option('command', type='list', elements='str', ansible_type='raw') .add_docker_api(DockerAPIEngine.config_value('Cmd', preprocess_value=_preprocess_command)), + OptionGroup() + 
.add_option('cpu_period', type='int') + .add_docker_api(DockerAPIEngine.config_value('CpuPeriod')), + + OptionGroup() + .add_option('cpu_quota', type='int') + .add_docker_api(DockerAPIEngine.config_value('CpuQuota')), + + OptionGroup() + .add_option('cpuset_cpus', type='str') + .add_docker_api(DockerAPIEngine.config_value('CpuShares')), + + OptionGroup() + .add_option('cpuset_mems', type='str') + .add_docker_api(DockerAPIEngine.config_value('CpusetCpus')), + + OptionGroup() + .add_option('cpu_shares', type='int') + .add_docker_api(DockerAPIEngine.config_value('CpusetMems')), + OptionGroup() .add_option('entrypoint', type='list', elements='str') .add_docker_api(DockerAPIEngine.config_value('Entrypoint', preprocess_value=_preprocess_command)), @@ -431,10 +479,26 @@ def _preprocess_links(module, api_version, value): .add_option('interactive', type='bool') .add_docker_api(DockerAPIEngine.config_value('OpenStdin')), + OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='kernel_memory')) + .add_option('kernel_memory', type='int', ansible_type='str') + .add_docker_api(DockerAPIEngine.host_config_value('KernelMemory', min_docker_api='1.21')), + OptionGroup() .add_option('links', type='set', elements='list', ansible_elements='str') .add_docker_api(DockerAPIEngine.config_value('Links', preprocess_values=_preprocess_links)), + OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory')) + .add_option('memory', type='int', ansible_type='str') + .add_docker_api(DockerAPIEngine.host_config_value('Memory')), + + OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory_reservation')) + .add_option('memory_reservation', type='int', ansible_type='str') + .add_docker_api(DockerAPIEngine.host_config_value('MemoryReservation', min_docker_api='1.21')), + + OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory_swap', unlimited_value=-1)) + .add_option('memory_swap', type='int', ansible_type='str') + 
.add_docker_api(DockerAPIEngine.host_config_value('MemorySwap')), + OptionGroup() .add_option('memory_swappiness', type='int') .add_docker_api(DockerAPIEngine.host_config_value('MemorySwappiness')), @@ -454,31 +518,6 @@ def _preprocess_links(module, api_version, value): # kill_signal=dict(type='str'), # name=dict(type='str', required=True), -# Options that can be updated: -# blkio_weight=dict(type='int'), -# cpu_period=dict(type='int'), -# cpu_quota=dict(type='int'), -# cpuset_cpus=dict(type='str'), -# cpuset_mems=dict(type='str'), -# cpu_shares=dict(type='int'), - -# OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='kernel_memory')) -# .add_option('kernel_memory', type='int', ansible_type='str') -# .add_docker_api(DockerAPIEngine.host_config_value('KernelMemory', min_docker_api='1.21')), - - -# OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory')) -# .add_option('memory', type='int', ansible_type='str') -# .add_docker_api(DockerAPIEngine.host_config_value('Memory')), - -# OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory_reservation')) -# .add_option('memory_reservation', type='int', ansible_type='str') -# .add_docker_api(DockerAPIEngine.host_config_value('MemoryReservation', min_docker_api='1.21')), - -# OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory_swap', unlimited_value=-1)) -# .add_option('memory_swap', type='int', ansible_type='str') -# .add_docker_api(DockerAPIEngine.host_config_value('MemorySwap')), - # Options / option groups that are more complex: # detach=dict(type='bool'), # env=dict(type='dict'), From 47ddcad37b66bdb251a3ead183d528b92adfba64 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 3 Jul 2022 14:00:59 +0200 Subject: [PATCH 03/38] We support API >= 1.25 only anyway. 
--- plugins/module_utils/module_container.py | 51 +++++------------------- 1 file changed, 11 insertions(+), 40 deletions(-) diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container.py index 029efdc49..83fdf6df3 100644 --- a/plugins/module_utils/module_container.py +++ b/plugins/module_utils/module_container.py @@ -341,7 +341,7 @@ def _preprocess_links(module, api_version, value): OPTIONS = [ OptionGroup() .add_option('auto_remove', type='bool') - .add_docker_api(DockerAPIEngine.host_config_value('AutoRemove', min_docker_api='1.25')), + .add_docker_api(DockerAPIEngine.host_config_value('AutoRemove')), OptionGroup() .add_option('blkio_weight', type='int') @@ -389,7 +389,7 @@ def _preprocess_links(module, api_version, value): OptionGroup() .add_option('cpus', type='int', ansible_type='float') - .add_docker_api(DockerAPIEngine.host_config_value('NanoCpus', min_docker_api='1.25', preprocess_value=_preprocess_cpus)), + .add_docker_api(DockerAPIEngine.host_config_value('NanoCpus', preprocess_value=_preprocess_cpus)), OptionGroup() .add_option('devices', type='set', elements='str') @@ -400,28 +400,28 @@ def _preprocess_links(module, api_version, value): path=dict(required=True, type='str'), rate=dict(required=True, type='str'), )) - .add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceReadBps', min_docker_api='1.22', preprocess_value=partial(_preprocess_rate_bps, name='device_read_bps'))), + .add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceReadBps', preprocess_value=partial(_preprocess_rate_bps, name='device_read_bps'))), OptionGroup() .add_option('device_write_bps', type='set', elements='dict', ansible_suboptions=dict( path=dict(required=True, type='str'), rate=dict(required=True, type='str'), )) - .add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceWriteBps', min_docker_api='1.22', preprocess_value=partial(_preprocess_rate_bps, name='device_write_bps'))), + 
.add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceWriteBps', preprocess_value=partial(_preprocess_rate_bps, name='device_write_bps'))), OptionGroup() .add_option('device_read_iops', type='set', elements='dict', ansible_suboptions=dict( path=dict(required=True, type='str'), rate=dict(required=True, type='int'), )) - .add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceReadIOps', min_docker_api='1.22', preprocess_value=partial(_preprocess_rate_iops, name='device_read_iops'))), + .add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceReadIOps', preprocess_value=partial(_preprocess_rate_iops, name='device_read_iops'))), OptionGroup() .add_option('device_write_iops', type='set', elements='dict', ansible_suboptions=dict( path=dict(required=True, type='str'), rate=dict(required=True, type='int'), )) - .add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceWriteIOps', min_docker_api='1.22', preprocess_value=partial(_preprocess_rate_iops, name='device_write_iops'))), + .add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceWriteIOps', preprocess_value=partial(_preprocess_rate_iops, name='device_write_iops'))), OptionGroup() .add_option('device_requests', type='set', elements='dict', ansible_suboptions=dict( @@ -439,7 +439,7 @@ def _preprocess_links(module, api_version, value): OptionGroup() .add_option('dns_opts', type='set', elements='str') - .add_docker_api(DockerAPIEngine.host_config_value('DnsOptions', min_docker_api='1.21')), + .add_docker_api(DockerAPIEngine.host_config_value('DnsOptions')), OptionGroup() .add_option('dns_search_domains', type='list', elements='str') @@ -465,7 +465,7 @@ def _preprocess_links(module, api_version, value): start_period=dict(type='str'), retries=dict(type='int'), )) - .add_docker_api(DockerAPIEngine.config_value('GroupAdd', min_docker_api='1.24', preprocess_value=_preprocess_healthcheck, postprocess_for_get=_postprocess_healthcheck_get_value)), + 
.add_docker_api(DockerAPIEngine.config_value('GroupAdd', preprocess_value=_preprocess_healthcheck, postprocess_for_get=_postprocess_healthcheck_get_value)), OptionGroup() .add_option('hostname', type='str') @@ -473,7 +473,7 @@ def _preprocess_links(module, api_version, value): OptionGroup() .add_option('init', type='bool') - .add_docker_api(DockerAPIEngine.host_config_value('Init', min_docker_api='1.25')), + .add_docker_api(DockerAPIEngine.host_config_value('Init')), OptionGroup() .add_option('interactive', type='bool') @@ -481,7 +481,7 @@ def _preprocess_links(module, api_version, value): OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='kernel_memory')) .add_option('kernel_memory', type='int', ansible_type='str') - .add_docker_api(DockerAPIEngine.host_config_value('KernelMemory', min_docker_api='1.21')), + .add_docker_api(DockerAPIEngine.host_config_value('KernelMemory')), OptionGroup() .add_option('links', type='set', elements='list', ansible_elements='str') @@ -493,7 +493,7 @@ def _preprocess_links(module, api_version, value): OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory_reservation')) .add_option('memory_reservation', type='int', ansible_type='str') - .add_docker_api(DockerAPIEngine.host_config_value('MemoryReservation', min_docker_api='1.21')), + .add_docker_api(DockerAPIEngine.host_config_value('MemoryReservation')), OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory_swap', unlimited_value=-1)) .add_option('memory_swap', type='int', ansible_type='str') @@ -539,7 +539,6 @@ def _preprocess_links(module, api_version, value): # mac_address=config.get('MacAddress', network.get('MacAddress')), # # mac_address=dict(type='str'), -# mac_address=dict(docker_api_version='1.25'), # REQUIRES_CONVERSION_TO_BYTES = [ # 'shm_size' @@ -605,34 +604,6 @@ def _preprocess_links(module, api_version, value): # volumes_from=dict(type='list', elements='str'), # working_dir=dict(type='str'), # -# # normal options -# 
ipc_mode=dict(docker_api_version='1.25'), -# oom_score_adj=dict(docker_api_version='1.22'), -# shm_size=dict(docker_api_version='1.22'), -# stop_signal=dict(docker_api_version='1.21'), -# tmpfs=dict(docker_api_version='1.22'), -# volume_driver=dict(docker_api_version='1.21'), -# runtime=dict(docker_py_version='2.4.0', docker_api_version='1.25'), -# sysctls=dict(docker_py_version='1.10.0', docker_api_version='1.24'), -# userns_mode=dict(docker_py_version='1.10.0', docker_api_version='1.23'), -# uts=dict(docker_py_version='3.5.0', docker_api_version='1.25'), -# pids_limit=dict(docker_py_version='1.10.0', docker_api_version='1.23'), -# mounts=dict(docker_py_version='2.6.0', docker_api_version='1.25'), -# storage_opts=dict(docker_py_version='2.1.0', docker_api_version='1.24'), -# # specials -# ipvX_address_supported=dict(docker_py_version='1.9.0', docker_api_version='1.22', -# detect_usage=detect_ipvX_address_usage, -# usage_msg='ipv4_address or ipv6_address in networks'), -# stop_timeout_supported = self.docker_api_version >= LooseVersion('1.25') -# stop_timeout_needed_for_update = self.module.params.get("stop_timeout") is not None and self.module.params.get('state') != 'absent' -# if not stop_timeout_supported: -# if stop_timeout_needed_for_update and not stop_timeout_supported: -# # We warn (instead of fail) since in older versions, stop_timeout was not used -# # to update the container's configuration, but only when stopping a container. -# self.module.warn("Docker API version is %s. Minimum version required is 1.25 to set or " -# "update the container's stop_timeout configuration." % (self.docker_api_version_str,)) -# self.option_minimal_versions['stop_timeout']['supported'] = stop_timeout_supported -# # explicit_types = dict( # env='set', # mounts='set(dict)', From b6d2bb40a8163e2ebeeaa8c212b0c26ee9b327c5 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 3 Jul 2022 16:27:41 +0200 Subject: [PATCH 04/38] Continued. 
--- plugins/module_utils/module_container.py | 98 +- plugins/modules/docker_container.py | 4 +- plugins/modules/docker_container2.py | 1313 ++++++++++++++++++++++ 3 files changed, 1386 insertions(+), 29 deletions(-) create mode 100644 plugins/modules/docker_container2.py diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container.py index 83fdf6df3..2638b8456 100644 --- a/plugins/module_utils/module_container.py +++ b/plugins/module_utils/module_container.py @@ -24,7 +24,16 @@ def _get_ansible_type(type): class Option(object): - def __init__(self, name, type, ansible_type=None, elements=None, ansible_elements=None, ansible_suboptions=None, ansible_aliases=None): + def __init__( + self, + name, + type, + ansible_type=None, + elements=None, + ansible_elements=None, + ansible_suboptions=None, + ansible_aliases=None, + ): self.name = name self.type = type self.ansible_type = ansible_type or _get_ansible_type(type) @@ -50,16 +59,41 @@ def __init__(self, name, type, ansible_type=None, elements=None, ansible_element class OptionGroup(object): - def __init__(self, preprocess=None): + def __init__( + self, + preprocess=None, + ansible_mutually_exclusive=None, + ansible_required_together=None, + ansible_required_one_of=None, + ansible_required_if=None, + ansible_required_by=None, + ): if preprocess is None: def preprocess(module, values): return values self.preprocess = preprocess self.options = [] self.engines = {} + self.ansible_mutually_exclusive = ansible_mutually_exclusive or [] + self.ansible_required_together = ansible_required_together or [] + self.ansible_required_one_of = ansible_required_one_of or [] + self.ansible_required_if = ansible_required_if or [] + self.ansible_required_by = ansible_required_by or {} + self.argument_spec = {} def add_option(self, *args, **kwargs): - self.options.append(Option(*args, **kwargs)) + option = Option(*args, **kwargs) + self.options.append(option) + ansible_option = { + 'type': 
option.ansible_type, + } + if option.ansible_elements is not None: + ansible_option['elements'] = option.ansible_elements + if option.ansible_suboptions is not None: + ansible_option['options'] = option.ansible_suboptions + if option.ansible_aliases: + ansible_option['aliases'] = option.ansible_aliases + self.argument_spec[option.name] = ansible_option return self def add_docker_api(self, docker_api): @@ -70,8 +104,21 @@ def add_docker_api(self, docker_api): _SENTRY = object() -def DockerAPIEngine(object): - def __init__(self, get_value, preprocess_value, set_value=None, update_value=None, can_set_value=None, can_update_value=None, min_docker_api=None): +class DockerAPIEngineDriver(object): + pass + + +class DockerAPIEngine(object): + def __init__( + self, + get_value, + preprocess_value, + set_value=None, + update_value=None, + can_set_value=None, + can_update_value=None, + min_docker_api=None, + ): self.min_docker_api = min_docker_api self.min_docker_api_obj = None if min_docker_api is None else LooseVersion(min_docker_api) self.get_value = get_value @@ -82,7 +129,15 @@ def __init__(self, get_value, preprocess_value, set_value=None, update_value=Non self.can_update_value = can_update_value or (lambda api_version: update_value is not None) @classmethod - def config_value(cls, host_config_name, postprocess_for_get=None, preprocess_for_set=None, min_docker_api=None, preprocess_value=None, update_parameter=None): + def config_value( + cls, + host_config_name, + postprocess_for_get=None, + preprocess_for_set=None, + min_docker_api=None, + preprocess_value=None, + update_parameter=None, + ): def preprocess_value_(module, api_version, options, values): if len(options) != 1: raise AssertionError('config_value can only be used for a single option') @@ -125,7 +180,15 @@ def update_value(module, data, api_version, options, values): return cls(get_value=get_value, preprocess_value=preprocess_value_, set_value=set_value, min_docker_api=min_docker_api, 
update_value=update_value) @classmethod - def host_config_value(cls, host_config_name, postprocess_for_get=None, preprocess_for_set=None, min_docker_api=None, preprocess_value=None, update_parameter=None): + def host_config_value( + cls, + host_config_name, + postprocess_for_get=None, + preprocess_for_set=None, + min_docker_api=None, + preprocess_value=None, + update_parameter=None, + ): def preprocess_value_(module, api_version, options, values): if len(options) != 1: raise AssertionError('host_config_value can only be used for a single option') @@ -504,20 +567,6 @@ def _preprocess_links(module, api_version, value): .add_docker_api(DockerAPIEngine.host_config_value('MemorySwappiness')), ] -# Regular module options: -# cleanup=dict(type='bool', default=False), -# comparisons=dict(type='dict'), -# container_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']), -# command_handling=dict(type='str', choices=['compatibility', 'correct']), -# default_host_ip=dict(type='str'), -# force_kill=dict(type='bool', default=False, aliases=['forcekill']), -# ignore_image=dict(type='bool', default=False), -# image=dict(type='str'), -# image_label_mismatch=dict(type='str', choices=['ignore', 'fail'], default='ignore'), -# keep_volumes=dict(type='bool', default=True), -# kill_signal=dict(type='str'), -# name=dict(type='str', required=True), - # Options / option groups that are more complex: # detach=dict(type='bool'), # env=dict(type='dict'), @@ -527,7 +576,7 @@ def _preprocess_links(module, api_version, value): # labels=dict(type='dict'), -# OptionGroup() +# OptionGroup(ansible_required_by={'log_options': ['log_driver']}) # .add_option('log_driver', type='str') # .add_option('log_options', type='dict', ansible_aliases=['log_opt']) # .add_docker_api(...) 
@@ -577,19 +626,14 @@ def _preprocess_links(module, api_version, value): # privileged=dict(type='bool'), # publish_all_ports=dict(type='bool'), # published_ports=dict(type='list', elements='str', aliases=['ports']), -# pull=dict(type='bool', default=False), # purge_networks=dict(type='bool', default=False), # read_only=dict(type='bool'), -# recreate=dict(type='bool', default=False), # removal_wait_timeout=dict(type='float'), -# restart=dict(type='bool', default=False), # restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']), # restart_retries=dict(type='int'), # runtime=dict(type='str'), # security_opts=dict(type='list', elements='str'), # shm_size=dict(type='str'), -# state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']), -# stop_signal=dict(type='str'), # stop_timeout=dict(type='int'), # storage_opts=dict(type='dict'), # sysctls=dict(type='dict'), diff --git a/plugins/modules/docker_container.py b/plugins/modules/docker_container.py index bf206e948..474824896 100644 --- a/plugins/modules/docker_container.py +++ b/plugins/modules/docker_container.py @@ -11,10 +11,10 @@ --- module: docker_container -short_description: manage docker containers +short_description: manage Docker containers description: - - Manage the life cycle of docker containers. + - Manage the life cycle of Docker containers. - Supports check mode. Run with C(--check) and C(--diff) to view config difference and list of actions to be taken. 
diff --git a/plugins/modules/docker_container2.py b/plugins/modules/docker_container2.py new file mode 100644 index 000000000..577fa3e8b --- /dev/null +++ b/plugins/modules/docker_container2.py @@ -0,0 +1,1313 @@ +#!/usr/bin/python +# +# Copyright 2016 Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: docker_container2 + +short_description: manage Docker containers + +description: + - Manage the life cycle of Docker containers. + - Supports check mode. Run with C(--check) and C(--diff) to view config difference and list of actions to be taken. + + +notes: + - For most config changes, the container needs to be recreated. This means that the existing container has to be destroyed and + a new one created. This can cause unexpected data loss and downtime. You can use the I(comparisons) option to + prevent this. + - If the module needs to recreate the container, it will only use the options provided to the module to create the + new container (except I(image)). Therefore, always specify B(all) options relevant to the container. + - When I(restart) is set to C(true), the module will only restart the container if no config changes are detected. + +options: + auto_remove: + description: + - Enable auto-removal of the container on daemon side when the container's process exits. + - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false). + type: bool + blkio_weight: + description: + - Block IO (relative weight), between 10 and 1000. + type: int + capabilities: + description: + - List of capabilities to add to the container. + - This is equivalent to C(docker run --cap-add), or the docker-compose option C(cap_add). + type: list + elements: str + cap_drop: + description: + - List of capabilities to drop from the container. 
+ type: list + elements: str + cgroup_parent: + description: + - Specify the parent cgroup for the container. + type: str + version_added: 1.1.0 + cleanup: + description: + - Use with I(detach=false) to remove the container after successful execution. + type: bool + default: no + command: + description: + - Command to execute when the container starts. A command may be either a string or a list. + - Prior to version 2.4, strings were split on commas. + - See I(command_handling) for differences in how strings and lists are handled. + type: raw + comparisons: + description: + - Allows to specify how properties of existing containers are compared with + module options to decide whether the container should be recreated / updated + or not. + - Only options which correspond to the state of a container as handled by the + Docker daemon can be specified, as well as C(networks). + - Must be a dictionary specifying for an option one of the keys C(strict), C(ignore) + and C(allow_more_present). + - If C(strict) is specified, values are tested for equality, and changes always + result in updating or restarting. If C(ignore) is specified, changes are ignored. + - C(allow_more_present) is allowed only for lists, sets and dicts. If it is + specified for lists or sets, the container will only be updated or restarted if + the module option contains a value which is not present in the container's + options. If the option is specified for a dict, the container will only be updated + or restarted if the module option contains a key which is not present in the + container's option, or if the value of a key present differs. + - The wildcard option C(*) can be used to set one of the default values C(strict) + or C(ignore) to I(all) comparisons which are not explicitly set to other values. + - See the examples for details. + type: dict + container_default_behavior: + description: + - In older versions of this module, various module options used to have default values. 
+ This caused problems with containers which use different values for these options. + - The default value is now C(no_defaults). To restore the old behavior, set it to + C(compatibility), which will ensure that the default values are used when the values + are not explicitly specified by the user. + - This affects the I(auto_remove), I(detach), I(init), I(interactive), I(memory), + I(paused), I(privileged), I(read_only) and I(tty) options. + type: str + choices: + - compatibility + - no_defaults + default: no_defaults + command_handling: + description: + - The default behavior for I(command) (when provided as a list) and I(entrypoint) is to + convert them to strings without considering shell quoting rules. (For comparing idempotency, + the resulting string is split considering shell quoting rules.) + - Also, setting I(command) to an empty list of string, and setting I(entrypoint) to an empty + list will be handled as if these options are not specified. This is different from idempotency + handling for other container-config related options. + - When this is set to C(compatibility), which was the default until community.docker 3.0.0, the + current behavior will be kept. + - When this is set to C(correct), these options are kept as lists, and an empty value or empty + list will be handled correctly for idempotency checks. This has been the default since + community.docker 3.0.0. + type: str + choices: + - compatibility + - correct + version_added: 1.9.0 + default: correct + cpu_period: + description: + - Limit CPU CFS (Completely Fair Scheduler) period. + - See I(cpus) for an easier to use alternative. + type: int + cpu_quota: + description: + - Limit CPU CFS (Completely Fair Scheduler) quota. + - See I(cpus) for an easier to use alternative. + type: int + cpus: + description: + - Specify how much of the available CPU resources a container can use. + - A value of C(1.5) means that at most one and a half CPU (core) will be used. 
+ type: float + cpuset_cpus: + description: + - CPUs in which to allow execution C(1,3) or C(1-3). + type: str + cpuset_mems: + description: + - Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1). + type: str + cpu_shares: + description: + - CPU shares (relative weight). + type: int + default_host_ip: + description: + - Define the default host IP to use. + - Must be an empty string, an IPv4 address, or an IPv6 address. + - With Docker 20.10.2 or newer, this should be set to an empty string (C("")) to avoid the + port bindings without an explicit IP address to only bind to IPv4. + See U(https://github.com/ansible-collections/community.docker/issues/70) for details. + - By default, the module will try to auto-detect this value from the C(bridge) network's + C(com.docker.network.bridge.host_binding_ipv4) option. If it cannot auto-detect it, it + will fall back to C(0.0.0.0). + type: str + version_added: 1.2.0 + detach: + description: + - Enable detached mode to leave the container running in background. + - If disabled, the task will reflect the status of the container run (failed if the command failed). + - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(true). + type: bool + devices: + description: + - List of host device bindings to add to the container. + - "Each binding is a mapping expressed in the format C(::)." + type: list + elements: str + device_read_bps: + description: + - "List of device path and read rate (bytes per second) from device." + type: list + elements: dict + suboptions: + path: + description: + - Device path in the container. + type: str + required: yes + rate: + description: + - "Device read limit in format C([])." + - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), + C(T) (tebibyte), or C(P) (pebibyte)." + - "Omitting the unit defaults to bytes." 
+ type: str + required: yes + device_write_bps: + description: + - "List of device and write rate (bytes per second) to device." + type: list + elements: dict + suboptions: + path: + description: + - Device path in the container. + type: str + required: yes + rate: + description: + - "Device read limit in format C([])." + - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), + C(T) (tebibyte), or C(P) (pebibyte)." + - "Omitting the unit defaults to bytes." + type: str + required: yes + device_read_iops: + description: + - "List of device and read rate (IO per second) from device." + type: list + elements: dict + suboptions: + path: + description: + - Device path in the container. + type: str + required: yes + rate: + description: + - "Device read limit." + - "Must be a positive integer." + type: int + required: yes + device_write_iops: + description: + - "List of device and write rate (IO per second) to device." + type: list + elements: dict + suboptions: + path: + description: + - Device path in the container. + type: str + required: yes + rate: + description: + - "Device read limit." + - "Must be a positive integer." + type: int + required: yes + device_requests: + description: + - Allows to request additional resources, such as GPUs. + type: list + elements: dict + suboptions: + capabilities: + description: + - List of lists of strings to request capabilities. + - The top-level list entries are combined by OR, and for every list entry, + the entries in the list it contains are combined by AND. + - The driver tries to satisfy one of the sub-lists. + - Available capabilities for the C(nvidia) driver can be found at + U(https://github.com/NVIDIA/nvidia-container-runtime). + type: list + elements: list + count: + description: + - Number or devices to request. + - Set to C(-1) to request all available devices. + type: int + device_ids: + description: + - List of device IDs. 
+ type: list + elements: str + driver: + description: + - Which driver to use for this device. + type: str + options: + description: + - Driver-specific options. + type: dict + version_added: 0.1.0 + dns_opts: + description: + - List of DNS options. + type: list + elements: str + dns_servers: + description: + - List of custom DNS servers. + type: list + elements: str + dns_search_domains: + description: + - List of custom DNS search domains. + type: list + elements: str + domainname: + description: + - Container domainname. + type: str + env: + description: + - Dictionary of key,value pairs. + - Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (for example C("true")) in order to avoid data loss. + - Please note that if you are passing values in with Jinja2 templates, like C("{{ value }}"), you need to add C(| string) to prevent Ansible to + convert strings such as C("true") back to booleans. The correct way is to use C("{{ value | string }}"). + type: dict + env_file: + description: + - Path to a file, present on the target, containing environment variables I(FOO=BAR). + - If variable also present in I(env), then the I(env) value will override. + type: path + entrypoint: + description: + - Command that overwrites the default C(ENTRYPOINT) of the image. + - See I(command_handling) for differences in how strings and lists are handled. + type: list + elements: str + etc_hosts: + description: + - Dict of host-to-IP mappings, where each host name is a key in the dictionary. + Each host name will be added to the container's C(/etc/hosts) file. + type: dict + exposed_ports: + description: + - List of additional container ports which informs Docker that the container + listens on the specified network ports at runtime. + - If the port is already exposed using C(EXPOSE) in a Dockerfile, it does not + need to be exposed again. 
+ type: list + elements: str + aliases: + - exposed + - expose + force_kill: + description: + - Use the kill command when stopping a running container. + type: bool + default: no + aliases: + - forcekill + groups: + description: + - List of additional group names and/or IDs that the container process will run as. + type: list + elements: str + healthcheck: + description: + - Configure a check that is run to determine whether or not containers for this service are "healthy". + - "See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck) + for details on how healthchecks work." + - "I(interval), I(timeout) and I(start_period) are specified as durations. They accept duration as a string in a format + that look like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)." + type: dict + suboptions: + test: + description: + - Command to run to check health. + - Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL). + type: raw + interval: + description: + - Time between running the check. + - The default used by the Docker daemon is C(30s). + type: str + timeout: + description: + - Maximum time to allow one check to run. + - The default used by the Docker daemon is C(30s). + type: str + retries: + description: + - Consecutive number of failures needed to report unhealthy. + - The default used by the Docker daemon is C(3). + type: int + start_period: + description: + - Start period for the container to initialize before starting health-retries countdown. + - The default used by the Docker daemon is C(0s). + type: str + hostname: + description: + - The container's hostname. + type: str + ignore_image: + description: + - When I(state) is C(present) or C(started), the module compares the configuration of an existing + container to requested configuration. The evaluation includes the image version. 
If the image + version in the registry does not match the container, the container will be recreated. You can + stop this behavior by setting I(ignore_image) to C(True). + - "B(Warning:) This option is ignored if C(image: ignore) or C(*: ignore) is specified in the + I(comparisons) option." + type: bool + default: no + image: + description: + - Repository path and tag used to create the container. If an image is not found or pull is true, the image + will be pulled from the registry. If no tag is included, C(latest) will be used. + - Can also be an image ID. If this is the case, the image is assumed to be available locally. + The I(pull) option is ignored for this case. + type: str + image_label_mismatch: + description: + - How to handle labels inherited from the image that are not set explicitly. + - When C(ignore), labels that are present in the image but not specified in I(labels) will be + ignored. This is useful to avoid having to specify the image labels in I(labels) while keeping + labels I(comparisons) C(strict). + - When C(fail), if there are labels present in the image which are not set from I(labels), the + module will fail. This prevents introducing unexpected labels from the base image. + - "B(Warning:) This option is ignored unless C(labels: strict) or C(*: strict) is specified in + the I(comparisons) option." + type: str + choices: + - 'ignore' + - 'fail' + default: ignore + version_added: 2.6.0 + init: + description: + - Run an init inside the container that forwards signals and reaps processes. + - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false). + type: bool + interactive: + description: + - Keep stdin open after a container is launched, even if not attached. + - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false). + type: bool + ipc_mode: + description: + - Set the IPC mode for the container. 
+ - Can be one of C(container:) to reuse another container's IPC namespace or C(host) to use + the host's IPC namespace within the container. + type: str + keep_volumes: + description: + - Retain anonymous volumes associated with a removed container. + type: bool + default: yes + kill_signal: + description: + - Override default signal used to kill a running container. + type: str + kernel_memory: + description: + - "Kernel memory limit in format C([]). Number is a positive integer. + Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), + C(T) (tebibyte), or C(P) (pebibyte). Minimum is C(4M)." + - Omitting the unit defaults to bytes. + type: str + labels: + description: + - Dictionary of key value pairs. + type: dict + links: + description: + - List of name aliases for linked containers in the format C(container_name:alias). + - Setting this will force container to be restarted. + type: list + elements: str + log_driver: + description: + - Specify the logging driver. Docker uses C(json-file) by default. + - See L(here,https://docs.docker.com/config/containers/logging/configure/) for possible choices. + type: str + log_options: + description: + - Dictionary of options specific to the chosen I(log_driver). + - See U(https://docs.docker.com/engine/admin/logging/overview/) for details. + - I(log_driver) needs to be specified for I(log_options) to take effect, even if using the default C(json-file) driver. + type: dict + aliases: + - log_opt + mac_address: + description: + - Container MAC address (for example, C(92:d0:c6:0a:29:33)). + type: str + memory: + description: + - "Memory limit in format C([]). Number is a positive integer. + Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), + C(T) (tebibyte), or C(P) (pebibyte)." + - Omitting the unit defaults to bytes. + - If I(container_default_behavior) is set to C(compatibility), this option has a default of C("0"). 
+ type: str + memory_reservation: + description: + - "Memory soft limit in format C([]). Number is a positive integer. + Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), + C(T) (tebibyte), or C(P) (pebibyte)." + - Omitting the unit defaults to bytes. + type: str + memory_swap: + description: + - "Total memory limit (memory + swap) in format C([]), or + the special values C(unlimited) or C(-1) for unlimited swap usage. + Number is a positive integer. Unit can be C(B) (byte), C(K) (kibibyte, 1024B), + C(M) (mebibyte), C(G) (gibibyte), C(T) (tebibyte), or C(P) (pebibyte)." + - Omitting the unit defaults to bytes. + type: str + memory_swappiness: + description: + - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + - If not set, the value will be remain the same if container exists and will be inherited + from the host machine if it is (re-)created. + type: int + mounts: + type: list + elements: dict + description: + - Specification for mounts to be added to the container. More powerful alternative to I(volumes). + suboptions: + target: + description: + - Path inside the container. + type: str + required: true + source: + description: + - Mount source. + - For example, this can be a volume name or a host path. + - If not supplied when I(type=volume) an anonymous volume will be created. + type: str + type: + description: + - The mount type. + - Note that C(npipe) is only supported by Docker for Windows. + type: str + choices: + - bind + - npipe + - tmpfs + - volume + default: volume + read_only: + description: + - Whether the mount should be read-only. + type: bool + consistency: + description: + - The consistency requirement for the mount. + type: str + choices: + - cached + - consistent + - default + - delegated + propagation: + description: + - Propagation mode. Only valid for the C(bind) type. 
+ type: str + choices: + - private + - rprivate + - shared + - rshared + - slave + - rslave + no_copy: + description: + - False if the volume should be populated with the data from the target. Only valid for the C(volume) type. + - The default value is C(false). + type: bool + labels: + description: + - User-defined name and labels for the volume. Only valid for the C(volume) type. + type: dict + volume_driver: + description: + - Specify the volume driver. Only valid for the C(volume) type. + - See L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details. + type: str + volume_options: + description: + - Dictionary of options specific to the chosen volume_driver. See + L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details. + type: dict + tmpfs_size: + description: + - "The size for the tmpfs mount in bytes in format <number>[<unit>]." + - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), + C(T) (tebibyte), or C(P) (pebibyte)." + - "Omitting the unit defaults to bytes." + type: str + tmpfs_mode: + description: + - The permission mode for the tmpfs mount. + type: str + name: + description: + - Assign a name to a new container or match an existing container. + - When identifying an existing container name may be a name or a long or short container ID. + type: str + required: yes + network_mode: + description: + - Connect the container to a network. Choices are C(bridge), C(host), C(none), C(container:<name|id>), C(<network-name>|<network-id>) or C(default). + - "Since community.docker 2.0.0, if I(networks_cli_compatible) is C(true) and I(networks) contains at least one network, + the default value for I(network_mode) is the name of the first network in the I(networks) list. You can prevent this + by explicitly specifying a value for I(network_mode), like the default value C(default) which will be used by Docker if + I(network_mode) is not specified."
+ type: str + userns_mode: + description: + - Set the user namespace mode for the container. Currently, the only valid value are C(host) and the empty string. + type: str + networks: + description: + - List of networks the container belongs to. + - For examples of the data structure and usage see EXAMPLES below. + - To remove a container from one or more networks, use the I(purge_networks) option. + - If I(networks_cli_compatible) is set to C(false), this will not remove the default network if I(networks) is specified. + This is different from the behavior of C(docker run ...). You need to explicitly use I(purge_networks) to enforce + the removal of the default network (and all other networks not explicitly mentioned in I(networks)) in that case. + type: list + elements: dict + suboptions: + name: + description: + - The network's name. + type: str + required: yes + ipv4_address: + description: + - The container's IPv4 address in this network. + type: str + ipv6_address: + description: + - The container's IPv6 address in this network. + type: str + links: + description: + - A list of containers to link to. + type: list + elements: str + aliases: + description: + - List of aliases for this container in this network. These names + can be used in the network to reach this container. + type: list + elements: str + networks_cli_compatible: + description: + - "If I(networks_cli_compatible) is set to C(yes) (default), this module will behave as + C(docker run --network) and will B(not) add the default network if I(networks) is + specified. If I(networks) is not specified, the default network will be attached." + - "When I(networks_cli_compatible) is set to C(no) and networks are provided to the module + via the I(networks) option, the module behaves differently than C(docker run --network): + C(docker run --network other) will create a container with network C(other) attached, + but the default network not attached. 
This module with I(networks: {name: other}) will + create a container with both C(default) and C(other) attached. If I(purge_networks) is + set to C(yes), the C(default) network will be removed afterwards." + type: bool + default: true + oom_killer: + description: + - Whether or not to disable OOM Killer for the container. + type: bool + oom_score_adj: + description: + - An integer value containing the score given to the container in order to tune + OOM killer preferences. + type: int + output_logs: + description: + - If set to true, output of the container command will be printed. + - Only effective when I(log_driver) is set to C(json-file), C(journald), or C(local). + type: bool + default: no + paused: + description: + - Use with the started state to pause running processes inside the container. + - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false). + type: bool + pid_mode: + description: + - Set the PID namespace mode for the container. + - Note that Docker SDK for Python < 2.0 only supports C(host). Newer versions of the + Docker SDK for Python (docker) allow all values supported by the Docker daemon. + type: str + pids_limit: + description: + - Set PIDs limit for the container. It accepts an integer value. + - Set C(-1) for unlimited PIDs. + type: int + privileged: + description: + - Give extended privileges to the container. + - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false). + type: bool + publish_all_ports: + description: + - Publish all ports to the host. + - Any specified port bindings from I(published_ports) will remain intact when C(true). + type: bool + version_added: 1.8.0 + published_ports: + description: + - List of ports to publish from the container to the host. + - "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a + container port, 9000 is a host port, and 0.0.0.0 is a host interface." 
+ - Port ranges can be used for source and destination ports. If two ranges with + different lengths are specified, the shorter range will be used. + Since community.general 0.2.0, if the source port range has length 1, the port will not be assigned + to the first port of the destination range, but to a free port in that range. This is the + same behavior as for C(docker) command line utility. + - "Bind addresses must be either IPv4 or IPv6 addresses. Hostnames are B(not) allowed. This + is different from the C(docker) command line utility. Use the R(dig lookup,ansible_collections.community.general.dig_lookup) + to resolve hostnames." + - If I(networks) parameter is provided, will inspect each network to see if there exists + a bridge network with optional parameter C(com.docker.network.bridge.host_binding_ipv4). + If such a network is found, then published ports where no host IP address is specified + will be bound to the host IP pointed to by C(com.docker.network.bridge.host_binding_ipv4). + Note that the first bridge network with a C(com.docker.network.bridge.host_binding_ipv4) + value encountered in the list of I(networks) is the one that will be used. + - The value C(all) was allowed in earlier versions of this module. Support for it was removed in + community.docker 3.0.0. Use the I(publish_all_ports) option instead. + type: list + elements: str + aliases: + - ports + pull: + description: + - If true, always pull the latest version of an image. Otherwise, will only pull an image + when missing. + - "B(Note:) images are only pulled when specified by name. If the image is specified + as a image ID (hash), it cannot be pulled." + type: bool + default: no + purge_networks: + description: + - Remove the container from ALL networks not included in I(networks) parameter. + - Any default networks such as C(bridge), if not found in I(networks), will be removed as well. 
+ type: bool + default: no + read_only: + description: + - Mount the container's root file system as read-only. + - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false). + type: bool + recreate: + description: + - Use with present and started states to force the re-creation of an existing container. + type: bool + default: no + removal_wait_timeout: + description: + - When removing an existing container, the docker daemon API call returns after the container + is scheduled for removal. Removal usually is very fast, but it can happen that during high I/O + load, removal can take longer. By default, the module will wait until the container has been + removed before trying to (re-)create it, however long this takes. + - By setting this option, the module will wait at most this many seconds for the container to be + removed. If the container is still in the removal phase after this many seconds, the module will + fail. + type: float + restart: + description: + - Use with started state to force a matching container to be stopped and restarted. + type: bool + default: no + restart_policy: + description: + - Container restart policy. + - Place quotes around C(no) option. + type: str + choices: + - 'no' + - 'on-failure' + - 'always' + - 'unless-stopped' + restart_retries: + description: + - Use with restart policy to control maximum number of restart attempts. + type: int + runtime: + description: + - Runtime to use for the container. + type: str + shm_size: + description: + - "Size of C(/dev/shm) in format C(<number>[<unit>]). Number is positive integer. + Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), + C(T) (tebibyte), or C(P) (pebibyte)." + - Omitting the unit defaults to bytes. If you omit the size entirely, Docker daemon uses C(64M). + type: str + security_opts: + description: + - List of security options in the form of C("label:user:User").
+ type: list + elements: str + state: + description: + - 'C(absent) - A container matching the specified name will be stopped and removed. Use I(force_kill) to kill the container + rather than stopping it. Use I(keep_volumes) to retain anonymous volumes associated with the removed container.' + - 'C(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no + container matches the name, a container will be created. If a container matches the name but the provided configuration + does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created + with the requested config.' + - 'C(started) - Asserts that the container is first C(present), and then if the container is not running moves it to a running + state. Use I(restart) to force a matching container to be stopped and restarted.' + - 'C(stopped) - Asserts that the container is first C(present), and then if the container is running moves it to a stopped + state.' + - To control what will be taken into account when comparing configuration, see the I(comparisons) option. To avoid that the + image version will be taken into account, you can also use the I(ignore_image) option. + - Use the I(recreate) option to always force re-creation of a matching container, even if it is running. + - If the container should be killed instead of stopped in case it needs to be stopped for recreation, or because I(state) is + C(stopped), please use the I(force_kill) option. Use I(keep_volumes) to retain anonymous volumes associated with a removed container. + - Use I(keep_volumes) to retain anonymous volumes associated with a removed container. + type: str + default: started + choices: + - absent + - present + - stopped + - started + stop_signal: + description: + - Override default signal used to stop the container. 
+ type: str + stop_timeout: + description: + - Number of seconds to wait for the container to stop before sending C(SIGKILL). + When the container is created by this module, its C(StopTimeout) configuration + will be set to this value. + - When the container is stopped, will be used as a timeout for stopping the + container. In case the container has a custom C(StopTimeout) configuration, + the behavior depends on the version of the docker daemon. New versions of + the docker daemon will always use the container's configured C(StopTimeout) + value if it has been configured. + type: int + storage_opts: + description: + - Storage driver options for this container as a key-value mapping. + type: dict + version_added: 1.3.0 + tmpfs: + description: + - Mount a tmpfs directory. + type: list + elements: str + tty: + description: + - Allocate a pseudo-TTY. + - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false). + type: bool + ulimits: + description: + - "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)." + type: list + elements: str + sysctls: + description: + - Dictionary of key,value pairs. + type: dict + user: + description: + - Sets the username or UID used and optionally the groupname or GID for the specified command. + - "Can be of the forms C(user), C(user:group), C(uid), C(uid:gid), C(user:gid) or C(uid:group)." + type: str + uts: + description: + - Set the UTS namespace mode for the container. + type: str + volumes: + description: + - List of volumes to mount within the container. + - "Use docker CLI-style syntax: C(/host:/container[:mode])" + - "Mount modes can be a comma-separated list of various modes such as C(ro), C(rw), C(consistent), + C(delegated), C(cached), C(rprivate), C(private), C(rshared), C(shared), C(rslave), C(slave), and + C(nocopy). Note that the docker daemon might not support all modes and combinations of such modes." 
+ - SELinux hosts can additionally use C(z) or C(Z) to use a shared or private label for the volume. + - "Note that Ansible 2.7 and earlier only supported one mode, which had to be one of C(ro), C(rw), + C(z), and C(Z)." + type: list + elements: str + volume_driver: + description: + - The container volume driver. + type: str + volumes_from: + description: + - List of container names or IDs to get volumes from. + type: list + elements: str + working_dir: + description: + - Path to the working directory. + type: str +extends_documentation_fragment: +- community.docker.docker +- community.docker.docker.docker_py_1_documentation + + +author: + - "Cove Schneider (@cove)" + - "Joshua Conner (@joshuaconner)" + - "Pavel Antonov (@softzilla)" + - "Thomas Steinbach (@ThomasSteinbach)" + - "Philippe Jandot (@zfil)" + - "Daan Oosterveld (@dusdanig)" + - "Chris Houseknecht (@chouseknecht)" + - "Kassian Sun (@kassiansun)" + - "Felix Fontein (@felixfontein)" + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0" + - "Docker API >= 1.25" +''' + +EXAMPLES = ''' +- name: Create a data container + community.docker.docker_container: + name: mydata + image: busybox + volumes: + - /data + +- name: Re-create a redis container + community.docker.docker_container: + name: myredis + image: redis + command: redis-server --appendonly yes + state: present + recreate: yes + exposed_ports: + - 6379 + volumes_from: + - mydata + +- name: Restart a container + community.docker.docker_container: + name: myapplication + image: someuser/appimage + state: started + restart: yes + links: + - "myredis:aliasedredis" + devices: + - "/dev/sda:/dev/xvda:rwm" + ports: + # Publish container port 9000 as host port 8080 + - "8080:9000" + # Publish container UDP port 9001 as host port 8081 on interface 127.0.0.1 + - "127.0.0.1:8081:9001/udp" + # Publish container port 9002 as a random host port + - "9002" + # Publish container port 9003 as a free host port in range 
8000-8100 + # (the host port will be selected by the Docker daemon) + - "8000-8100:9003" + # Publish container ports 9010-9020 to host ports 7000-7010 + - "7000-7010:9010-9020" + env: + SECRET_KEY: "ssssh" + # Values which might be parsed as numbers, booleans or other types by the YAML parser need to be quoted + BOOLEAN_KEY: "yes" + +- name: Container present + community.docker.docker_container: + name: mycontainer + state: present + image: ubuntu:14.04 + command: sleep infinity + +- name: Stop a container + community.docker.docker_container: + name: mycontainer + state: stopped + +- name: Start 4 load-balanced containers + community.docker.docker_container: + name: "container{{ item }}" + recreate: yes + image: someuser/anotherappimage + command: sleep 1d + with_sequence: count=4 + +- name: Remove container + community.docker.docker_container: + name: ohno + state: absent + +- name: Syslogging output + community.docker.docker_container: + name: myservice + image: busybox + log_driver: syslog + log_options: + syslog-address: tcp://my-syslog-server:514 + syslog-facility: daemon + # NOTE: in Docker 1.13+ the "syslog-tag" option was renamed to "tag" for + # older docker installs, use "syslog-tag" instead + tag: myservice + +- name: Create db container and connect to network + community.docker.docker_container: + name: db_test + image: "postgres:latest" + networks: + - name: "{{ docker_network_name }}" + +- name: Start container, connect to network and link + community.docker.docker_container: + name: sleeper + image: ubuntu:14.04 + networks: + - name: TestingNet + ipv4_address: "172.16.1.100" + aliases: + - sleepyzz + links: + - db_test:db + - name: TestingNet2 + +- name: Start a container with a command + community.docker.docker_container: + name: sleepy + image: ubuntu:14.04 + command: ["sleep", "infinity"] + +- name: Add container to networks + community.docker.docker_container: + name: sleepy + networks: + - name: TestingNet + ipv4_address: 172.16.1.18 + links: + 
- sleeper + - name: TestingNet2 + ipv4_address: 172.16.10.20 + +- name: Update network with aliases + community.docker.docker_container: + name: sleepy + networks: + - name: TestingNet + aliases: + - sleepyz + - zzzz + +- name: Remove container from one network + community.docker.docker_container: + name: sleepy + networks: + - name: TestingNet2 + purge_networks: yes + +- name: Remove container from all networks + community.docker.docker_container: + name: sleepy + purge_networks: yes + +- name: Start a container and use an env file + community.docker.docker_container: + name: agent + image: jenkinsci/ssh-slave + env_file: /var/tmp/jenkins/agent.env + +- name: Create a container with limited capabilities + community.docker.docker_container: + name: sleepy + image: ubuntu:16.04 + command: sleep infinity + capabilities: + - sys_time + cap_drop: + - all + +- name: Finer container restart/update control + community.docker.docker_container: + name: test + image: ubuntu:18.04 + env: + arg1: "true" + arg2: "whatever" + volumes: + - /tmp:/tmp + comparisons: + image: ignore # do not restart containers with older versions of the image + env: strict # we want precisely this environment + volumes: allow_more_present # if there are more volumes, that's ok, as long as `/tmp:/tmp` is there + +- name: Finer container restart/update control II + community.docker.docker_container: + name: test + image: ubuntu:18.04 + env: + arg1: "true" + arg2: "whatever" + comparisons: + '*': ignore # by default, ignore *all* options (including image) + env: strict # except for environment variables; there, we want to be strict + +- name: Start container with healthstatus + community.docker.docker_container: + name: nginx-proxy + image: nginx:1.13 + state: started + healthcheck: + # Check if nginx server is healthy by curl'ing the server. + # If this fails or timeouts, the healthcheck fails. 
+ test: ["CMD", "curl", "--fail", "http://nginx.host.com"] + interval: 1m30s + timeout: 10s + retries: 3 + start_period: 30s + +- name: Remove healthcheck from container + community.docker.docker_container: + name: nginx-proxy + image: nginx:1.13 + state: started + healthcheck: + # The "NONE" check needs to be specified + test: ["NONE"] + +- name: Start container with block device read limit + community.docker.docker_container: + name: test + image: ubuntu:18.04 + state: started + device_read_bps: + # Limit read rate for /dev/sda to 20 mebibytes per second + - path: /dev/sda + rate: 20M + device_read_iops: + # Limit read rate for /dev/sdb to 300 IO per second + - path: /dev/sdb + rate: 300 + +- name: Start container with GPUs + community.docker.docker_container: + name: test + image: ubuntu:18.04 + state: started + device_requests: + - # Add some specific devices to this container + device_ids: + - '0' + - 'GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a' + - # Add nVidia GPUs to this container + driver: nvidia + count: -1 # this means we want all + capabilities: + # We have one OR condition: 'gpu' AND 'utility' + - - gpu + - utility + # See https://github.com/NVIDIA/nvidia-container-runtime#supported-driver-capabilities + # for a list of capabilities supported by the nvidia driver + +- name: Start container with storage options + community.docker.docker_container: + name: test + image: ubuntu:18.04 + state: started + storage_opts: + # Limit root filesystem to 12 MB - note that this requires special storage backends + # (https://fabianlee.org/2020/01/15/docker-use-overlay2-with-an-xfs-backing-filesystem-to-limit-rootfs-size/) + size: 12m +''' + +RETURN = ''' +container: + description: + - Facts representing the current state of the container. Matches the docker inspection output. + - Empty if I(state) is C(absent). + - If I(detach=false), will include C(Output) attribute containing any output from container run. 
+ returned: success; or when I(state=started) and I(detach=false), and when waiting for the container result did not fail + type: dict + sample: '{ + "AppArmorProfile": "", + "Args": [], + "Config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/usr/bin/supervisord" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": { + "443/tcp": {}, + "80/tcp": {} + }, + "Hostname": "8e47bf643eb9", + "Image": "lnmp_nginx:v1", + "Labels": {}, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": { + "/tmp/lnmp/nginx-sites/logs/": {} + }, + ... + }' +status: + description: + - In case a container is started without detaching, this contains the exit code of the process in the container. + - Before community.docker 1.1.0, this was only returned when non-zero. + returned: when I(state=started) and I(detach=false), and when waiting for the container result did not fail + type: int + sample: 0 +''' + +import os +import re +import shlex +import traceback +from time import sleep + +from ansible.module_utils.common.text.formatters import human_to_bytes +from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.module_utils.six import string_types + +from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion + +from ansible_collections.community.docker.plugins.module_utils.common_api import ( + AnsibleDockerClient, + RequestException, +) +from ansible_collections.community.docker.plugins.module_utils.util import ( + DifferenceTracker, + DockerBaseClass, + compare_generic, + is_image_name_id, + sanitize_result, + clean_dict_booleans_for_docker_api, + omit_none_from_dict, + parse_healthcheck, + DOCKER_COMMON_ARGS, +) +from ansible_collections.community.docker.plugins.module_utils.module_container import ( + DockerAPIEngineDriver, + 
OPTIONS, +) + + +class ContainerManager(object): + def __init__(self, client, options): + self.client = client + self.options = options + self.results = {} + + def run(self): + # TODO + pass + + +def main(): + argument_spec = dict( + cleanup=dict(type='bool', default=False), + comparisons=dict(type='dict'), + container_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']), + command_handling=dict(type='str', choices=['compatibility', 'correct']), + default_host_ip=dict(type='str'), + force_kill=dict(type='bool', default=False, aliases=['forcekill']), + ignore_image=dict(type='bool', default=False), + image=dict(type='str'), + image_label_mismatch=dict(type='str', choices=['ignore', 'fail'], default='ignore'), + keep_volumes=dict(type='bool', default=True), + kill_signal=dict(type='str'), + name=dict(type='str', required=True), + pull=dict(type='bool', default=False), + recreate=dict(type='bool', default=False), + restart=dict(type='bool', default=False), + state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']), + stop_signal=dict(type='str'), + ) + + mutually_exclusive = [] + required_together = [] + required_one_of = [] + required_if = [ + ('state', 'present', ['image']) + ] + required_by = {} + + for options in OPTIONS: + mutually_exclusive.extend(options.ansible_mutually_exclusive) + required_together.extend(options.ansible_required_together) + required_one_of.extend(options.ansible_required_one_of) + required_if.extend(options.ansible_required_if) + required_by.update(options.ansible_required_by) + argument_spec.update(options.argument_spec) + + client = AnsibleDockerClientContainer( + argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + required_together=required_together, + required_one_of=required_one_of, + required_if=required_if, + required_by=required_by, + supports_check_mode=True, + ) + if client.module.params['networks_cli_compatible'] is True and 
client.module.params['networks'] and client.module.params['network_mode'] is None: + # Same behavior as Docker CLI: if networks are specified, use the name of the first network as the value for network_mode + # (assuming no explicit value is specified for network_mode) + client.module.params['network_mode'] = client.module.params['networks'][0]['name'] + + try: + cm = ContainerManager(client, OPTIONS) + cm.run() + client.module.exit_json(**sanitize_result(cm.results)) + except DockerException as e: + client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + except RequestException as e: + client.fail( + 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() From 216be8bf4b146b35c762691df24b7b65f40094c9 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 3 Jul 2022 17:06:22 +0200 Subject: [PATCH 05/38] Fix bugs. 
--- plugins/module_utils/common.py | 5 ++-- plugins/module_utils/common_api.py | 3 ++- plugins/module_utils/module_container.py | 29 ++++++++++++++---------- plugins/modules/docker_container2.py | 14 +++++++++++- 4 files changed, 35 insertions(+), 16 deletions(-) diff --git a/plugins/module_utils/common.py b/plugins/module_utils/common.py index 3081941bd..01ee330f3 100644 --- a/plugins/module_utils/common.py +++ b/plugins/module_utils/common.py @@ -557,8 +557,8 @@ def inspect_distribution(self, image, **kwargs): class AnsibleDockerClient(AnsibleDockerClientBase): def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None, - required_together=None, required_if=None, required_one_of=None, min_docker_version=None, - min_docker_api_version=None, option_minimal_versions=None, + required_together=None, required_if=None, required_one_of=None, required_by=None, + min_docker_version=None, min_docker_api_version=None, option_minimal_versions=None, option_minimal_versions_ignore_params=None, fail_results=None): # Modules can put information in here which will always be returned @@ -588,6 +588,7 @@ def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclu required_together=required_together_params, required_if=required_if, required_one_of=required_one_of, + required_by=required_by, ) self.debug = self.module.params.get('debug') diff --git a/plugins/module_utils/common_api.py b/plugins/module_utils/common_api.py index da844d441..e0eaeded1 100644 --- a/plugins/module_utils/common_api.py +++ b/plugins/module_utils/common_api.py @@ -467,7 +467,7 @@ def pull_image(self, name, tag="latest", platform=None): class AnsibleDockerClient(AnsibleDockerClientBase): def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None, - required_together=None, required_if=None, required_one_of=None, + required_together=None, required_if=None, required_one_of=None, required_by=None, min_docker_api_version=None, 
option_minimal_versions=None, option_minimal_versions_ignore_params=None, fail_results=None): @@ -498,6 +498,7 @@ def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclu required_together=required_together_params, required_if=required_if, required_one_of=required_one_of, + required_by=required_by, ) self.debug = self.module.params.get('debug') diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container.py index 2638b8456..c31db7d9c 100644 --- a/plugins/module_utils/module_container.py +++ b/plugins/module_utils/module_container.py @@ -18,7 +18,7 @@ def _get_ansible_type(type): if type == 'set': return 'list' - if type not in ('list', 'dict', 'int', 'float', 'str'): + if type not in ('list', 'dict', 'bool', 'int', 'float', 'str'): raise Exception('Invalid type "%s"' % (type, )) return type @@ -78,7 +78,7 @@ def preprocess(module, values): self.ansible_required_together = ansible_required_together or [] self.ansible_required_one_of = ansible_required_one_of or [] self.ansible_required_if = ansible_required_if or [] - self.ansible_required_by = ansible_required_by of {} + self.ansible_required_by = ansible_required_by or {} self.argument_spec = {} def add_option(self, *args, **kwargs): @@ -91,11 +91,17 @@ def add_option(self, *args, **kwargs): ansible_option['elements'] = option.ansible_elements if option.ansible_suboptions is not None: ansible_option['options'] = option.ansible_suboptions - if options.ansible_aliases: + if option.ansible_aliases: ansible_option['aliases'] = option.ansible_aliases self.argument_spec[option.name] = ansible_option return self + def supports_engine(self, engine_name): + return engine_name in self.engines + + def get_engine(self, engine_name): + return self.engines[engine_name] + def add_docker_api(self, docker_api): self.engines['docker_api'] = docker_api return self @@ -175,7 +181,7 @@ def update_value(module, data, api_version, options, values): value = values[options[0].name] 
if preprocess_for_set: value = preprocess_for_set(module, api_version, value) - data[update_paramete] = value + data[update_parameter] = value return cls(get_value=get_value, preprocess_value=preprocess_value_, set_value=set_value, min_docker_api=min_docker_api, update_value=update_value) @@ -228,7 +234,7 @@ def update_value(module, data, api_version, options, values): value = values[options[0].name] if preprocess_for_set: value = preprocess_for_set(module, api_version, value) - data[update_paramete] = value + data[update_parameter] = value return cls(get_value=get_value, preprocess_value=preprocess_value_, set_value=set_value, min_docker_api=min_docker_api, update_value=update_value) @@ -344,7 +350,7 @@ def _preprocess_etc_hosts(module, api_version, value): return value results = [] for key, value in value.items(): - results.append("%s%s%s" % (key, ':', value)) + results.append('%s%s%s' % (key, ':', value)) return results @@ -380,9 +386,9 @@ def _preprocess_convert_to_bytes(module, values, name, unlimited_value=None): value = unlimited_value else: value = human_to_bytes(value) - values[name] = + values[name] = value except ValueError as exc: - self.fail("Failed to convert %s to bytes: %s" % (name, to_native(exc))) + module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc))) def _preprocess_links(module, api_version, value): @@ -396,7 +402,7 @@ def _preprocess_links(module, api_version, value): link, alias = parsed_link else: link, alias = parsed_link[0], parsed_link[0] - result.append("/%s:/%s/%s" % (link, module.params['name'], alias)) + result.append('/%s:/%s/%s' % (link, module.params['name'], alias)) return result @@ -502,7 +508,7 @@ def _preprocess_links(module, api_version, value): OptionGroup() .add_option('dns_opts', type='set', elements='str') - .add_docker_api(DockerAPIEngine.host_config_value('DnsOptions'), + .add_docker_api(DockerAPIEngine.host_config_value('DnsOptions')), OptionGroup() .add_option('dns_search_domains', 
type='list', elements='str') @@ -548,7 +554,7 @@ def _preprocess_links(module, api_version, value): OptionGroup() .add_option('links', type='set', elements='list', ansible_elements='str') - .add_docker_api(DockerAPIEngine.config_value('Links', preprocess_values=_preprocess_links)), + .add_docker_api(DockerAPIEngine.config_value('Links', preprocess_value=_preprocess_links)), OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory')) .add_option('memory', type='int', ansible_type='str') @@ -616,7 +622,6 @@ def _preprocess_links(module, api_version, value): # aliases=dict(type='list', elements='str'), # links=dict(type='list', elements='str'), # )), -# networks_cli_compatible=dict(type='bool', default=True), # oom_killer=dict(type='bool'), # oom_score_adj=dict(type='int'), # output_logs=dict(type='bool', default=False), diff --git a/plugins/modules/docker_container2.py b/plugins/modules/docker_container2.py index 577fa3e8b..1824ebdc8 100644 --- a/plugins/modules/docker_container2.py +++ b/plugins/modules/docker_container2.py @@ -1260,6 +1260,7 @@ def main(): keep_volumes=dict(type='bool', default=True), kill_signal=dict(type='str'), name=dict(type='str', required=True), + networks_cli_compatible=dict(type='bool', default=True), pull=dict(type='bool', default=False), recreate=dict(type='bool', default=False), restart=dict(type='bool', default=False), @@ -1275,7 +1276,12 @@ def main(): ] required_by = {} + option_minimal_versions = {} + for options in OPTIONS: + if not options.supports_engine('docker_api'): + continue + mutually_exclusive.extend(options.ansible_mutually_exclusive) required_together.extend(options.ansible_required_together) required_one_of.extend(options.ansible_required_one_of) @@ -1283,13 +1289,19 @@ def main(): required_by.update(options.ansible_required_by) argument_spec.update(options.argument_spec) - client = AnsibleDockerClientContainer( + engine = options.get_engine('docker_api') + if engine.min_docker_api is not None: + for 
option in options.options: + option_minimal_versions[option.name] = {'docker_api_version': engine.min_docker_api} + + client = AnsibleDockerClient( argument_spec=argument_spec, mutually_exclusive=mutually_exclusive, required_together=required_together, required_one_of=required_one_of, required_if=required_if, required_by=required_by, + option_minimal_versions=option_minimal_versions, supports_check_mode=True, ) if client.module.params['networks_cli_compatible'] is True and client.module.params['networks'] and client.module.params['network_mode'] is None: From b96c108c450d5f2dfcafa3c444efffef8ce740eb Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 7 Jul 2022 18:49:43 +0200 Subject: [PATCH 06/38] Complete first basic implementation. --- changelogs/fragments/docker_container.yml | 9 + plugins/module_utils/_api/api/client.py | 3 + plugins/module_utils/module_container.py | 28 +- plugins/modules/docker_container2.py | 875 +++++++++++++++++++++- 4 files changed, 881 insertions(+), 34 deletions(-) create mode 100644 changelogs/fragments/docker_container.yml diff --git a/changelogs/fragments/docker_container.yml b/changelogs/fragments/docker_container.yml new file mode 100644 index 000000000..1dfb75edf --- /dev/null +++ b/changelogs/fragments/docker_container.yml @@ -0,0 +1,9 @@ +major_changes: + - "docker_container - no longer uses the Docker SDK for Python. It requires ``requests`` to be installed, + and depending on the features used has some more requirements. If the Docker SDK for Python is installed, + these requirements are likely met (https://github.com/ansible-collections/community.docker/pull/...)." + - "docker_container - the module was completely rewritten from scratch (https://github.com/ansible-collections/community.docker/pull/...)." +breaking_changes: + - "docker_container - ``publish_all_ports`` is no longer ignored in ``comparisons`` (https://github.com/ansible-collections/community.docker/pull/...)." 
+ - "docker_container - ``log_options`` can no longer be specified when ``log_driver`` is not specified (https://github.com/ansible-collections/community.docker/pull/...)." + - "docker_container - ``restart_retries`` can no longer be specified when ``restart_policy`` is not specified (https://github.com/ansible-collections/community.docker/pull/...)." diff --git a/plugins/module_utils/_api/api/client.py b/plugins/module_utils/_api/api/client.py index 554393253..b930cb95f 100644 --- a/plugins/module_utils/_api/api/client.py +++ b/plugins/module_utils/_api/api/client.py @@ -545,6 +545,9 @@ def delete_call(self, pathfmt, *args, **kwargs): def delete_json(self, pathfmt, *args, **kwargs): return self._result(self._delete(self._url(pathfmt, *args, versioned_api=True), **kwargs), json=True) + def post_call(self, pathfmt, *args, **kwargs): + self._raise_for_status(self._post(self._url(pathfmt, *args, versioned_api=True), **kwargs)) + def post_json(self, pathfmt, *args, **kwargs): data = kwargs.pop('data', None) self._raise_for_status(self._post_json(self._url(pathfmt, *args, versioned_api=True), data, **kwargs)) diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container.py index c31db7d9c..d4b66793a 100644 --- a/plugins/module_utils/module_container.py +++ b/plugins/module_utils/module_container.py @@ -28,11 +28,13 @@ def __init__( self, name, type, + owner, ansible_type=None, elements=None, ansible_elements=None, ansible_suboptions=None, ansible_aliases=None, + ansible_choices=None, ): self.name = name self.type = type @@ -55,7 +57,8 @@ def __init__( if ansible_suboptions is None and needs_suboptions: raise Exception('suboptions required for Ansible lists with dicts, or Ansible dicts') self.ansible_suboptions = ansible_suboptions if needs_suboptions else None - self.ansible_aliases = ansible_aliases + self.ansible_aliases = ansible_aliases or [] + self.ansible_choices = ansible_choices class OptionGroup(object): @@ -82,7 +85,7 @@ def 
preprocess(module, values): self.argument_spec = {} def add_option(self, *args, **kwargs): - option = Option(*args, **kwargs) + option = Option(*args, owner=self, **kwargs) self.options.append(option) ansible_option = { 'type': option.ansible_type, @@ -93,6 +96,8 @@ def add_option(self, *args, **kwargs): ansible_option['options'] = option.ansible_suboptions if option.ansible_aliases: ansible_option['aliases'] = option.ansible_aliases + if option.ansible_choices is not None: + ansible_option['choices'] = option.ansible_choices self.argument_spec[option.name] = ansible_option return self @@ -587,6 +592,11 @@ def _preprocess_links(module, api_version, value): # .add_option('log_options', type='dict', ansible_aliases=['log_opt']) # .add_docker_api(...) +# OptionGroup(ansible_required_by={'restart_retries': ['restart_policy']}) +# .add_option('restart_policy', type='str', ansible_choices=['no', 'on-failure', 'always', 'unless-stopped']) +# .add_option('restart_retries', type='int') +# .add_docker_api(...) 
+ # if self.mac_address: # # Ensure the MAC address uses colons instead of hyphens for later comparison # self.mac_address = self.mac_address.replace('-', ':') @@ -615,27 +625,14 @@ def _preprocess_links(module, api_version, value): # tmpfs_mode=dict(type='str'), # )), # network_mode=dict(type='str'), -# networks=dict(type='list', elements='dict', options=dict( -# name=dict(type='str', required=True), -# ipv4_address=dict(type='str'), -# ipv6_address=dict(type='str'), -# aliases=dict(type='list', elements='str'), -# links=dict(type='list', elements='str'), -# )), # oom_killer=dict(type='bool'), # oom_score_adj=dict(type='int'), -# output_logs=dict(type='bool', default=False), -# paused=dict(type='bool'), # pid_mode=dict(type='str'), # pids_limit=dict(type='int'), # privileged=dict(type='bool'), # publish_all_ports=dict(type='bool'), # published_ports=dict(type='list', elements='str', aliases=['ports']), -# purge_networks=dict(type='bool', default=False), # read_only=dict(type='bool'), -# removal_wait_timeout=dict(type='float'), -# restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']), -# restart_retries=dict(type='int'), # runtime=dict(type='str'), # security_opts=dict(type='list', elements='str'), # shm_size=dict(type='str'), @@ -656,7 +653,6 @@ def _preprocess_links(module, api_version, value): # explicit_types = dict( # env='set', # mounts='set(dict)', -# networks='set(dict)', # ulimits='set(dict)', # ) # diff --git a/plugins/modules/docker_container2.py b/plugins/modules/docker_container2.py index 1824ebdc8..8ba545e76 100644 --- a/plugins/modules/docker_container2.py +++ b/plugins/modules/docker_container2.py @@ -670,8 +670,6 @@ pid_mode: description: - Set the PID namespace mode for the container. - - Note that Docker SDK for Python < 2.0 only supports C(host). Newer versions of the - Docker SDK for Python (docker) allow all values supported by the Docker daemon. 
type: str pids_limit: description: @@ -898,7 +896,6 @@ - "Felix Fontein (@felixfontein)" requirements: - - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0" - "Docker API >= 1.25" ''' @@ -1218,6 +1215,10 @@ AnsibleDockerClient, RequestException, ) +from ansible_collections.community.docker.plugins.module_utils.module_container import ( + DockerAPIEngineDriver, + OPTIONS, +) from ansible_collections.community.docker.plugins.module_utils.util import ( DifferenceTracker, DockerBaseClass, @@ -1229,21 +1230,849 @@ parse_healthcheck, DOCKER_COMMON_ARGS, ) -from ansible_collections.community.docker.plugins.module_utils.module_container import ( - DockerAPIEngineDriver, - OPTIONS, -) + +from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, DockerException, NotFound + +from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import parse_repository_tag, normalize_links -class ContainerManager(object): - def __init__(self, client, options): +class Container(DockerBaseClass): + def __init__(self, container): + super(Container, self).__init__() + self.raw = container + self.Id = None + self.Image = None + self.container = container + if container: + self.Id = container['Id'] + self.Image = container['Image'] + self.log(self.container, pretty_print=True) + + @property + def exists(self): + return True if self.container else False + + @property + def removing(self): + if self.container and self.container.get('State'): + return self.container['State'].get('Status') == 'removing' + return False + + @property + def running(self): + if self.container and self.container.get('State'): + if self.container['State'].get('Running') and not self.container['State'].get('Ghost', False): + return True + return False + + @property + def paused(self): + if self.container and self.container.get('State'): + return self.container['State'].get('Paused', False) + return False + + +class 
ContainerManager(DockerBaseClass): + def __init__(self, client, active_options): self.client = client - self.options = options - self.results = {} + self.options = active_options + self.all_options = {} + for options in active_options: + for option in options.options: + self.all_options[option.name] = option + self.module = client.module + self.comparisons = {} + self._parse_comparisons() + self._update_params() + + self.check_mode = self.client.check_mode + self.results = {'changed': False, 'actions': []} + self.diff = {} + self.diff_tracker = DifferenceTracker() + self.facts = {} + + def _update_params(self): + if self.module.params['networks_cli_compatible'] is True and self.module.params['networks'] and self.module.params['network_mode'] is None: + # Same behavior as Docker CLI: if networks are specified, use the name of the first network as the value for network_mode + # (assuming no explicit value is specified for network_mode) + self.module.params['network_mode'] = self.module.params['networks'][0]['name'] + if self.module.params['container_default_behavior'] == 'compatibility': + old_default_values = dict( + auto_remove=False, + detach=True, + init=False, + interactive=False, + memory='0', + paused=False, + privileged=False, + read_only=False, + tty=False, + ) + for param, value in old_default_values.items(): + if self.module.params[param] is None: + self.module.params[param] = value + + def _parse_comparisons(self): + # Create default values for comparisons + self.comparisons['image'] = { + 'type': 'value', + 'comparison': 'strict', + 'name': 'image', + } + self.comparisons['networks'] = { + 'type': 'set(dict)', + 'comparison': 'allow_more_present', + 'name': 'networks', + } + default_comparison_values = dict( + stop_timeout='ignore', + ) + comp_aliases = {} + for option_name, option in self.all_options.items(): + # Keep track of all aliases + comp_aliases[option_name] = option_name + for alias in option.ansible_aliases: + comp_aliases[alias] = option_name 
+ # Determine datatype + datatype = option.type + if datatype == 'set' and option.elements == 'dict': + datatype = 'set(dict)' + elif datatype not in ('set', 'list', 'dict'): + datatype = 'value' + # Determine comparison + if option in default_comparison_values: + comparison = default_comparison_values[option] + elif datatype in ('list', 'value'): + comparison = 'strict' + else: + comparison = 'allow_more_present' + self.comparisons[option_name] = { + 'type': datatype, + 'comparison': comparison, + 'name': option_name, + } + # Collect all module options + all_module_options = set(comp_aliases) + for option, data in self.module.argument_spec.items(): + all_module_options.add(option) + if 'aliases' in data: + for alias in data['aliases']: + all_module_options.add(alias) + # Process legacy ignore options + if self.module.params['ignore_image']: + self.comparisons['image']['comparison'] = 'ignore' + if self.module.params['purge_networks']: + self.comparisons['networks']['comparison'] = 'strict' + # Process comparisons specified by user + if self.module.params.get('comparisons'): + # If '*' appears in comparisons, process it first + if '*' in self.module.params['comparisons']: + value = self.module.params['comparisons']['*'] + if value not in ('strict', 'ignore'): + self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!") + for option, v in self.comparisons.items(): + if option == 'networks': + # `networks` is special: only update if + # some value is actually specified + if self.module.params['networks'] is None: + continue + v['comparison'] = value + # Now process all other comparisons. 
+ comp_aliases_used = {} + for key, value in self.module.params['comparisons'].items(): + if key == '*': + continue + # Find main key + key_main = comp_aliases.get(key) + if key_main is None: + if key_main in all_module_options: + self.fail("The module option '%s' cannot be specified in the comparisons dict, " + "since it does not correspond to container's state!" % key) + if key not in self.comparisons: + self.fail("Unknown module option '%s' in comparisons dict!" % key) + key_main = key + if key_main in comp_aliases_used: + self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main)) + comp_aliases_used[key_main] = key + # Check value and update accordingly + if value in ('strict', 'ignore'): + self.comparisons[key_main]['comparison'] = value + elif value == 'allow_more_present': + if self.comparisons[key_main]['type'] == 'value': + self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value)) + self.comparisons[key_main]['comparison'] = value + else: + self.fail("Unknown comparison mode '%s'!" 
% value) + # Check legacy values + if self.module.params['ignore_image'] and self.comparisons['image']['comparison'] != 'ignore': + self.module.warn('The ignore_image option has been overridden by the comparisons option!') + if self.module.params['purge_networks'] and self.comparisons['networks']['comparison'] != 'strict': + self.module.warn('The purge_networks option has been overridden by the comparisons option!') + + def fail(self, *args, **kwargs): + self.client.fail(*args, **kwargs) def run(self): - # TODO - pass + state = self.module.params['state'] + if state in ('stopped', 'started', 'present'): + self.present(state) + elif state == 'absent': + self.absent() + + if not self.check_mode and not self.module.params['debug']: + self.results.pop('actions') + + if self.module._diff or self.module.params['debug']: + self.diff['before'], self.diff['after'] = self.diff_tracker.get_before_after() + self.results['diff'] = self.diff + + if self.facts: + self.results['container'] = self.facts + + def wait_for_state(self, container_id, complete_states=None, wait_states=None, accept_removal=False, max_wait=None): + delay = 1.0 + total_wait = 0 + while True: + # Inspect container + result = self.client.get_container_by_id(container_id) + if result is None: + if accept_removal: + return + msg = 'Encontered vanished container while waiting for container "{0}"' + self.fail(msg.format(container_id)) + # Check container state + state = result.get('State', {}).get('Status') + if complete_states is not None and state in complete_states: + return + if wait_states is not None and state not in wait_states: + msg = 'Encontered unexpected state "{1}" while waiting for container "{0}"' + self.fail(msg.format(container_id, state)) + # Wait + if max_wait is not None: + if total_wait > max_wait: + msg = 'Timeout of {1} seconds exceeded while waiting for container "{0}"' + self.fail(msg.format(container_id, max_wait)) + if total_wait + delay > max_wait: + delay = max_wait - total_wait + 
sleep(delay) + total_wait += delay + # Exponential backoff, but never wait longer than 10 seconds + # (1.1**24 < 10, 1.1**25 > 10, so it will take 25 iterations + # until the maximal 10 seconds delay is reached. By then, the + # code will have slept for ~1.5 minutes.) + delay = min(delay * 1.1, 10) + + def present(self, state): + container = self._get_container(self.module.params['name']) + was_running = container.running + was_paused = container.paused + container_created = False + + # If the image parameter was passed then we need to deal with the image + # version comparison. Otherwise we handle this depending on whether + # the container already runs or not; in the former case, in case the + # container needs to be restarted, we use the existing container's + # image ID. + image = self._get_image() + self.log(image, pretty_print=True) + if not container.exists or container.removing: + # New container + if container.removing: + self.log('Found container in removal phase') + else: + self.log('No container found') + if not self.module.params['image']: + self.fail('Cannot create container when image is not specified!') + self.diff_tracker.add('exists', parameter=True, active=False) + if container.removing and not self.check_mode: + # Wait for container to be removed before trying to create it + self.wait_for_state( + container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.module.params['removal_wait_timeout']) + new_container = self.container_create(self.module.params['image']) + if new_container: + container = new_container + container_created = True + else: + # Existing container + different, differences = self.has_different_configuration(container, image) + image_different = False + if self.comparisons['image']['comparison'] == 'strict': + image_different = self._image_is_different(image, container) + if image_different or different or self.module.params['recreate']: + self.diff_tracker.merge(differences) + self.diff['differences'] = 
differences.get_legacy_docker_container_diffs() + if image_different: + self.diff['image_different'] = True + self.log("differences") + self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True) + image_to_use = self.module.params['image'] + if not image_to_use and container and container.Image: + image_to_use = container.Image + if not image_to_use: + self.fail('Cannot recreate container when image is not specified or cannot be extracted from current container!') + if container.running: + self.container_stop(container.Id) + self.container_remove(container.Id) + if not self.check_mode: + self.wait_for_state( + container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.module.params['removal_wait_timeout']) + new_container = self.container_create(image_to_use) + if new_container: + container = new_container + container_created = True + + if container and container.exists: + container = self.update_limits(container) + container = self.update_networks(container, container_created) + + if state == 'started' and not container.running: + self.diff_tracker.add('running', parameter=True, active=was_running) + container = self.container_start(container.Id) + elif state == 'started' and self.module.params['restart']: + self.diff_tracker.add('running', parameter=True, active=was_running) + self.diff_tracker.add('restarted', parameter=True, active=False) + container = self.container_restart(container.Id) + elif state == 'stopped' and container.running: + self.diff_tracker.add('running', parameter=False, active=was_running) + self.container_stop(container.Id) + container = self._get_container(container.Id) + + if state == 'started' and self.module.params['paused'] is not None and container.paused != self.module.params['paused']: + self.diff_tracker.add('paused', parameter=self.module.params['paused'], active=was_paused) + if not self.check_mode: + try: + if self.module.params['paused']: + self.client.post_call('/containers/{0}/pause', 
container.Id) + else: + self.client.post_call('/containers/{0}/unpause', container.Id) + except Exception as exc: + self.fail("Error %s container %s: %s" % ( + "pausing" if self.module.params['paused'] else "unpausing", container.Id, to_native(exc) + )) + container = self._get_container(container.Id) + self.results['changed'] = True + self.results['actions'].append(dict(set_paused=self.module.params['paused'])) + + self.facts = container.raw + + def absent(self): + container = self._get_container(self.module.params['name']) + if container.exists: + if container.running: + self.diff_tracker.add('running', parameter=False, active=True) + self.container_stop(container.Id) + self.diff_tracker.add('exists', parameter=False, active=True) + self.container_remove(container.Id) + + def _output_logs(self, msg): + self.client.module.log(msg=msg) + + def _get_container(self, container): + ''' + Expects container ID or Name. Returns a container object + ''' + return Container(self.client.get_container(container)) + + def _get_image(self): + image_parameter = self.module.params['image'] + if not image_parameter: + self.log('No image specified') + return None + if is_image_name_id(image_parameter): + image = self.client.find_image_by_id(image_parameter) + else: + repository, tag = parse_repository_tag(image_parameter) + if not tag: + tag = "latest" + image = self.client.find_image(repository, tag) + if not image or self.module.params['pull']: + if not self.check_mode: + self.log("Pull the image.") + image, alreadyToLatest = self.client.pull_image(repository, tag) + if alreadyToLatest: + self.results['changed'] = False + else: + self.results['changed'] = True + self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag))) + elif not image: + # If the image isn't there, claim we'll pull. + # (Implicitly: if the image is there, claim it already was latest.) 
+ self.results['changed'] = True + self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag))) + + self.log("image") + self.log(image, pretty_print=True) + return image + + def _image_is_different(self, image, container): + if image and image.get('Id'): + if container and container.Image: + if image.get('Id') != container.Image: + self.diff_tracker.add('image', parameter=image.get('Id'), active=container.Image) + return True + return False + + def _compose_create_parameters(self, image): + params = { + 'Image': image, + } + for options in self.options: + engine = options.get_engine('docker_api') + if engine.can_set_value(self.client.docker_api_version): + values = {} + for option in options.options: + if self.module.params[option.name] is not None: + values[option.name] = self.module.params[option.name] + engine.set_value(self.module, params, self.client.docker_api_version, options.options, values) + return params + + def has_different_configuration(self, container, image): + differences = DifferenceTracker() + for options in self.options: + engine = options.get_engine('docker_api') + container_values = engine.get_value(self.module, container.raw, self.client.docker_api_version, options.options) + for option in options.options: + if self.module.params[option.name] is not None: + param_value = self.module.params[option.name] + container_value = container_values.get(option.name) + compare = self.comparisons[option.name] + match = compare_generic(param_value, container_value, compare['comparison'], compare['type']) + + if not match: + # TODO + # if option.option == 'healthcheck' and config_mapping['disable_healthcheck'] and self.parameters.disable_healthcheck: + # # If the healthcheck is disabled (both in parameters and for the current container), and the user + # # requested strict comparison for healthcheck, the comparison will fail. That's why we ignore the + # # expected_healthcheck comparison in this case. 
+ # continue + + if option.option == 'labels' and compare['comparison'] == 'strict' and self.module.params['image_label_mismatch'] == 'fail': + # If there are labels from the base image that should be removed and + # image_label_mismatch is fail we want to raise an error. + image_labels = self._get_image_labels(image) + would_remove_labels = [] + for label in image_labels: + if label not in self.module.params['labels']: + # Format label for error message + would_remove_labels.append(label) + if would_remove_labels: + msg = ("Some labels should be removed but are present in the base image. You can set image_label_mismatch to 'ignore' to ignore" + " this error. Labels: {0}") + self.fail(msg.format(', '.join(['"%s"' % label for label in would_remove_labels]))) + + # no match. record the differences + p = param_value + c = container_value + if compare['type'] == 'set': + # Since the order does not matter, sort so that the diff output is better. + if p is not None: + p = sorted(p) + if c is not None: + c = sorted(c) + elif compare['type'] == 'set(dict)': + # Since the order does not matter, sort so that the diff output is better. + if option.option == 'expected_mounts': + # For selected values, use one entry as key + def sort_key_fn(x): + return x['target'] + else: + # We sort the list of dictionaries by using the sorted items of a dict as its key. 
+ def sort_key_fn(x): + return sorted((a, to_text(b, errors='surrogate_or_strict')) for a, b in x.items()) + if p is not None: + p = sorted(p, key=sort_key_fn) + if c is not None: + c = sorted(c, key=sort_key_fn) + differences.add(option.option, parameter=p, active=c) + + has_differences = not differences.empty + return has_differences, differences + + def has_different_resource_limits(self, container): + ''' + Diff parameters and container resource limits + ''' + differences = DifferenceTracker() + for options in self.options: + engine = options.get_engine('docker_api') + if not engine.can_update_value(self.client.docker_api_version): + continue + container_values = engine.get_value(self.module, container.raw, self.client.docker_api_version, options.options) + for option in options.options: + if self.module.params[option.name] is not None: + param_value = self.module.params[option.name] + container_value = container_values.get(option.name) + compare = self.comparisons[option.name] + match = compare_generic(param_value, container_value, compare['comparison'], compare['type']) + + if not match: + # no match. 
record the differences + differences.add(option.option, parameter=param_value, active=container_value) + different = not differences.empty + return different, differences + + def _compose_update_parameters(self): + result = {} + for options in self.options: + engine = options.get_engine('docker_api') + if not engine.can_update_value(self.client.docker_api_version): + continue + values = {} + for option in options.options: + if self.module.params[option.option] is not None: + values[option.option] = self.module.params[option.option] + engine.update_value(self.module, result, self.client.docker_api_version, options.options, values) + return result + + def update_limits(self, container): + limits_differ, different_limits = self.has_different_resource_limits(container) + if limits_differ: + self.log("limit differences:") + self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True) + self.diff_tracker.merge(different_limits) + if limits_differ and not self.check_mode: + self.container_update(container.Id, self._compose_update_parameters()) + return self._get_container(container.Id) + return container + + def has_network_differences(self, container): + ''' + Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6 + ''' + different = False + differences = [] + + if not self.module.params['networks']: + return different, differences + + if not container.container.get('NetworkSettings'): + self.fail("has_missing_networks: Error parsing container properties. 
NetworkSettings missing.") + + connected_networks = container.container['NetworkSettings']['Networks'] + for network in self.module.params['networks']: + network_info = connected_networks.get(network['name']) + if network_info is None: + different = True + differences.append(dict( + parameter=network, + container=None + )) + else: + diff = False + network_info_ipam = network_info.get('IPAMConfig') or {} + if network.get('ipv4_address') and network['ipv4_address'] != network_info_ipam.get('IPv4Address'): + diff = True + if network.get('ipv6_address') and network['ipv6_address'] != network_info_ipam.get('IPv6Address'): + diff = True + if network.get('aliases'): + if not compare_generic(network['aliases'], network_info.get('Aliases'), 'allow_more_present', 'set'): + diff = True + if network.get('links'): + expected_links = [] + for link, alias in network['links']: + expected_links.append("%s:%s" % (link, alias)) + if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'): + diff = True + if diff: + different = True + differences.append(dict( + parameter=network, + container=dict( + name=network['name'], + ipv4_address=network_info_ipam.get('IPv4Address'), + ipv6_address=network_info_ipam.get('IPv6Address'), + aliases=network_info.get('Aliases'), + links=network_info.get('Links') + ) + )) + return different, differences + + def has_extra_networks(self, container): + ''' + Check if the container is connected to non-requested networks + ''' + extra_networks = [] + extra = False + + if not container.container.get('NetworkSettings'): + self.fail("has_extra_networks: Error parsing container properties. 
NetworkSettings missing.") + + connected_networks = container.container['NetworkSettings'].get('Networks') + if connected_networks: + for network, network_config in connected_networks.items(): + keep = False + if self.module.params['networks']: + for expected_network in self.module.params['networks']: + if expected_network['name'] == network: + keep = True + if not keep: + extra = True + extra_networks.append(dict(name=network, id=network_config['NetworkID'])) + return extra, extra_networks + + def update_networks(self, container, container_created): + updated_container = container + if self.comparisons['networks']['comparison'] != 'ignore' or container_created: + has_network_differences, network_differences = self.has_network_differences(container) + if has_network_differences: + if self.diff.get('differences'): + self.diff['differences'].append(dict(network_differences=network_differences)) + else: + self.diff['differences'] = [dict(network_differences=network_differences)] + for netdiff in network_differences: + self.diff_tracker.add( + 'network.{0}'.format(netdiff['parameter']['name']), + parameter=netdiff['parameter'], + active=netdiff['container'] + ) + self.results['changed'] = True + updated_container = self._add_networks(container, network_differences) + + if (self.comparisons['networks']['comparison'] == 'strict' and self.module.params['networks'] is not None) or self.module.params['purge_networks']: + has_extra_networks, extra_networks = self.has_extra_networks(container) + if has_extra_networks: + if self.diff.get('differences'): + self.diff['differences'].append(dict(purge_networks=extra_networks)) + else: + self.diff['differences'] = [dict(purge_networks=extra_networks)] + for extra_network in extra_networks: + self.diff_tracker.add( + 'network.{0}'.format(extra_network['name']), + active=extra_network + ) + self.results['changed'] = True + updated_container = self._purge_networks(container, extra_networks) + return updated_container + + def 
_add_networks(self, container, differences): + for diff in differences: + # remove the container from the network, if connected + if diff.get('container'): + self.results['actions'].append(dict(removed_from_network=diff['parameter']['name'])) + if not self.check_mode: + try: + self.client.post_json('/networks/{0}/disconnect', diff['parameter']['id'], data={'Container': container.Id}) + except Exception as exc: + self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'], + to_native(exc))) + # connect to the network + params = dict() + for para, dest_para in {'ipv4_address': 'IPv4Address', 'ipv6_address': 'IPv6Address', 'links': 'Links', 'aliases': 'Aliases'}.items(): + if diff['parameter'].get(para): + value = diff['parameter'][para] + if para == 'links': + value = normalize_links(value) + params[dest_para] = value + self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params)) + if not self.check_mode: + try: + self.log("Connecting container to network %s" % diff['parameter']['id']) + self.log(params, pretty_print=True) + data = { + 'Container': container.Id, + 'EndpointConfig': params, + } + self.client.post_json('/networks/{0}/connect', diff['parameter']['id'], data=data) + except Exception as exc: + self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], to_native(exc))) + return self._get_container(container.Id) + + def _purge_networks(self, container, networks): + for network in networks: + self.results['actions'].append(dict(removed_from_network=network['name'])) + if not self.check_mode: + try: + self.client.post_json('/networks/{0}/disconnect', network['name'], data={'Container': container.Id}) + except Exception as exc: + self.fail("Error disconnecting container from network %s - %s" % (network['name'], + to_native(exc))) + return self._get_container(container.Id) + + def container_create(self, image): + create_parameters = 
self._compose_create_parameters(image) + self.log("create container") + self.log("image: %s parameters:" % image) + self.log(create_parameters, pretty_print=True) + self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters)) + self.results['changed'] = True + new_container = None + if not self.check_mode: + try: + params = {'name': self.module.params['name']} + new_container = self.client.post_json_to_json('/containers/create', data=create_parameters, params=params) + self.client.report_warnings(new_container) + except Exception as exc: + self.fail("Error creating container: %s" % to_native(exc)) + return self._get_container(new_container['Id']) + return new_container + + def container_start(self, container_id): + self.log("start container %s" % (container_id)) + self.results['actions'].append(dict(started=container_id)) + self.results['changed'] = True + if not self.check_mode: + try: + self.client.post_json('/containers/{0}/start', container_id) + except Exception as exc: + self.fail("Error starting container %s: %s" % (container_id, to_native(exc))) + + if self.module.params['detach'] is False: + status = self.client.post_json_as_json('/containers/{0}/wait', container_id)['StatusCode'] + self.client.fail_results['status'] = status + self.results['status'] = status + + if self.module.params['auto_remove']: + output = "Cannot retrieve result as auto_remove is enabled" + if self.module.params['output_logs']: + self.client.module.warn('Cannot output_logs if auto_remove is enabled!') + else: + config = self.client.get_json('/containers/{0}/json', container_id) + logging_driver = config['HostConfig']['LogConfig']['Type'] + + if logging_driver in ('json-file', 'journald', 'local'): + params = { + 'stderr': 1, + 'stdout': 1, + 'timestamps': 0, + 'follow': 0, + 'tail': 'all', + } + res = self.client._get(self.client._url('/containers/{0}/logs', container_id), params=params) + output = self.client._get_result_tty(False, res, 
config['Config']['Tty']) + if self.module.params['output_logs']: + self._output_logs(msg=output) + else: + output = "Result logged using `%s` driver" % logging_driver + + if self.module.params['cleanup']: + self.container_remove(container_id, force=True) + insp = self._get_container(container_id) + if insp.raw: + insp.raw['Output'] = output + else: + insp.raw = dict(Output=output) + if status != 0: + # Set `failed` to True and return output as msg + self.results['failed'] = True + self.results['msg'] = output + return insp + return self._get_container(container_id) + + def container_remove(self, container_id, link=False, force=False): + volume_state = (not self.module.params['keep_volumes']) + self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force)) + self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force)) + self.results['changed'] = True + if not self.check_mode: + count = 0 + while True: + try: + params = {'v': volume_state, 'link': link, 'force': force} + self.client.delete_call('"/containers/{0}', container_id, params=params) + except NotFound as dummy: + pass + except APIError as exc: + if 'Unpause the container before stopping or killing' in exc.explanation: + # New docker daemon versions do not allow containers to be removed + # if they are paused. Make sure we don't end up in an infinite loop. 
+ if count == 3: + self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, to_native(exc))) + count += 1 + # Unpause + try: + self.client.post_call('/containers/{0}/unpause', container_id) + except Exception as exc2: + self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2))) + # Now try again + continue + if 'removal of container ' in exc.explanation and ' is already in progress' in exc.explanation: + pass + else: + self.fail("Error removing container %s: %s" % (container_id, to_native(exc))) + except Exception as exc: + self.fail("Error removing container %s: %s" % (container_id, to_native(exc))) + # We only loop when explicitly requested by 'continue' + break + + def container_update(self, container_id, update_parameters): + if update_parameters: + self.log("update container %s" % (container_id)) + self.log(update_parameters, pretty_print=True) + self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters)) + self.results['changed'] = True + if not self.check_mode and callable(getattr(self.client, 'update_container')): + try: + result = self.client.post_json_to_json('/containers/{0}/update', container_id, data=update_parameters) + self.client.report_warnings(result) + except Exception as exc: + self.fail("Error updating container %s: %s" % (container_id, to_native(exc))) + return self._get_container(container_id) + + def container_kill(self, container_id): + self.results['actions'].append(dict(killed=container_id, signal=self.module.params['kill_signal'])) + self.results['changed'] = True + if not self.check_mode: + try: + params = {} + if self.module.params['kill_signal'] is not None: + params['signal'] = int(self.module.params['kill_signal']) + self.client.post_call('/containers/{0}/kill', container_id, params=params) + except Exception as exc: + self.fail("Error killing container %s: %s" % (container_id, to_native(exc))) + + def container_restart(self, 
container_id): + self.results['actions'].append(dict(restarted=container_id, timeout=self.module.params['stop_timeout'])) + self.results['changed'] = True + if not self.check_mode: + try: + timeout = self.module.params['stop_timeout'] or 10 + client_timeout = self.client.timeout + if client_timeout is not None: + client_timeout += timeout + self.client.post('/containers/{0}/restart', container_id, params={'t': timeout}, timeout=client_timeout) + except Exception as exc: + self.fail("Error restarting container %s: %s" % (container_id, to_native(exc))) + return self._get_container(container_id) + + def container_stop(self, container_id): + if self.module.params['force_kill']: + self.container_kill(container_id) + return + self.results['actions'].append(dict(stopped=container_id, timeout=self.module.params['stop_timeout'])) + self.results['changed'] = True + if not self.check_mode: + count = 0 + while True: + try: + timeout = self.module.params['stop_timeout'] + if timeout: + params = {'t': timeout} + else: + params = {} + timeout = 10 + client_timeout = self.client.timeout + if client_timeout is not None: + client_timeout += timeout + self.client.post('/containers/{0}/stop', container_id, params=params, timeout=client_timeout) + except APIError as exc: + if 'Unpause the container before stopping or killing' in exc.explanation: + # New docker daemon versions do not allow containers to be removed + # if they are paused. Make sure we don't end up in an infinite loop. 
+ if count == 3: + self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, to_native(exc))) + count += 1 + # Unpause + try: + self.client.post_call('/containers/{0}/unpause', container_id) + except Exception as exc2: + self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2))) + # Now try again + continue + self.fail("Error stopping container %s: %s" % (container_id, to_native(exc))) + except Exception as exc: + self.fail("Error stopping container %s: %s" % (container_id, to_native(exc))) + # We only loop when explicitly requested by 'continue' + break def main(): @@ -1260,9 +2089,20 @@ def main(): keep_volumes=dict(type='bool', default=True), kill_signal=dict(type='str'), name=dict(type='str', required=True), + networks=dict(type='list', elements='dict', options=dict( + name=dict(type='str', required=True), + ipv4_address=dict(type='str'), + ipv6_address=dict(type='str'), + aliases=dict(type='list', elements='str'), + links=dict(type='list', elements='str'), + )), networks_cli_compatible=dict(type='bool', default=True), + output_logs=dict(type='bool', default=False), + paused=dict(type='bool'), pull=dict(type='bool', default=False), + purge_networks=dict(type='bool', default=False), recreate=dict(type='bool', default=False), + removal_wait_timeout=dict(type='float'), restart=dict(type='bool', default=False), state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']), stop_signal=dict(type='str'), @@ -1278,6 +2118,7 @@ def main(): option_minimal_versions = {} + active_options = [] for options in OPTIONS: if not options.supports_engine('docker_api'): continue @@ -1294,6 +2135,8 @@ def main(): for option in options.options: option_minimal_versions[option.name] = {'docker_api_version': engine.min_docker_api} + active_options.append(options) + client = AnsibleDockerClient( argument_spec=argument_spec, mutually_exclusive=mutually_exclusive, @@ -1304,13 +2147,9 @@ def 
main(): option_minimal_versions=option_minimal_versions, supports_check_mode=True, ) - if client.module.params['networks_cli_compatible'] is True and client.module.params['networks'] and client.module.params['network_mode'] is None: - # Same behavior as Docker CLI: if networks are specified, use the name of the first network as the value for network_mode - # (assuming no explicit value is specified for network_mode) - client.module.params['network_mode'] = client.module.params['networks'][0]['name'] try: - cm = ContainerManager(client, OPTIONS) + cm = ContainerManager(client, active_options) cm.run() client.module.exit_json(**sanitize_result(cm.results)) except DockerException as e: From 535374b64f2508287573ca933dde9a805773c5d7 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 8 Jul 2022 08:02:09 +0200 Subject: [PATCH 07/38] Continuing. --- plugins/module_utils/module_container.py | 3 +- plugins/modules/docker_container2.py | 135 +++++++++++++---------- 2 files changed, 79 insertions(+), 59 deletions(-) diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container.py index d4b66793a..1c482247e 100644 --- a/plugins/module_utils/module_container.py +++ b/plugins/module_utils/module_container.py @@ -384,7 +384,7 @@ def _postprocess_healthcheck_get_value(module, api_version, value, sentry): def _preprocess_convert_to_bytes(module, values, name, unlimited_value=None): if name not in values: - return + return values try: value = values[name] if unlimited_value is not None and value == 'unlimited': @@ -392,6 +392,7 @@ def _preprocess_convert_to_bytes(module, values, name, unlimited_value=None): else: value = human_to_bytes(value) values[name] = value + return values except ValueError as exc: module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc))) diff --git a/plugins/modules/docker_container2.py b/plugins/modules/docker_container2.py index 8ba545e76..31b179a45 100644 --- 
a/plugins/modules/docker_container2.py +++ b/plugins/modules/docker_container2.py @@ -1281,22 +1281,50 @@ def __init__(self, client, active_options): for option in options.options: self.all_options[option.name] = option self.module = client.module + self.param_cleanup = self.module.params['cleanup'] + self.param_container_default_behavior = self.module.params['container_default_behavior'] + self.param_debug = self.module.params['debug'] + self.param_force_kill = self.module.params['force_kill'] + self.param_image = self.module.params['image'] + self.param_image_label_mismatch = self.module.params['image_label_mismatch'] + self.param_keep_volumes = self.module.params['keep_volumes'] + self.param_kill_signal = self.module.params['kill_signal'] + self.param_name = self.module.params['name'] + self.param_networks_cli_compatible = self.module.params['networks_cli_compatible'] + self.param_output_logs = self.module.params['output_logs'] + self.param_paused = self.module.params['paused'] + self.param_pull = self.module.params['pull'] + self.param_purge_networks = self.module.params['purge_networks'] + self.param_recreate = self.module.params['recreate'] + self.param_removal_wait_timeout = self.module.params['removal_wait_timeout'] + self.param_restart = self.module.params['restart'] + self.param_state = self.module.params['state'] self.comparisons = {} self._parse_comparisons() self._update_params() - self.check_mode = self.client.check_mode self.results = {'changed': False, 'actions': []} self.diff = {} self.diff_tracker = DifferenceTracker() self.facts = {} + self.parameters = [] + for options in active_options: + values = {} + engine = options.get_engine('docker_api') + for option in options.options: + if self.module.params[option.name] is not None: + values[option.name] = self.module.params[option.name] + values = options.preprocess(self.module, values) + engine.preprocess_value(self.module, self.client.docker_api_version, options.options, values) + 
self.parameters.append((options, values)) + def _update_params(self): - if self.module.params['networks_cli_compatible'] is True and self.module.params['networks'] and self.module.params['network_mode'] is None: + if self.param_networks_cli_compatible is True and self.module.params['networks'] and self.module.params['network_mode'] is None: # Same behavior as Docker CLI: if networks are specified, use the name of the first network as the value for network_mode # (assuming no explicit value is specified for network_mode) self.module.params['network_mode'] = self.module.params['networks'][0]['name'] - if self.module.params['container_default_behavior'] == 'compatibility': + if self.param_container_default_behavior == 'compatibility': old_default_values = dict( auto_remove=False, detach=True, @@ -1361,7 +1389,7 @@ def _parse_comparisons(self): # Process legacy ignore options if self.module.params['ignore_image']: self.comparisons['image']['comparison'] = 'ignore' - if self.module.params['purge_networks']: + if self.param_purge_networks: self.comparisons['networks']['comparison'] = 'strict' # Process comparsions specified by user if self.module.params.get('comparisons'): @@ -1406,23 +1434,22 @@ def _parse_comparisons(self): # Check legacy values if self.module.params['ignore_image'] and self.comparisons['image']['comparison'] != 'ignore': self.module.warn('The ignore_image option has been overridden by the comparisons option!') - if self.module.params['purge_networks'] and self.comparisons['networks']['comparison'] != 'strict': + if self.param_purge_networks and self.comparisons['networks']['comparison'] != 'strict': self.module.warn('The purge_networks option has been overridden by the comparisons option!') def fail(self, *args, **kwargs): self.client.fail(*args, **kwargs) def run(self): - state = self.module.params['state'] - if state in ('stopped', 'started', 'present'): - self.present(state) - elif state == 'absent': + if self.param_state in ('stopped', 'started', 
'present'): + self.present(self.param_state) + elif self.param_state == 'absent': self.absent() - if not self.check_mode and not self.module.params['debug']: + if not self.check_mode and not self.param_debug: self.results.pop('actions') - if self.module._diff or self.module.params['debug']: + if self.module._diff or self.param_debug: self.diff['before'], self.diff['after'] = self.diff_tracker.get_before_after() self.results['diff'] = self.diff @@ -1463,7 +1490,7 @@ def wait_for_state(self, container_id, complete_states=None, wait_states=None, a delay = min(delay * 1.1, 10) def present(self, state): - container = self._get_container(self.module.params['name']) + container = self._get_container(self.param_name) was_running = container.running was_paused = container.paused container_created = False @@ -1481,14 +1508,14 @@ def present(self, state): self.log('Found container in removal phase') else: self.log('No container found') - if not self.module.params['image']: + if not self.param_image: self.fail('Cannot create container when image is not specified!') self.diff_tracker.add('exists', parameter=True, active=False) if container.removing and not self.check_mode: # Wait for container to be removed before trying to create it self.wait_for_state( - container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.module.params['removal_wait_timeout']) - new_container = self.container_create(self.module.params['image']) + container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout) + new_container = self.container_create(self.param_image) if new_container: container = new_container container_created = True @@ -1498,14 +1525,14 @@ def present(self, state): image_different = False if self.comparisons['image']['comparison'] == 'strict': image_different = self._image_is_different(image, container) - if image_different or different or self.module.params['recreate']: + if image_different or different or self.param_recreate: 
self.diff_tracker.merge(differences) self.diff['differences'] = differences.get_legacy_docker_container_diffs() if image_different: self.diff['image_different'] = True self.log("differences") self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True) - image_to_use = self.module.params['image'] + image_to_use = self.param_image if not image_to_use and container and container.Image: image_to_use = container.Image if not image_to_use: @@ -1515,7 +1542,7 @@ def present(self, state): self.container_remove(container.Id) if not self.check_mode: self.wait_for_state( - container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.module.params['removal_wait_timeout']) + container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout) new_container = self.container_create(image_to_use) if new_container: container = new_container @@ -1528,7 +1555,7 @@ def present(self, state): if state == 'started' and not container.running: self.diff_tracker.add('running', parameter=True, active=was_running) container = self.container_start(container.Id) - elif state == 'started' and self.module.params['restart']: + elif state == 'started' and self.param_restart: self.diff_tracker.add('running', parameter=True, active=was_running) self.diff_tracker.add('restarted', parameter=True, active=False) container = self.container_restart(container.Id) @@ -1537,26 +1564,26 @@ def present(self, state): self.container_stop(container.Id) container = self._get_container(container.Id) - if state == 'started' and self.module.params['paused'] is not None and container.paused != self.module.params['paused']: - self.diff_tracker.add('paused', parameter=self.module.params['paused'], active=was_paused) + if state == 'started' and self.param_paused is not None and container.paused != self.param_paused: + self.diff_tracker.add('paused', parameter=self.param_paused, active=was_paused) if not self.check_mode: try: - if 
self.module.params['paused']: + if self.param_paused: self.client.post_call('/containers/{0}/pause', container.Id) else: self.client.post_call('/containers/{0}/unpause', container.Id) except Exception as exc: self.fail("Error %s container %s: %s" % ( - "pausing" if self.module.params['paused'] else "unpausing", container.Id, to_native(exc) + "pausing" if self.param_paused else "unpausing", container.Id, to_native(exc) )) container = self._get_container(container.Id) self.results['changed'] = True - self.results['actions'].append(dict(set_paused=self.module.params['paused'])) + self.results['actions'].append(dict(set_paused=self.param_paused)) self.facts = container.raw def absent(self): - container = self._get_container(self.module.params['name']) + container = self._get_container(self.param_name) if container.exists: if container.running: self.diff_tracker.add('running', parameter=False, active=True) @@ -1574,7 +1601,7 @@ def _get_container(self, container): return Container(self.client.get_container(container)) def _get_image(self): - image_parameter = self.module.params['image'] + image_parameter = self.param_image if not image_parameter: self.log('No image specified') return None @@ -1585,7 +1612,7 @@ def _get_image(self): if not tag: tag = "latest" image = self.client.find_image(repository, tag) - if not image or self.module.params['pull']: + if not image or self.param_pull: if not self.check_mode: self.log("Pull the image.") image, alreadyToLatest = self.client.pull_image(repository, tag) @@ -1616,37 +1643,33 @@ def _compose_create_parameters(self, image): params = { 'Image': image, } - for options in self.options: + for options, values in self.parameters: engine = options.get_engine('docker_api') if engine.can_set_value(self.client.docker_api_version): - values = {} - for option in options.options: - if self.module.params[option.name] is not None: - values[option.name] = self.module.params[option.name] engine.set_value(self.module, params, 
self.client.docker_api_version, options.options, values) return params def has_different_configuration(self, container, image): differences = DifferenceTracker() - for options in self.options: + for options, param_values in self.parameters: engine = options.get_engine('docker_api') container_values = engine.get_value(self.module, container.raw, self.client.docker_api_version, options.options) for option in options.options: - if self.module.params[option.name] is not None: - param_value = self.module.params[option.name] + if option.name in param_values: + param_value = param_values[option.name] container_value = container_values.get(option.name) compare = self.comparisons[option.name] match = compare_generic(param_value, container_value, compare['comparison'], compare['type']) if not match: # TODO - # if option.option == 'healthcheck' and config_mapping['disable_healthcheck'] and self.parameters.disable_healthcheck: + # if option.name == 'healthcheck' and config_mapping['disable_healthcheck'] and self.parameters.disable_healthcheck: # # If the healthcheck is disabled (both in parameters and for the current container), and the user # # requested strict comparison for healthcheck, the comparison will fail. That's why we ignore the # # expected_healthcheck comparison in this case. # continue - if option.option == 'labels' and compare['comparison'] == 'strict' and self.module.params['image_label_mismatch'] == 'fail': + if option.name == 'labels' and compare['comparison'] == 'strict' and self.param_image_label_mismatch == 'fail': # If there are labels from the base image that should be removed and # base_image_mismatch is fail we want raise an error. image_labels = self._get_image_labels(image) @@ -1671,7 +1694,7 @@ def has_different_configuration(self, container, image): c = sorted(c) elif compare['type'] == 'set(dict)': # Since the order does not matter, sort so that the diff output is better. 
- if option.option == 'expected_mounts': + if option.name == 'expected_mounts': # For selected values, use one entry as key def sort_key_fn(x): return x['target'] @@ -1683,7 +1706,7 @@ def sort_key_fn(x): p = sorted(p, key=sort_key_fn) if c is not None: c = sorted(c, key=sort_key_fn) - differences.add(option.option, parameter=p, active=c) + differences.add(option.name, parameter=p, active=c) has_differences = not differences.empty return has_differences, differences @@ -1693,34 +1716,30 @@ def has_different_resource_limits(self, container): Diff parameters and container resource limits ''' differences = DifferenceTracker() - for options in self.options: + for options, param_values in self.parameters: engine = options.get_engine('docker_api') if not engine.can_update_value(self.client.docker_api_version): continue container_values = engine.get_value(self.module, container.raw, self.client.docker_api_version, options.options) for option in options.options: - if self.module.params[option.name] is not None: - param_value = self.module.params[option.name] + if option.name in param_values: + param_value = param_values[option.name] container_value = container_values.get(option.name) compare = self.comparisons[option.name] match = compare_generic(param_value, container_value, compare['comparison'], compare['type']) if not match: # no match. 
record the differences - differences.add(option.option, parameter=param_value, active=container_value) + differences.add(option.name, parameter=param_value, active=container_value) different = not differences.empty return different, differences def _compose_update_parameters(self): result = {} - for options in self.options: + for options, values in self.parameters: engine = options.get_engine('docker_api') if not engine.can_update_value(self.client.docker_api_version): continue - values = {} - for option in options.options: - if self.module.params[option.option] is not None: - values[option.option] = self.module.params[option.option] engine.update_value(self.module, result, self.client.docker_api_version, options.options, values) return result @@ -1828,7 +1847,7 @@ def update_networks(self, container, container_created): self.results['changed'] = True updated_container = self._add_networks(container, network_differences) - if (self.comparisons['networks']['comparison'] == 'strict' and self.module.params['networks'] is not None) or self.module.params['purge_networks']: + if (self.comparisons['networks']['comparison'] == 'strict' and self.module.params['networks'] is not None) or self.param_purge_networks: has_extra_networks, extra_networks = self.has_extra_networks(container) if has_extra_networks: if self.diff.get('differences'): @@ -1898,7 +1917,7 @@ def container_create(self, image): new_container = None if not self.check_mode: try: - params = {'name': self.module.params['name']} + params = {'name': self.param_name} new_container = self.client.post_json_to_json('/containers/create', data=create_parameters, params=params) self.client.report_warnings(new_container) except Exception as exc: @@ -1923,7 +1942,7 @@ def container_start(self, container_id): if self.module.params['auto_remove']: output = "Cannot retrieve result as auto_remove is enabled" - if self.module.params['output_logs']: + if self.param_output_logs: self.client.module.warn('Cannot output_logs if 
auto_remove is enabled!') else: config = self.client.get_json('/containers/{0}/json', container_id) @@ -1939,12 +1958,12 @@ def container_start(self, container_id): } res = self.client._get(self.client._url('/containers/{0}/logs', container_id), params=params) output = self.client._get_result_tty(False, res, config['Config']['Tty']) - if self.module.params['output_logs']: + if self.param_output_logs: self._output_logs(msg=output) else: output = "Result logged using `%s` driver" % logging_driver - if self.module.params['cleanup']: + if self.param_cleanup: self.container_remove(container_id, force=True) insp = self._get_container(container_id) if insp.raw: @@ -1959,7 +1978,7 @@ def container_start(self, container_id): return self._get_container(container_id) def container_remove(self, container_id, link=False, force=False): - volume_state = (not self.module.params['keep_volumes']) + volume_state = (not self.param_keep_volumes) self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force)) self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force)) self.results['changed'] = True @@ -2009,13 +2028,13 @@ def container_update(self, container_id, update_parameters): return self._get_container(container_id) def container_kill(self, container_id): - self.results['actions'].append(dict(killed=container_id, signal=self.module.params['kill_signal'])) + self.results['actions'].append(dict(killed=container_id, signal=self.param_kill_signal)) self.results['changed'] = True if not self.check_mode: try: params = {} - if self.module.params['kill_signal'] is not None: - params['signal'] = int(self.module.params['kill_signal']) + if self.param_kill_signal is not None: + params['signal'] = int(self.param_kill_signal) self.client.post_call('/containers/{0}/kill', container_id, params=params) except Exception as exc: self.fail("Error killing container %s: %s" % (container_id, to_native(exc))) @@ 
-2035,7 +2054,7 @@ def container_restart(self, container_id): return self._get_container(container_id) def container_stop(self, container_id): - if self.module.params['force_kill']: + if self.param_force_kill: self.container_kill(container_id) return self.results['actions'].append(dict(stopped=container_id, timeout=self.module.params['stop_timeout'])) From b4f54cb1eb38cc3b6fb2f36aa4282b9fa6f707a8 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 9 Jul 2022 13:50:59 +0200 Subject: [PATCH 08/38] Improvements and fixes. --- plugins/module_utils/module_container.py | 15 +- plugins/modules/docker_container2.py | 179 ++++++++++------------- 2 files changed, 94 insertions(+), 100 deletions(-) diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container.py index 1c482247e..327df98c6 100644 --- a/plugins/module_utils/module_container.py +++ b/plugins/module_utils/module_container.py @@ -35,6 +35,7 @@ def __init__( ansible_suboptions=None, ansible_aliases=None, ansible_choices=None, + default_comparison=None, ): self.name = name self.type = type @@ -59,6 +60,18 @@ def __init__( self.ansible_suboptions = ansible_suboptions if needs_suboptions else None self.ansible_aliases = ansible_aliases or [] self.ansible_choices = ansible_choices + comparison_type = self.type + if comparison_type == 'set' and self.elements == 'dict': + comparison_type = 'set(dict)' + elif comparison_type not in ('set', 'list', 'dict'): + comparison_type = 'value' + self.comparison_type = comparison_type + if default_comparison is not None: + self.comparison = default_comparison + elif comparison_type in ('list', 'value'): + self.comparison = 'strict' + else: + self.comparison = 'allow_more_present' class OptionGroup(object): @@ -637,7 +650,7 @@ def _preprocess_links(module, api_version, value): # runtime=dict(type='str'), # security_opts=dict(type='list', elements='str'), # shm_size=dict(type='str'), -# stop_timeout=dict(type='int'), +# 
stop_timeout=dict(type='int'), default_comparison='ignore' # storage_opts=dict(type='dict'), # sysctls=dict(type='dict'), # tmpfs=dict(type='list', elements='str'), diff --git a/plugins/modules/docker_container2.py b/plugins/modules/docker_container2.py index 31b179a45..83686fe3e 100644 --- a/plugins/modules/docker_container2.py +++ b/plugins/modules/docker_container2.py @@ -1218,6 +1218,7 @@ from ansible_collections.community.docker.plugins.module_utils.module_container import ( DockerAPIEngineDriver, OPTIONS, + Option, ) from ansible_collections.community.docker.plugins.module_utils.util import ( DifferenceTracker, @@ -1273,14 +1274,12 @@ def paused(self): class ContainerManager(DockerBaseClass): - def __init__(self, client, active_options): + def __init__(self, module, client, active_options): self.client = client self.options = active_options - self.all_options = {} - for options in active_options: - for option in options.options: - self.all_options[option.name] = option - self.module = client.module + self.all_options = self._collect_all_options(active_options) + self.module = module + self.check_mode = self.module.check_mode self.param_cleanup = self.module.params['cleanup'] self.param_container_default_behavior = self.module.params['container_default_behavior'] self.param_debug = self.module.params['debug'] @@ -1299,98 +1298,48 @@ def __init__(self, client, active_options): self.param_removal_wait_timeout = self.module.params['removal_wait_timeout'] self.param_restart = self.module.params['restart'] self.param_state = self.module.params['state'] - self.comparisons = {} self._parse_comparisons() self._update_params() - self.check_mode = self.client.check_mode + self.parameters = self._collect_params(active_options) self.results = {'changed': False, 'actions': []} self.diff = {} self.diff_tracker = DifferenceTracker() self.facts = {} - self.parameters = [] + def _collect_all_options(self, active_options): + all_options = {} for options in active_options: - 
values = {} - engine = options.get_engine('docker_api') for option in options.options: - if self.module.params[option.name] is not None: - values[option.name] = self.module.params[option.name] - values = options.preprocess(self.module, values) - engine.preprocess_value(self.module, self.client.docker_api_version, options.options, values) - self.parameters.append((options, values)) - - def _update_params(self): - if self.param_networks_cli_compatible is True and self.module.params['networks'] and self.module.params['network_mode'] is None: - # Same behavior as Docker CLI: if networks are specified, use the name of the first network as the value for network_mode - # (assuming no explicit value is specified for network_mode) - self.module.params['network_mode'] = self.module.params['networks'][0]['name'] - if self.param_container_default_behavior == 'compatibility': - old_default_values = dict( - auto_remove=False, - detach=True, - init=False, - interactive=False, - memory='0', - paused=False, - privileged=False, - read_only=False, - tty=False, - ) - for param, value in old_default_values.items(): - if self.module.params[param] is None: - self.module.params[param] = value + all_options[option.name] = option + for option in [ + Option('image', 'str', None), + Option('networks', 'set', None, elements='dict', ansible_suboptions={}), + ]: + all_options[option.name] = option + return all_options + + def _collect_all_module_params(self): + all_module_options = set() + for option, data in self.module.argument_spec.items(): + all_module_options.add(option) + if 'aliases' in data: + for alias in data['aliases']: + all_module_options.add(alias) + return all_module_options def _parse_comparisons(self): - # Create default values for comparisons - self.comparisons['image'] = { - 'type': 'value', - 'comparison': 'strict', - 'name': 'image', - } - self.comparisons['networks'] = { - 'type': 'set(dict)', - 'comparison': 'allow_more_present', - 'name': 'networks', - } - 
default_comparison_values = dict( - stop_timeout='ignore', - ) + # Keep track of all module params and all option aliases + all_module_options = self._collect_all_module_params() comp_aliases = {} for option_name, option in self.all_options.items(): - # Keep track of all aliases comp_aliases[option_name] = option_name for alias in option.ansible_aliases: comp_aliases[alias] = option_name - # Determine datatype - datatype = option.type - if datatype == 'set' and option.elements == 'dict': - datatype = 'set(dict)' - elif datatype not in ('set', 'list', 'dict'): - datatype = 'value' - # Determine comparison - if option in default_comparison_values: - comparison = default_comparison_values[option] - elif datatype in ('list', 'value'): - comparison = 'strict' - else: - comparison = 'allow_more_present' - self.comparisons[option_name] = { - 'type': datatype, - 'comparison': comparison, - 'name': option_name, - } - # Collect all module options - all_module_options = set(comp_aliases) - for option, data in self.module.argument_spec.items(): - all_module_options.add(option) - if 'aliases' in data: - for alias in data['aliases']: - all_module_options.add(alias) # Process legacy ignore options if self.module.params['ignore_image']: - self.comparisons['image']['comparison'] = 'ignore' + self.all_options['image'].comparison = 'ignore' if self.param_purge_networks: - self.comparisons['networks']['comparison'] = 'strict' + self.all_options['networks'].comparison = 'strict' # Process comparsions specified by user if self.module.params.get('comparisons'): # If '*' appears in comparisons, process it first @@ -1398,13 +1347,13 @@ def _parse_comparisons(self): value = self.module.params['comparisons']['*'] if value not in ('strict', 'ignore'): self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!") - for option, v in self.comparisons.items(): - if option == 'networks': + for option in self.all_options.values(): + if option.name == 'networks': # 
`networks` is special: only update if # some value is actually specified if self.module.params['networks'] is None: continue - v['comparison'] = value + option.comparison = value # Now process all other comparisons. comp_aliases_used = {} for key, value in self.module.params['comparisons'].items(): @@ -1416,7 +1365,7 @@ def _parse_comparisons(self): if key_main in all_module_options: self.fail("The module option '%s' cannot be specified in the comparisons dict, " "since it does not correspond to container's state!" % key) - if key not in self.comparisons: + if key not in self.all_options: self.fail("Unknown module option '%s' in comparisons dict!" % key) key_main = key if key_main in comp_aliases_used: @@ -1424,19 +1373,53 @@ def _parse_comparisons(self): comp_aliases_used[key_main] = key # Check value and update accordingly if value in ('strict', 'ignore'): - self.comparisons[key_main]['comparison'] = value + self.all_options[key_main].comparison = value elif value == 'allow_more_present': - if self.comparisons[key_main]['type'] == 'value': + if self.all_options[key_main].comparison_type == 'value': self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value)) - self.comparisons[key_main]['comparison'] = value + self.all_options[key_main].comparison = value else: self.fail("Unknown comparison mode '%s'!" 
% value) # Check legacy values - if self.module.params['ignore_image'] and self.comparisons['image']['comparison'] != 'ignore': + if self.module.params['ignore_image'] and self.all_options['image'].comparison != 'ignore': self.module.warn('The ignore_image option has been overridden by the comparisons option!') - if self.param_purge_networks and self.comparisons['networks']['comparison'] != 'strict': + if self.param_purge_networks and self.all_options['networks'].comparison != 'strict': self.module.warn('The purge_networks option has been overridden by the comparisons option!') + def _update_params(self): + if self.param_networks_cli_compatible is True and self.module.params['networks'] and self.module.params['network_mode'] is None: + # Same behavior as Docker CLI: if networks are specified, use the name of the first network as the value for network_mode + # (assuming no explicit value is specified for network_mode) + self.module.params['network_mode'] = self.module.params['networks'][0]['name'] + if self.param_container_default_behavior == 'compatibility': + old_default_values = dict( + auto_remove=False, + detach=True, + init=False, + interactive=False, + memory='0', + paused=False, + privileged=False, + read_only=False, + tty=False, + ) + for param, value in old_default_values.items(): + if self.module.params[param] is None: + self.module.params[param] = value + + def _collect_params(self, active_options): + parameters = [] + for options in active_options: + values = {} + engine = options.get_engine('docker_api') + for option in options.options: + if self.module.params[option.name] is not None: + values[option.name] = self.module.params[option.name] + values = options.preprocess(self.module, values) + engine.preprocess_value(self.module, self.client.docker_api_version, options.options, values) + parameters.append((options, values)) + return parameters + def fail(self, *args, **kwargs): self.client.fail(*args, **kwargs) @@ -1523,7 +1506,7 @@ def present(self, 
state): # Existing container different, differences = self.has_different_configuration(container, image) image_different = False - if self.comparisons['image']['comparison'] == 'strict': + if self.all_options['image'].comparison == 'strict': image_different = self._image_is_different(image, container) if image_different or different or self.param_recreate: self.diff_tracker.merge(differences) @@ -1592,7 +1575,7 @@ def absent(self): self.container_remove(container.Id) def _output_logs(self, msg): - self.client.module.log(msg=msg) + self.module.log(msg=msg) def _get_container(self, container): ''' @@ -1658,8 +1641,7 @@ def has_different_configuration(self, container, image): if option.name in param_values: param_value = param_values[option.name] container_value = container_values.get(option.name) - compare = self.comparisons[option.name] - match = compare_generic(param_value, container_value, compare['comparison'], compare['type']) + match = compare_generic(param_value, container_value, option.comparison, option.comparison_type) if not match: # TODO @@ -1725,8 +1707,7 @@ def has_different_resource_limits(self, container): if option.name in param_values: param_value = param_values[option.name] container_value = container_values.get(option.name) - compare = self.comparisons[option.name] - match = compare_generic(param_value, container_value, compare['comparison'], compare['type']) + match = compare_generic(param_value, container_value, option.comparison, option.comparison_type) if not match: # no match. 
record the differences @@ -1831,7 +1812,7 @@ def has_extra_networks(self, container): def update_networks(self, container, container_created): updated_container = container - if self.comparisons['networks']['comparison'] != 'ignore' or container_created: + if self.all_options['networks'].comparison != 'ignore' or container_created: has_network_differences, network_differences = self.has_network_differences(container) if has_network_differences: if self.diff.get('differences'): @@ -1847,7 +1828,7 @@ def update_networks(self, container, container_created): self.results['changed'] = True updated_container = self._add_networks(container, network_differences) - if (self.comparisons['networks']['comparison'] == 'strict' and self.module.params['networks'] is not None) or self.param_purge_networks: + if (self.all_options['networks'].comparison == 'strict' and self.module.params['networks'] is not None) or self.param_purge_networks: has_extra_networks, extra_networks = self.has_extra_networks(container) if has_extra_networks: if self.diff.get('differences'): @@ -1943,7 +1924,7 @@ def container_start(self, container_id): if self.module.params['auto_remove']: output = "Cannot retrieve result as auto_remove is enabled" if self.param_output_logs: - self.client.module.warn('Cannot output_logs if auto_remove is enabled!') + self.module.warn('Cannot output_logs if auto_remove is enabled!') else: config = self.client.get_json('/containers/{0}/json', container_id) logging_driver = config['HostConfig']['LogConfig']['Type'] @@ -1987,7 +1968,7 @@ def container_remove(self, container_id, link=False, force=False): while True: try: params = {'v': volume_state, 'link': link, 'force': force} - self.client.delete_call('"/containers/{0}', container_id, params=params) + self.client.delete_call('/containers/{0}', container_id, params=params) except NotFound as dummy: pass except APIError as exc: @@ -2168,7 +2149,7 @@ def main(): ) try: - cm = ContainerManager(client, active_options) + cm = 
ContainerManager(client.module, client, active_options) cm.run() client.module.exit_json(**sanitize_result(cm.results)) except DockerException as e: From 1a85e3a968e4cb2c584e3aecd07b89e821aed6f3 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 10 Jul 2022 16:53:24 +0200 Subject: [PATCH 09/38] Continuing. --- changelogs/fragments/docker_container.yml | 1 + plugins/module_utils/module_container.py | 296 +++++++++++++++++----- plugins/modules/docker_container2.py | 44 ++-- 3 files changed, 254 insertions(+), 87 deletions(-) diff --git a/changelogs/fragments/docker_container.yml b/changelogs/fragments/docker_container.yml index 1dfb75edf..f922092fd 100644 --- a/changelogs/fragments/docker_container.yml +++ b/changelogs/fragments/docker_container.yml @@ -7,3 +7,4 @@ breaking_changes: - "docker_container - ``publish_all_ports`` is no longer ignored in ``comparisons`` (https://github.com/ansible-collections/community.docker/pull/...)." - "docker_container - ``log_options`` can no longer be specified when ``log_driver`` is not specified (https://github.com/ansible-collections/community.docker/pull/...)." - "docker_container - ``restart_retries`` can no longer be specified when ``restart_policy`` is not specified (https://github.com/ansible-collections/community.docker/pull/...)." + - "docker_container - ``stop_timeout`` is no longer ignored for idempotency if told to be not ignored in ``comparisons``. So far it defaulted to ``ignore`` there, and setting it to ``strict`` had no effect (https://github.com/ansible-collections/community.docker/pull/...)." 
diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container.py index 327df98c6..da09a4cc8 100644 --- a/plugins/module_utils/module_container.py +++ b/plugins/module_utils/module_container.py @@ -14,6 +14,8 @@ parse_healthcheck, ) +from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import parse_env_file + def _get_ansible_type(type): if type == 'set': @@ -35,7 +37,9 @@ def __init__( ansible_suboptions=None, ansible_aliases=None, ansible_choices=None, + needs_no_suboptions=False, default_comparison=None, + not_a_container_option=False, ): self.name = name self.type = type @@ -55,7 +59,7 @@ def __init__( needs_suboptions = (self.type in ('list', 'set') and elements == 'dict') or (self.type == 'dict') if ansible_suboptions is not None and not needs_suboptions: raise Exception('suboptions only allowed for Ansible lists with dicts, or Ansible dicts') - if ansible_suboptions is None and needs_suboptions: + if ansible_suboptions is None and needs_suboptions and not needs_no_suboptions: raise Exception('suboptions required for Ansible lists with dicts, or Ansible dicts') self.ansible_suboptions = ansible_suboptions if needs_suboptions else None self.ansible_aliases = ansible_aliases or [] @@ -72,6 +76,7 @@ def __init__( self.comparison = 'strict' else: self.comparison = 'allow_more_present' + self.not_a_container_option = not_a_container_option class OptionGroup(object): @@ -99,7 +104,8 @@ def preprocess(module, values): def add_option(self, *args, **kwargs): option = Option(*args, owner=self, **kwargs) - self.options.append(option) + if not option.not_a_container_option: + self.options.append(option) ansible_option = { 'type': option.ansible_type, } @@ -136,7 +142,9 @@ class DockerAPIEngine(object): def __init__( self, get_value, - preprocess_value, + preprocess_value=None, + get_expected_values=None, + ignore_mismatching_result=None, set_value=None, update_value=None, can_set_value=None, @@ -147,7 +155,9 @@ 
def __init__( self.min_docker_api_obj = None if min_docker_api is None else LooseVersion(min_docker_api) self.get_value = get_value self.set_value = set_value - self.preprocess_value = preprocess_value + self.get_expected_values = get_expected_values or (lambda module, client, api_version, options, image, values: values) + self.ignore_mismatching_result = ignore_mismatching_result or (lambda module, client, api_version, option, image, container_value, expected_value: False) + self.preprocess_value = preprocess_value or (lambda module, client, api_version, options, values: values) self.update_value = update_value self.can_set_value = can_set_value or (lambda api_version: set_value is not None) self.can_update_value = can_update_value or (lambda api_version: update_value is not None) @@ -155,30 +165,43 @@ def __init__( @classmethod def config_value( cls, - host_config_name, + config_name, postprocess_for_get=None, preprocess_for_set=None, + get_expected_value=None, + ignore_mismatching_result=None, min_docker_api=None, preprocess_value=None, update_parameter=None, ): - def preprocess_value_(module, api_version, options, values): + def preprocess_value_(module, client, api_version, options, values): if len(options) != 1: raise AssertionError('config_value can only be used for a single option') if preprocess_value is not None and options[0].name in values: - values[options[0].name] = preprocess_value(module, api_version, values[options[0].name]) + values[options[0].name] = preprocess_value(module, client, api_version, values[options[0].name]) return values def get_value(module, container, api_version, options): if len(options) != 1: raise AssertionError('config_value can only be used for a single option') - value = container.get(host_config_name, _SENTRY) + value = container['Config'].get(config_name, _SENTRY) if postprocess_for_get: value = postprocess_for_get(module, api_version, value, _SENTRY) if value is _SENTRY: return {} return {options[0].name: value} + 
get_expected_values_ = None + if get_expected_value: + def get_expected_values_(module, client, api_version, options, image, values): + if len(options) != 1: + raise AssertionError('host_config_value can only be used for a single option') + value = values.get(options[0].name, _SENTRY) + value = get_expected_value(module, client, api_version, image, value, _SENTRY) + if value is _SENTRY: + return values + return {options[0].name: value} + def set_value(module, data, api_version, options, values): if len(options) != 1: raise AssertionError('config_value can only be used for a single option') @@ -187,7 +210,7 @@ def set_value(module, data, api_version, options, values): value = values[options[0].name] if preprocess_for_set: value = preprocess_for_set(module, api_version, value) - data[host_config_name] = value + data[config_name] = value update_value = None if update_parameter: @@ -201,7 +224,15 @@ def update_value(module, data, api_version, options, values): value = preprocess_for_set(module, api_version, value) data[update_parameter] = value - return cls(get_value=get_value, preprocess_value=preprocess_value_, set_value=set_value, min_docker_api=min_docker_api, update_value=update_value) + return cls( + get_value=get_value, + preprocess_value=preprocess_value_, + get_expected_values=get_expected_values_, + ignore_mismatching_result=ignore_mismatching_result, + set_value=set_value, + min_docker_api=min_docker_api, + update_value=update_value, + ) @classmethod def host_config_value( @@ -209,15 +240,17 @@ def host_config_value( host_config_name, postprocess_for_get=None, preprocess_for_set=None, + get_expected_value=None, + ignore_mismatching_result=None, min_docker_api=None, preprocess_value=None, update_parameter=None, ): - def preprocess_value_(module, api_version, options, values): + def preprocess_value_(module, client, api_version, options, values): if len(options) != 1: raise AssertionError('host_config_value can only be used for a single option') if 
preprocess_value is not None and options[0].name in values: - values[options[0].name] = preprocess_value(module, api_version, values[options[0].name]) + values[options[0].name] = preprocess_value(module, client, api_version, values[options[0].name]) return values def get_value(module, container, api_version, options): @@ -230,6 +263,17 @@ def get_value(module, container, api_version, options): return {} return {options[0].name: value} + get_expected_values_ = None + if get_expected_value: + def get_expected_values_(module, client, api_version, options, image, values): + if len(options) != 1: + raise AssertionError('host_config_value can only be used for a single option') + value = values.get(options[0].name, _SENTRY) + value = get_expected_value(module, client, api_version, image, value, _SENTRY) + if value is _SENTRY: + return values + return {options[0].name: value} + def set_value(module, data, api_version, options, values): if len(options) != 1: raise AssertionError('host_config_value can only be used for a single option') @@ -254,10 +298,44 @@ def update_value(module, data, api_version, options, values): value = preprocess_for_set(module, api_version, value) data[update_parameter] = value - return cls(get_value=get_value, preprocess_value=preprocess_value_, set_value=set_value, min_docker_api=min_docker_api, update_value=update_value) + return cls( + get_value=get_value, + preprocess_value=preprocess_value_, + get_expected_values=get_expected_values_, + ignore_mismatching_result=ignore_mismatching_result, + set_value=set_value, + min_docker_api=min_docker_api, + update_value=update_value, + ) + + +def _get_value_detach_interactive(module, container, api_version, options): + attach_stdin = container.get('AttachStdin') + attach_stderr = container.get('AttachStderr') + attach_stdout = container.get('AttachStdout') + return { + 'interactive': bool(attach_stdin), + 'detach': not (attach_stderr and attach_stdout), + } + + +def _set_value_detach_interactive(module, 
data, api_version, options, values): + interactive = values.get('interactive') + detach = values.get('detach') + + data['AttachStdout'] = False + data['AttachStderr'] = False + data['AttachStdin'] = False + data['StdinOnce'] = False + if not detach: + data['AttachStdout'] = True + data['AttachStderr'] = True + if interactive: + data['AttachStdin'] = True + data['StdinOnce'] = True -def _preprocess_command(module, api_version, value): +def _preprocess_command(module, client, api_version, value): if module.params['command_handling'] == 'correct': if value is not None: if not isinstance(value, list): @@ -286,13 +364,49 @@ def _preprocess_entrypoint(module, api_version, value): return value -def _preprocess_cpus(module, api_version, value): +def _preprocess_env(module, values): + if not values: + return {} + final_env = {} + if 'env_file' in values: + parsed_env_file = parse_env_file(values['env_file']) + for name, value in parsed_env_file.items(): + final_env[name] = to_text(value, errors='surrogate_or_strict') + if 'env' in values: + for name, value in values['env'].items(): + if not isinstance(value, string_types): + module.fail_json(msg='Non-string value found for env option. Ambiguous env options must be ' + 'wrapped in quotes to avoid them being interpreted. 
Key: %s' % (name, )) + final_env[name] = to_text(value, errors='surrogate_or_strict') + formatted_env = [] + for key, value in final_env.items(): + formatted_env.append('%s=%s' % (key, value)) + return formatted_env + + +def _get_expected_env_value(module, client, api_version, image, value, sentry): + expected_env = {} + if image and image['Config'].get('Env'): + for env_var in image['Config']['Env']: + parts = env_var.split('=', 1) + expected_env[parts[0]] = parts[1] + if value and value is not sentry: + for env_var in value: + parts = env_var.split('=', 1) + expected_env[parts[0]] = parts[1] + param_env = [] + for key, env_value in expected_env.items(): + param_env.append("%s=%s" % (key, env_value)) + return param_env + + +def _preprocess_cpus(module, client, api_version, value): if value is not None: value = int(round(value * 1E9)) return value -def _preprocess_devices(module, api_version, value): +def _preprocess_devices(module, client, api_version, value): if not value: return value expected_devices = [] @@ -324,7 +438,7 @@ def _preprocess_devices(module, api_version, value): return expected_devices -def _preprocess_rate_bps(module, api_version, value, name): +def _preprocess_rate_bps(module, client, api_version, value): if not value: return value devices = [] @@ -336,7 +450,7 @@ def _preprocess_rate_bps(module, api_version, value, name): return devices -def _preprocess_rate_iops(module, api_version, value, name): +def _preprocess_rate_iops(module, client, api_version, value): if not value: return value devices = [] @@ -348,7 +462,7 @@ def _preprocess_rate_iops(module, api_version, value, name): return devices -def _preprocess_device_requests(module, api_version, value): +def _preprocess_device_requests(module, client, api_version, value): if not value: return value device_requests = [] @@ -363,7 +477,7 @@ def _preprocess_device_requests(module, api_version, value): return device_requests -def _preprocess_etc_hosts(module, api_version, value): +def
_preprocess_etc_hosts(module, client, api_version, value): if value is None: return value results = [] @@ -372,7 +486,7 @@ def _preprocess_etc_hosts(module, api_version, value): return results -def _preprocess_healthcheck(module, api_version, value): +def _preprocess_healthcheck(module, client, api_version, value): if value is None: return value healthcheck, disable_healthcheck = parse_healthcheck(value) @@ -410,7 +524,25 @@ def _preprocess_healthcheck(module, api_version, value): module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc))) -def _preprocess_links(module, api_version, value): +def _get_image_labels(image): + if not image: + return {} + + # Can't use get('Labels', {}) because 'Labels' may be present and be None + return image['Config'].get('Labels') or {} + + +def _get_expected_labels_value(module, client, api_version, image, value, sentry): + if value is sentry: + return sentry + expected_labels = {} + if module.params['image_label_mismatch'] == 'ignore': + expected_labels.update(dict(_get_image_labels(image))) + expected_labels.update(value) + return expected_labels + + +def _preprocess_links(module, client, api_version, value): if value is None: return None @@ -426,6 +558,46 @@ def _preprocess_links(module, api_version, value): return result +def _ignore_mismatching_label_result(module, client, api_version, option, image, container_value, expected_value): + if option.comparison == 'strict' and module.params['image_label_mismatch'] == 'fail': + # If there are labels from the base image that should be removed and + # base_image_mismatch is fail we want to raise an error. + image_labels = _get_image_labels(image) + would_remove_labels = [] + for label in image_labels: + if label not in (module.params['labels'] or {}): + # Format label for error message + would_remove_labels.append('"%s"' % (label, )) + if would_remove_labels: + msg = ("Some labels should be removed but are present in the base image. 
You can set image_label_mismatch to 'ignore' to ignore" + " this error. Labels: {0}") + module.fail_json(msg=msg.format(', '.join(would_remove_labels))) + return False + + +def _preprocess_mac_address(module, values): + if 'mac_address' not in values: + return values + return { + 'mac_address': values['mac_address'].replace('-', ':'), + } + + +def _preprocess_container_names(module, client, api_version, value): + if value is None or not value.startswith('container:'): + return value + container_name = value[len('container:'):] + # Try to inspect container to see whether this is an ID or a + # name (and in the latter case, retrieve its ID) + container = client.get_container(container_name) + if container is None: + # If we can't find the container, issue a warning and continue with + # what the user specified. + module.warn('Cannot find a container with name or ID "{0}"'.format(container_name)) + return value + return 'container:{0}'.format(container['Id']) + + OPTIONS = [ OptionGroup() .add_option('auto_remove', type='bool') @@ -433,7 +605,7 @@ def _preprocess_links(module, api_version, value): OptionGroup() .add_option('blkio_weight', type='int') - .add_docker_api(DockerAPIEngine.config_value('BlkioWeight')), + .add_docker_api(DockerAPIEngine.config_value('BlkioWeight', update_parameter='BlkioWeight')), OptionGroup() .add_option('capabilities', type='set', elements='str') @@ -453,23 +625,23 @@ def _preprocess_links(module, api_version, value): OptionGroup() .add_option('cpu_period', type='int') - .add_docker_api(DockerAPIEngine.config_value('CpuPeriod')), + .add_docker_api(DockerAPIEngine.config_value('CpuPeriod', update_parameter='CpuPeriod')), OptionGroup() .add_option('cpu_quota', type='int') - .add_docker_api(DockerAPIEngine.config_value('CpuQuota')), + .add_docker_api(DockerAPIEngine.config_value('CpuQuota', update_parameter='CpuQuota')), OptionGroup() .add_option('cpuset_cpus', type='str') - .add_docker_api(DockerAPIEngine.config_value('CpuShares')), + 
.add_docker_api(DockerAPIEngine.config_value('CpuShares', update_parameter='CpuShares')), OptionGroup() .add_option('cpuset_mems', type='str') - .add_docker_api(DockerAPIEngine.config_value('CpusetCpus')), + .add_docker_api(DockerAPIEngine.config_value('CpusetCpus', update_parameter='CpusetCpus')), OptionGroup() .add_option('cpu_shares', type='int') - .add_docker_api(DockerAPIEngine.config_value('CpusetMems')), + .add_docker_api(DockerAPIEngine.config_value('CpusetMems', update_parameter='CpusetMems')), OptionGroup() .add_option('entrypoint', type='list', elements='str') @@ -479,6 +651,11 @@ def _preprocess_links(module, api_version, value): .add_option('cpus', type='int', ansible_type='float') .add_docker_api(DockerAPIEngine.host_config_value('NanoCpus', preprocess_value=_preprocess_cpus)), + OptionGroup() + .add_option('detach', type='bool') + .add_option('interactive', type='bool') + .add_docker_api(DockerAPIEngine(get_value=_get_value_detach_interactive, set_value=_set_value_detach_interactive)), + OptionGroup() .add_option('devices', type='set', elements='str') .add_docker_api(DockerAPIEngine.host_config_value('Devices', preprocess_value=_preprocess_devices)), @@ -488,28 +665,28 @@ def _preprocess_links(module, api_version, value): path=dict(required=True, type='str'), rate=dict(required=True, type='str'), )) - .add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceReadBps', preprocess_value=partial(_preprocess_rate_bps, name='device_read_bps'))), + .add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceReadBps', preprocess_value=_preprocess_rate_bps)), OptionGroup() .add_option('device_write_bps', type='set', elements='dict', ansible_suboptions=dict( path=dict(required=True, type='str'), rate=dict(required=True, type='str'), )) - .add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceWriteBps', preprocess_value=partial(_preprocess_rate_bps, name='device_write_bps'))), + 
.add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceWriteBps', preprocess_value=_preprocess_rate_bps)), OptionGroup() .add_option('device_read_iops', type='set', elements='dict', ansible_suboptions=dict( path=dict(required=True, type='str'), rate=dict(required=True, type='int'), )) - .add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceReadIOps', preprocess_value=partial(_preprocess_rate_iops, name='device_read_iops'))), + .add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceReadIOps', preprocess_value=_preprocess_rate_iops)), OptionGroup() .add_option('device_write_iops', type='set', elements='dict', ansible_suboptions=dict( path=dict(required=True, type='str'), rate=dict(required=True, type='int'), )) - .add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceWriteIOps', preprocess_value=partial(_preprocess_rate_iops, name='device_write_iops'))), + .add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceWriteIOps', preprocess_value=_preprocess_rate_iops)), OptionGroup() .add_option('device_requests', type='set', elements='dict', ansible_suboptions=dict( @@ -537,6 +714,11 @@ def _preprocess_links(module, api_version, value): .add_option('domainname', type='str') .add_docker_api(DockerAPIEngine.config_value('Domainname')), + OptionGroup(preprocess=_preprocess_env) + .add_option('env', type='set', ansible_type='dict', elements='str') + .add_option('env_file', type='set', ansible_type='path', elements='str', not_a_container_option=True) + .add_docker_api(DockerAPIEngine.config_value('Env', get_expected_value=_get_expected_env_value)), + OptionGroup() .add_option('etc_hosts', type='set', ansible_type='dict', elements='str') .add_docker_api(DockerAPIEngine.host_config_value('ExtraHosts', preprocess_value=_preprocess_etc_hosts)), @@ -564,42 +746,50 @@ def _preprocess_links(module, api_version, value): .add_docker_api(DockerAPIEngine.host_config_value('Init')), OptionGroup() - .add_option('interactive', type='bool') - 
.add_docker_api(DockerAPIEngine.config_value('OpenStdin')), + .add_option('ipc_mode', type='str') + .add_docker_api(DockerAPIEngine.host_config_value('IpcMode', preprocess_value=_preprocess_container_names)), OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='kernel_memory')) .add_option('kernel_memory', type='int', ansible_type='str') - .add_docker_api(DockerAPIEngine.host_config_value('KernelMemory')), + .add_docker_api(DockerAPIEngine.host_config_value('KernelMemory', update_parameter='KernelMemory')), + + OptionGroup() + .add_option('labels', type='dict', needs_no_suboptions=True) + .add_docker_api(DockerAPIEngine.config_value('Labels', get_expected_value=_get_expected_labels_value, ignore_mismatching_result=_ignore_mismatching_label_result)), OptionGroup() .add_option('links', type='set', elements='list', ansible_elements='str') .add_docker_api(DockerAPIEngine.config_value('Links', preprocess_value=_preprocess_links)), + OptionGroup(preprocess=_preprocess_mac_address) + .add_option('mac_address', type='str') + .add_docker_api(DockerAPIEngine.config_value('MacAddress')), + OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory')) .add_option('memory', type='int', ansible_type='str') - .add_docker_api(DockerAPIEngine.host_config_value('Memory')), + .add_docker_api(DockerAPIEngine.host_config_value('Memory', update_parameter='Memory')), OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory_reservation')) .add_option('memory_reservation', type='int', ansible_type='str') - .add_docker_api(DockerAPIEngine.host_config_value('MemoryReservation')), + .add_docker_api(DockerAPIEngine.host_config_value('MemoryReservation', update_parameter='MemoryReservation')), OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory_swap', unlimited_value=-1)) .add_option('memory_swap', type='int', ansible_type='str') - .add_docker_api(DockerAPIEngine.host_config_value('MemorySwap')), + 
.add_docker_api(DockerAPIEngine.host_config_value('MemorySwap', update_parameter='MemorySwap')), OptionGroup() .add_option('memory_swappiness', type='int') .add_docker_api(DockerAPIEngine.host_config_value('MemorySwappiness')), + + OptionGroup() + .add_option('stop_timeout', type='int', default_comparison='ignore') + .add_docker_api(DockerAPIEngine.config_value('StopTimeout')), ] # Options / option groups that are more complex: -# detach=dict(type='bool'), -# env=dict(type='dict'), -# env_file=dict(type='path'), # exposed_ports=dict(type='list', elements='str', aliases=['exposed', 'expose']), -# ipc_mode=dict(type='str'), -# labels=dict(type='dict'), - +# publish_all_ports=dict(type='bool'), +# published_ports=dict(type='list', elements='str', aliases=['ports']), # OptionGroup(ansible_required_by={'log_options': ['log_driver']}) # .add_option('log_driver', type='str') @@ -609,15 +799,8 @@ def _preprocess_links(module, api_version, value): # OptionGroup(ansible_required_by={'restart_retries': ['restart_policy']}) # .add_option('restart_policy', type='str', ansible_choices=['no', 'on-failure', 'always', 'unless-stopped']) # .add_option('restart_retries', type='int') -# .add_docker_api(...) 
- -# if self.mac_address: -# # Ensure the MAC address uses colons instead of hyphens for later comparison -# self.mac_address = self.mac_address.replace('-', ':') -# -# mac_address=config.get('MacAddress', network.get('MacAddress')), -# -# mac_address=dict(type='str'), +# .add_docker_api(..., ) +# ---------------- only for policy: update_parameter='RestartPolicy' # REQUIRES_CONVERSION_TO_BYTES = [ # 'shm_size' @@ -644,13 +827,10 @@ def _preprocess_links(module, api_version, value): # pid_mode=dict(type='str'), # pids_limit=dict(type='int'), # privileged=dict(type='bool'), -# publish_all_ports=dict(type='bool'), -# published_ports=dict(type='list', elements='str', aliases=['ports']), # read_only=dict(type='bool'), # runtime=dict(type='str'), # security_opts=dict(type='list', elements='str'), # shm_size=dict(type='str'), -# stop_timeout=dict(type='int'), default_comparison='ignore' # storage_opts=dict(type='dict'), # sysctls=dict(type='dict'), # tmpfs=dict(type='list', elements='str'), @@ -669,7 +849,3 @@ def _preprocess_links(module, api_version, value): # mounts='set(dict)', # ulimits='set(dict)', # ) -# -# default_values = dict( -# stop_timeout='ignore', -# } diff --git a/plugins/modules/docker_container2.py b/plugins/modules/docker_container2.py index 83686fe3e..9cf79fad2 100644 --- a/plugins/modules/docker_container2.py +++ b/plugins/modules/docker_container2.py @@ -1416,7 +1416,7 @@ def _collect_params(self, active_options): if self.module.params[option.name] is not None: values[option.name] = self.module.params[option.name] values = options.preprocess(self.module, values) - engine.preprocess_value(self.module, self.client.docker_api_version, options.options, values) + engine.preprocess_value(self.module, self.client, self.client.docker_api_version, options.options, values) parameters.append((options, values)) return parameters @@ -1532,7 +1532,7 @@ def present(self, state): container_created = True if container and container.exists: - container = 
self.update_limits(container) + container = self.update_limits(container, image) container = self.update_networks(container, container_created) if state == 'started' and not container.running: @@ -1637,13 +1637,16 @@ def has_different_configuration(self, container, image): for options, param_values in self.parameters: engine = options.get_engine('docker_api') container_values = engine.get_value(self.module, container.raw, self.client.docker_api_version, options.options) + expected_values = engine.get_expected_values(self.module, self.client, self.client.docker_api_version, options.options, image, param_values.copy()) for option in options.options: - if option.name in param_values: - param_value = param_values[option.name] + if option.name in expected_values: + param_value = expected_values[option.name] container_value = container_values.get(option.name) match = compare_generic(param_value, container_value, option.comparison, option.comparison_type) if not match: + if engine.ignore_mismatching_result(self.module, self.client, self.client.docker_api_version, option, image, container_value, param_value): + continue # TODO # if option.name == 'healthcheck' and config_mapping['disable_healthcheck'] and self.parameters.disable_healthcheck: # # If the healthcheck is disabled (both in parameters and for the current container), and the user @@ -1651,30 +1654,16 @@ def has_different_configuration(self, container, image): # # expected_healthcheck comparison in this case. # continue - if option.name == 'labels' and compare['comparison'] == 'strict' and self.param_image_label_mismatch == 'fail': - # If there are labels from the base image that should be removed and - # base_image_mismatch is fail we want raise an error. 
- image_labels = self._get_image_labels(image) - would_remove_labels = [] - for label in image_labels: - if label not in self.module.params['labels']: - # Format label for error message - would_remove_labels.append(label) - if would_remove_labels: - msg = ("Some labels should be removed but are present in the base image. You can set image_label_mismatch to 'ignore' to ignore" - " this error. Labels: {0}") - self.fail(msg.format(', '.join(['"%s"' % label for label in would_remove_labels]))) - # no match. record the differences p = param_value c = container_value - if compare['type'] == 'set': + if option.comparison_type == 'set': # Since the order does not matter, sort so that the diff output is better. if p is not None: p = sorted(p) if c is not None: c = sorted(c) - elif compare['type'] == 'set(dict)': + elif option.comparison_type == 'set(dict)': # Since the order does not matter, sort so that the diff output is better. if option.name == 'expected_mounts': # For selected values, use one entry as key @@ -1693,7 +1682,7 @@ def sort_key_fn(x): has_differences = not differences.empty return has_differences, differences - def has_different_resource_limits(self, container): + def has_different_resource_limits(self, container, image): ''' Diff parameters and container resource limits ''' @@ -1703,9 +1692,10 @@ def has_different_resource_limits(self, container): if not engine.can_update_value(self.client.docker_api_version): continue container_values = engine.get_value(self.module, container.raw, self.client.docker_api_version, options.options) + expected_values = engine.get_expected_values(self.module, self.client, self.client.docker_api_version, options.options, image, param_values.copy()) for option in options.options: - if option.name in param_values: - param_value = param_values[option.name] + if option.name in expected_values: + param_value = expected_values[option.name] container_value = container_values.get(option.name) match = compare_generic(param_value, 
container_value, option.comparison, option.comparison_type) @@ -1724,8 +1714,8 @@ def _compose_update_parameters(self): engine.update_value(self.module, result, self.client.docker_api_version, options.options, values) return result - def update_limits(self, container): - limits_differ, different_limits = self.has_different_resource_limits(container) + def update_limits(self, container, image): + limits_differ, different_limits = self.has_different_resource_limits(container, image) if limits_differ: self.log("limit differences:") self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True) @@ -2029,7 +2019,7 @@ def container_restart(self, container_id): client_timeout = self.client.timeout if client_timeout is not None: client_timeout += timeout - self.client.post('/containers/{0}/restart', container_id, params={'t': timeout}, timeout=client_timeout) + self.client.post_call('/containers/{0}/restart', container_id, params={'t': timeout}, timeout=client_timeout) except Exception as exc: self.fail("Error restarting container %s: %s" % (container_id, to_native(exc))) return self._get_container(container_id) @@ -2053,7 +2043,7 @@ def container_stop(self, container_id): client_timeout = self.client.timeout if client_timeout is not None: client_timeout += timeout - self.client.post('/containers/{0}/stop', container_id, params=params, timeout=client_timeout) + self.client.post_call('/containers/{0}/stop', container_id, params=params, timeout=client_timeout) except APIError as exc: if 'Unpause the container before stopping or killing' in exc.explanation: # New docker daemon versions do not allow containers to be removed From ba0ffff2d4f8abf5b510e068f0dfcff0ba736cfb Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 10 Jul 2022 17:09:11 +0200 Subject: [PATCH 10/38] More 'easy' options. 
--- plugins/module_utils/module_container.py | 95 ++++++++++++++++++------ 1 file changed, 72 insertions(+), 23 deletions(-) diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container.py index da09a4cc8..f9a0a8e16 100644 --- a/plugins/module_utils/module_container.py +++ b/plugins/module_utils/module_container.py @@ -784,6 +784,78 @@ def _preprocess_container_names(module, client, api_version, value): OptionGroup() .add_option('stop_timeout', type='int', default_comparison='ignore') .add_docker_api(DockerAPIEngine.config_value('StopTimeout')), + + OptionGroup() + .add_option('network_mode', type='str') + .add_docker_api(DockerAPIEngine.host_config_value('NetworkMode', preprocess_value=_preprocess_container_names)), + + OptionGroup() + .add_option('oom_killer', type='bool') + .add_docker_api(DockerAPIEngine.host_config_value('OomKillDisable')), + + OptionGroup() + .add_option('oom_score_adj', type='int') + .add_docker_api(DockerAPIEngine.host_config_value('OomScoreAdj')), + + OptionGroup() + .add_option('pid_mode', type='str') + .add_docker_api(DockerAPIEngine.host_config_value('PidMode', preprocess_value=_preprocess_container_names)), + + OptionGroup() + .add_option('pids_limit', type='int') + .add_docker_api(DockerAPIEngine.host_config_value('PidsLimit')), + + OptionGroup() + .add_option('privileged', type='bool') + .add_docker_api(DockerAPIEngine.host_config_value('Privileged')), + + OptionGroup() + .add_option('read_only', type='bool') + .add_docker_api(DockerAPIEngine.host_config_value('ReadonlyRootfs')), + + OptionGroup() + .add_option('runtime', type='str') + .add_docker_api(DockerAPIEngine.host_config_value('Runtime')), + + OptionGroup() + .add_option('security_opts', type='set', elements='str') + .add_docker_api(DockerAPIEngine.host_config_value('SecurityOpt')), + + OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='shm_size')) + .add_option('shm_size', type='int', ansible_type='str') + 
.add_docker_api(DockerAPIEngine.host_config_value('ShmSize')), + + OptionGroup() + .add_option('storage_opts', type='dict', needs_no_suboptions=True) + .add_docker_api(DockerAPIEngine.host_config_value('StorageOpt')), + + OptionGroup() + .add_option('tty', type='bool') + .add_docker_api(DockerAPIEngine.config_value('Tty')), + + OptionGroup() + .add_option('user', type='str') + .add_docker_api(DockerAPIEngine.config_value('User')), + + OptionGroup() + .add_option('userns_mode', type='str') + .add_docker_api(DockerAPIEngine.host_config_value('UsernsMode')), + + OptionGroup() + .add_option('uts', type='str') + .add_docker_api(DockerAPIEngine.host_config_value('UTSMode')), + + OptionGroup() + .add_option('volume_driver', type='str') + .add_docker_api(DockerAPIEngine.host_config_value('VolumeDriver')), + + OptionGroup() + .add_option('volumes_from', type='set', elements='str') + .add_docker_api(DockerAPIEngine.host_config_value('VolumesFrom')), + + OptionGroup() + .add_option('working_dir', type='str') + .add_docker_api(DockerAPIEngine.config_value('WorkingDir')), ] # Options / option groups that are more complex: @@ -802,10 +874,6 @@ def _preprocess_container_names(module, client, api_version, value): # .add_docker_api(..., ) # ---------------- only for policy: update_parameter='RestartPolicy' -# REQUIRES_CONVERSION_TO_BYTES = [ -# 'shm_size' -# ] - # Options to convert / triage: # mounts=dict(type='list', elements='dict', options=dict( # target=dict(type='str', required=True), @@ -821,31 +889,12 @@ def _preprocess_container_names(module, client, api_version, value): # tmpfs_size=dict(type='str'), # tmpfs_mode=dict(type='str'), # )), -# network_mode=dict(type='str'), -# oom_killer=dict(type='bool'), -# oom_score_adj=dict(type='int'), -# pid_mode=dict(type='str'), -# pids_limit=dict(type='int'), -# privileged=dict(type='bool'), -# read_only=dict(type='bool'), -# runtime=dict(type='str'), -# security_opts=dict(type='list', elements='str'), -# shm_size=dict(type='str'), 
-# storage_opts=dict(type='dict'), # sysctls=dict(type='dict'), # tmpfs=dict(type='list', elements='str'), -# tty=dict(type='bool'), # ulimits=dict(type='list', elements='str'), -# user=dict(type='str'), -# userns_mode=dict(type='str'), -# uts=dict(type='str'), -# volume_driver=dict(type='str'), # volumes=dict(type='list', elements='str'), -# volumes_from=dict(type='list', elements='str'), -# working_dir=dict(type='str'), # # explicit_types = dict( -# env='set', # mounts='set(dict)', # ulimits='set(dict)', # ) From d74da30e91a2d60b1548e09a9f7527ea29c1b053 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 10 Jul 2022 18:20:41 +0200 Subject: [PATCH 11/38] More options. --- plugins/module_utils/module_container.py | 125 +++++++++++++++++++++++ 1 file changed, 125 insertions(+) diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container.py index f9a0a8e16..031b1226c 100644 --- a/plugins/module_utils/module_container.py +++ b/plugins/module_utils/module_container.py @@ -1,4 +1,5 @@ # Copyright (c) 2022 Felix Fontein +# Copyright 2016 Red Hat | Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) import shlex @@ -25,6 +26,17 @@ def _get_ansible_type(type): return type +_MOUNT_OPTION_TYPES = dict( + volume_driver='volume', + volume_options='volume', + propagation='bind', + no_copy='volume', + labels='volume', + tmpfs_size='tmpfs', + tmpfs_mode='tmpfs', +) + + class Option(object): def __init__( self, @@ -309,6 +321,64 @@ def update_value(module, data, api_version, options, values): ) +def is_volume_permissions(mode): + for part in mode.split(','): + if part not in ('rw', 'ro', 'z', 'Z', 'consistent', 'delegated', 'cached', 'rprivate', 'private', 'rshared', 'shared', 'rslave', 'slave', 'nocopy'): + return False + return True + + +def parse_port_range(range_or_port, client): + ''' + Parses a string containing either a single port or a range of ports. 
+ + Returns a list of integers for each port in the list. + ''' + if '-' in range_or_port: + try: + start, end = [int(port) for port in range_or_port.split('-')] + except Exception: + client.fail('Invalid port range: "{0}"'.format(range_or_port)) + if end < start: + client.fail('Invalid port range: "{0}"'.format(range_or_port)) + return list(range(start, end + 1)) + else: + try: + return [int(range_or_port)] + except Exception: + client.fail('Invalid port: "{0}"'.format(range_or_port)) + + +def split_colon_ipv6(text, client): + ''' + Split string by ':', while keeping IPv6 addresses in square brackets in one component. + ''' + if '[' not in text: + return text.split(':') + start = 0 + result = [] + while start < len(text): + i = text.find('[', start) + if i < 0: + result.extend(text[start:].split(':')) + break + j = text.find(']', i) + if j < 0: + client.fail('Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(text, i + 1)) + result.extend(text[start:i].split(':')) + k = text.find(':', j) + if k < 0: + result[-1] += text[i:] + start = len(text) + else: + result[-1] += text[i:k] + if k == len(text): + result.append('') + break + start = k + 1 + return result + + def _get_value_detach_interactive(module, container, api_version, options): attach_stdin = container.get('AttachStdin') attach_stderr = container.get('AttachStderr') @@ -583,6 +653,49 @@ def _preprocess_mac_address(module, values): } +def _get_expected_sysctls_value(module, client, api_version, image, value, sentry): + if value is sentry: + return value + result = {} + for key, sysctl_value in value: + result[key] = to_text(sysctl_value, errors='surrogate_or_strict') + return result + + +def _preprocess_tmpfs(module, values): + if 'tmpfs' not in values: + return values + result = {} + for tmpfs_spec in values['tmpfs']: + split_spec = tmpfs_spec.split(":", 1) + if len(split_spec) > 1: + result[split_spec[0]] = split_spec[1] + else: + result[split_spec[0]] = "" + return { + 'tmpfs': 
result + } + + +def _preprocess_ulimits(module, values): + if 'ulimits' not in values: + return values + result = [] + for value in values['ulimits']: + limits = dict() + pieces = limit.split(':') + if len(pieces) >= 2: + limits['Name'] = pieces[0] + limits['Soft'] = int(pieces[1]) + limits['Hard'] = int(pieces[1]) + if len(pieces) == 3: + limits['Hard'] = int(pieces[2]) + results.append(limits) + return { + 'ulimits': result, + } + + def _preprocess_container_names(module, client, api_version, value): if value is None or not value.startswith('container:'): return value @@ -829,10 +942,22 @@ def _preprocess_container_names(module, client, api_version, value): .add_option('storage_opts', type='dict', needs_no_suboptions=True) .add_docker_api(DockerAPIEngine.host_config_value('StorageOpt')), + OptionGroup() + .add_option('sysctls', type='dict', needs_no_suboptions=True) + .add_docker_api(DockerAPIEngine.host_config_value('Sysctls', get_expected_value=_get_expected_sysctls_value)), + + OptionGroup(preprocess=_preprocess_tmpfs) + .add_option('tmpfs', type='dict', ansible_type='list', ansible_elements='str') + .add_docker_api(DockerAPIEngine.host_config_value('Tmpfs')), + OptionGroup() .add_option('tty', type='bool') .add_docker_api(DockerAPIEngine.config_value('Tty')), + OptionGroup(preprocess=_preprocess_ulimits) + .add_option('ulimits', type='set', elements='dict', ansible_elements='str') + .add_docker_api(DockerAPIEngine.host_config_value('Ulimits')), + OptionGroup() .add_option('user', type='str') .add_docker_api(DockerAPIEngine.config_value('User')), From 41d6e6f253e73f95070ba416a895ab004b017789 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 10 Jul 2022 19:48:35 +0200 Subject: [PATCH 12/38] Work on volumes and mounts. 
--- plugins/module_utils/module_container.py | 331 ++++++++++++++++++++--- plugins/modules/docker_container2.py | 13 +- 2 files changed, 300 insertions(+), 44 deletions(-) diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container.py index 031b1226c..7ee2bde99 100644 --- a/plugins/module_utils/module_container.py +++ b/plugins/module_utils/module_container.py @@ -52,6 +52,8 @@ def __init__( needs_no_suboptions=False, default_comparison=None, not_a_container_option=False, + not_an_ansible_option=False, + copy_comparison_from=None, ): self.name = name self.type = type @@ -68,10 +70,10 @@ def __init__( raise Exception('Ansible elements required for Ansible lists') self.elements = elements if needs_elements else None self.ansible_elements = (ansible_elements or _get_ansible_type(elements)) if needs_ansible_elements else None - needs_suboptions = (self.type in ('list', 'set') and elements == 'dict') or (self.type == 'dict') + needs_suboptions = (self.ansible_type == 'list' and self.ansible_elements == 'dict') or (self.ansible_type == 'dict') if ansible_suboptions is not None and not needs_suboptions: raise Exception('suboptions only allowed for Ansible lists with dicts, or Ansible dicts') - if ansible_suboptions is None and needs_suboptions and not needs_no_suboptions: + if ansible_suboptions is None and needs_suboptions and not needs_no_suboptions and not not_an_ansible_option: raise Exception('suboptions required for Ansible lists with dicts, or Ansible dicts') self.ansible_suboptions = ansible_suboptions if needs_suboptions else None self.ansible_aliases = ansible_aliases or [] @@ -89,6 +91,8 @@ def __init__( else: self.comparison = 'allow_more_present' self.not_a_container_option = not_a_container_option + self.not_an_ansible_option = not_an_ansible_option + self.copy_comparison_from = copy_comparison_from class OptionGroup(object): @@ -118,18 +122,19 @@ def add_option(self, *args, **kwargs): option = Option(*args, owner=self, 
**kwargs) if not option.not_a_container_option: self.options.append(option) - ansible_option = { - 'type': option.ansible_type, - } - if option.ansible_elements is not None: - ansible_option['elements'] = option.ansible_elements - if option.ansible_suboptions is not None: - ansible_option['options'] = option.ansible_suboptions - if option.ansible_aliases: - ansible_option['aliases'] = option.ansible_aliases - if option.ansible_choices is not None: - ansible_option['choices'] = option.ansible_choices - self.argument_spec[option.name] = ansible_option + if not option.not_an_ansible_option: + ansible_option = { + 'type': option.ansible_type, + } + if option.ansible_elements is not None: + ansible_option['elements'] = option.ansible_elements + if option.ansible_suboptions is not None: + ansible_option['options'] = option.ansible_suboptions + if option.ansible_aliases: + ansible_option['aliases'] = option.ansible_aliases + if option.ansible_choices is not None: + ansible_option['choices'] = option.ansible_choices + self.argument_spec[option.name] = ansible_option return self def supports_engine(self, engine_name): @@ -696,6 +701,252 @@ def _preprocess_ulimits(module, values): } +def _preprocess_mounts(module, values): + last = dict() + + def check_collision(t, name): + if t in last: + if name == last[t]: + module.fail_json(msg='The mount point "{0}" appears twice in the {1} option'.format(t, name)) + else: + module.fail_json(msg='The mount point "{0}" appears both in the {1} and {2} option'.format(t, name, last[t])) + last[t] = name + + if 'mounts' in values: + mounts = [] + for mount in values['mounts']: + target = mount['target'] + mount_type = mount['type'] + + check_collision(target, 'mounts') + + mount_dict = dict(mount) + + # Sanity checks + if mount['source'] is None and mount_type not in ('tmpfs', 'volume'): + module.fail_json(msg='source must be specified for mount "{0}" of type "{1}"'.format(target, mount_type)) + for option, req_mount_type in 
_MOUNT_OPTION_TYPES.items(): + if mount[option] is not None and mount_type != req_mount_type: + self.client.fail('{0} cannot be specified for mount "{1}" of type "{2}" (needs type "{3}")'.format(option, target, mount_type, req_mount_type)) + + # Streamline options + volume_options = mount_dict.pop('volume_options') + if mount_dict['volume_driver'] and volume_options: + mount_dict['volume_options'] = clean_dict_booleans_for_docker_api(volume_options) + if mount_dict['labels']: + mount_dict['labels'] = clean_dict_booleans_for_docker_api(mount_dict['labels']) + if mount_dict['tmpfs_size'] is not None: + try: + mount_dict['tmpfs_size'] = human_to_bytes(mount_dict['tmpfs_size']) + except ValueError as exc: + module.fail_json(msg='Failed to convert tmpfs_size of mount "{0}" to bytes: {1}'.format(target, to_native(exc))) + if mount_dict['tmpfs_mode'] is not None: + try: + mount_dict['tmpfs_mode'] = int(mount_dict['tmpfs_mode'], 8) + except Exception as dummy: + module.fail_json(msg='tmp_fs mode of mount "{0}" is not an octal string!'.format(target)) + + # Add result to list + mounts.append(omit_none_from_dict(mount_dict)) + values['mounts'] = mounts + if 'volumes' in values: + new_vols = [] + for vol in values['volumes']: + if ':' in vol: + parts = vol.split(':') + if len(parts) == 3: + host, container, mode = parts + if not is_volume_permissions(mode): + module.fail_json(msg='Found invalid volumes mode: {0}'.format(mode)) + if re.match(r'[.~]', host): + host = os.path.abspath(os.path.expanduser(host)) + f(container, 'volumes') + new_vols.append("%s:%s:%s" % (host, container, mode)) + continue + elif len(parts) == 2: + if not is_volume_permissions(parts[1]) and re.match(r'[.~]', parts[0]): + host = os.path.abspath(os.path.expanduser(parts[0])) + f(parts[1], 'volumes') + new_vols.append("%s:%s:rw" % (host, parts[1])) + continue + check_collision(vol.split(':', 1)[0], 'volumes') + new_vols.append(vol) + values['volumes'] = new_vols + new_binds = [] + for vol in new_vols: + 
host = None + if ':' in vol: + parts = vol.split(':') + if len(parts) == 3: + host, container, mode = parts + if not is_volume_permissions(mode): + module.fail_json(msg='Found invalid volumes mode: {0}'.format(mode)) + elif len(parts) == 2: + if not is_volume_permissions(parts[1]): + host, container, mode = (parts + ['rw']) + if host is not None: + new_binds.append('%s:%s:%s' % (host, container, mode)) + values['volume_binds'] = new_binds + return values + + +def _get_values_mounts(module, container, api_version, options): + volumes = container['Config'].get('Volumes') + binds = container['HostConfig'].get('Binds') + # According to https://github.com/moby/moby/, support for HostConfig.Mounts + # has been included at least since v17.03.0-ce, which has API version 1.26. + # The previous tag, v1.9.1, has API version 1.21 and does not have + # HostConfig.Mounts. I have no idea what about API 1.25... + mounts = container['HostConfig'].get('Mounts') + if mounts is not None: + result = [] + empty_dict = {} + for mount in mounts: + result.append({ + 'type': mount.get('Type'), + 'source': mount.get('Source'), + 'target': mount.get('Target'), + 'read_only': mount.get('ReadOnly', False), # golang's omitempty for bool returns None for False + 'consistency': mount.get('Consistency'), + 'propagation': mount.get('BindOptions', empty_dict).get('Propagation'), + 'no_copy': mount.get('VolumeOptions', empty_dict).get('NoCopy', False), + 'labels': mount.get('VolumeOptions', empty_dict).get('Labels', empty_dict), + 'volume_driver': mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Name'), + 'volume_options': mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Options', empty_dict), + 'tmpfs_size': mount.get('TmpfsOptions', empty_dict).get('SizeBytes'), + 'tmpfs_mode': mount.get('TmpfsOptions', empty_dict).get('Mode'), + }) + mounts = result + result = {} + if volumes is not None: + result['volumes'] = volumes + if binds is not None: + 
result['volume_binds'] = binds + if mounts is not None: + result['mounts'] = mounts + return result + + +def _get_bind_from_dict(volume_dict): + results = [] + if volume_dict: + for host_path, config in volume_dict.items(): + if isinstance(config, dict) and config.get('bind'): + container_path = config.get('bind') + mode = config.get('mode', 'rw') + results.append("%s:%s:%s" % (host_path, container_path, mode)) + return results + + +def _get_image_binds(volumes): + ''' + Convert array of binds to array of strings with format host_path:container_path:mode + + :param volumes: array of bind dicts + :return: array of strings + ''' + results = [] + if isinstance(volumes, dict): + results += _get_bind_from_dict(volumes) + elif isinstance(volumes, list): + for vol in volumes: + results += _get_bind_from_dict(vol) + return results + + +def _get_expected_values_mounts(module, client, api_version, options, image, values): + expected_values = {} + + # binds + if 'mounts' in values: + expected_values['mounts'] = values['mounts'] + + # volumes + expected_vols = dict() + if image and image['Config'].get('Volumes'): + expected_vols.update(image['Config'].get('Volumes')) + if 'volumes' in values: + for vol in values['volumes']: + # We only expect anonymous volumes to show up in the list + if ':' in vol: + parts = vol.split(':') + if len(parts) == 3: + continue + if len(parts) == 2: + if not is_volume_permissions(parts[1]): + continue + expected_vols[vol] = dict() + if expected_vols: + expected_values['volumes'] = expected_vols + + # binds + image_vols = [] + if image: + image_vols = _get_image_binds(image['Config'].get('Volumes')) + param_vols = [] + if 'volume_binds' in values: + param_vols = values['volume_binds'] + expected_values['volume_binds'] = list(set(image_vols + param_vols)) + + return expected_values + + +def _set_values_mounts(module, data, api_version, options, values): + if 'mounts' in values: + if 'HostConfig' not in data: + data['HostConfig'] = {} + mounts = [] + 
for mount in values['mounts']: + mount_type = mount.get('type') + mount_res = { + 'Target': mount.get('target'), + 'Source': mount.get('source'), + 'Type': mount_type, + 'ReadOnly': mount.get('read_only'), + } + if 'consistency' in mount: + mount_res['Consistency'] = mount['consistency'] + if mount_type == 'bind': + if 'propagation' in mount: + mount_res['BindOptions'] = { + 'Propagation': mount['propagation'], + } + if mount_type == 'volume': + volume_opts = {} + if mount.get('no_copy'): + volume_opts['NoCopy'] = True + if mount.get('labels'): + volume_opts['Labels'] = mount.get('labels') + if mount.get('volume_driver'): + driver_config = { + 'Name': mount.get('volume_driver'), + } + if mount.get('volume_options'): + driver_config['Options'] = mount.get('volume_options') + volume_opts['DriverConfig'] = driver_config + if volume_opts: + mount_res['VolumeOptions'] = volume_opts + if mount_type == 'tmpfs': + tmpfs_opts = {} + if mount.get('tmpfs_mode'): + tmpfs_opts['Mode'] = mount.get('tmpfs_mode') + if mount.get('tmpfs_size'): + tmpfs_opts['SizeBytes'] = mount.get('tmpfs_size') + if mount.get('tmpfs_opts'): + mount_res['TmpfsOptions'] = mount.get('tmpfs_opts') + mounts.append(mount_res) + data['HostConfig']['Mounts'] = mounts + if 'volumes' in values: + volumes = {} + for volume in values['volumes']: + volumes[volume] = {} + data['Volumes'] = volumes + if 'volume_binds' in values: + if 'HostConfig' not in data: + data['HostConfig'] = {} + data['HostConfig']['Binds'] = values['volume_binds'] + + def _preprocess_container_names(module, client, api_version, value): if value is None or not value.startswith('container:'): return value @@ -828,12 +1079,12 @@ def _preprocess_container_names(module, client, api_version, value): .add_docker_api(DockerAPIEngine.config_value('Domainname')), OptionGroup(preprocess=_preprocess_env) - .add_option('env', type='set', ansible_type='dict', elements='str') + .add_option('env', type='set', ansible_type='dict', elements='str', 
needs_no_suboptions=True) .add_option('env_file', type='set', ansible_type='path', elements='str', not_a_container_option=True) .add_docker_api(DockerAPIEngine.config_value('Env', get_expected_value=_get_expected_env_value)), OptionGroup() - .add_option('etc_hosts', type='set', ansible_type='dict', elements='str') + .add_option('etc_hosts', type='set', ansible_type='dict', elements='str', needs_no_suboptions=True) .add_docker_api(DockerAPIEngine.host_config_value('ExtraHosts', preprocess_value=_preprocess_etc_hosts)), OptionGroup() @@ -981,6 +1232,29 @@ def _preprocess_container_names(module, client, api_version, value): OptionGroup() .add_option('working_dir', type='str') .add_docker_api(DockerAPIEngine.config_value('WorkingDir')), + + OptionGroup(preprocess=_preprocess_mounts) + .add_option('mounts', type='set', elements='dict', ansible_suboptions=dict( + target=dict(type='str', required=True), + source=dict(type='str'), + type=dict(type='str', choices=['bind', 'volume', 'tmpfs', 'npipe'], default='volume'), + read_only=dict(type='bool'), + consistency=dict(type='str', choices=['default', 'consistent', 'cached', 'delegated']), + propagation=dict(type='str', choices=['private', 'rprivate', 'shared', 'rshared', 'slave', 'rslave']), + no_copy=dict(type='bool'), + labels=dict(type='dict'), + volume_driver=dict(type='str'), + volume_options=dict(type='dict'), + tmpfs_size=dict(type='str'), + tmpfs_mode=dict(type='str'), + )) + .add_option('volumes', type='set', elements='str') + .add_option('volume_binds', type='set', elements='str', not_an_ansible_option=True, copy_comparison_from='volumes') + .add_docker_api(DockerAPIEngine( + get_value=_get_values_mounts, + get_expected_values=_get_expected_values_mounts, + set_value=_set_values_mounts, + )), ] # Options / option groups that are more complex: @@ -998,28 +1272,3 @@ def _preprocess_container_names(module, client, api_version, value): # .add_option('restart_retries', type='int') # .add_docker_api(..., ) # 
---------------- only for policy: update_parameter='RestartPolicy' - -# Options to convert / triage: -# mounts=dict(type='list', elements='dict', options=dict( -# target=dict(type='str', required=True), -# source=dict(type='str'), -# type=dict(type='str', choices=['bind', 'volume', 'tmpfs', 'npipe'], default='volume'), -# read_only=dict(type='bool'), -# consistency=dict(type='str', choices=['default', 'consistent', 'cached', 'delegated']), -# propagation=dict(type='str', choices=['private', 'rprivate', 'shared', 'rshared', 'slave', 'rslave']), -# no_copy=dict(type='bool'), -# labels=dict(type='dict'), -# volume_driver=dict(type='str'), -# volume_options=dict(type='dict'), -# tmpfs_size=dict(type='str'), -# tmpfs_mode=dict(type='str'), -# )), -# sysctls=dict(type='dict'), -# tmpfs=dict(type='list', elements='str'), -# ulimits=dict(type='list', elements='str'), -# volumes=dict(type='list', elements='str'), -# -# explicit_types = dict( -# mounts='set(dict)', -# ulimits='set(dict)', -# ) diff --git a/plugins/modules/docker_container2.py b/plugins/modules/docker_container2.py index 9cf79fad2..3740dcd8e 100644 --- a/plugins/modules/docker_container2.py +++ b/plugins/modules/docker_container2.py @@ -1332,6 +1332,8 @@ def _parse_comparisons(self): all_module_options = self._collect_all_module_params() comp_aliases = {} for option_name, option in self.all_options.items(): + if option.not_an_ansible_option: + continue comp_aliases[option_name] = option_name for alias in option.ansible_aliases: comp_aliases[alias] = option_name @@ -1365,7 +1367,7 @@ def _parse_comparisons(self): if key_main in all_module_options: self.fail("The module option '%s' cannot be specified in the comparisons dict, " "since it does not correspond to container's state!" % key) - if key not in self.all_options: + if key not in self.all_options or self.all_options[key].not_an_ansible_option: self.fail("Unknown module option '%s' in comparisons dict!" 
% key) key_main = key if key_main in comp_aliases_used: @@ -1380,6 +1382,10 @@ def _parse_comparisons(self): self.all_options[key_main].comparison = value else: self.fail("Unknown comparison mode '%s'!" % value) + # Copy values + for option in self.all_options.values(): + if option.copy_comparison_from is not None: + option.comparison = self.all_options[option.copy_comparison_from].comparison # Check legacy values if self.module.params['ignore_image'] and self.all_options['image'].comparison != 'ignore': self.module.warn('The ignore_image option has been overridden by the comparisons option!') @@ -1413,7 +1419,7 @@ def _collect_params(self, active_options): values = {} engine = options.get_engine('docker_api') for option in options.options: - if self.module.params[option.name] is not None: + if not option.not_an_ansible_option and self.module.params[option.name] is not None: values[option.name] = self.module.params[option.name] values = options.preprocess(self.module, values) engine.preprocess_value(self.module, self.client, self.client.docker_api_version, options.options, values) @@ -2123,7 +2129,8 @@ def main(): engine = options.get_engine('docker_api') if engine.min_docker_api is not None: for option in options.options: - option_minimal_versions[option.name] = {'docker_api_version': engine.min_docker_api} + if not option.not_an_ansible_option: + option_minimal_versions[option.name] = {'docker_api_version': engine.min_docker_api} active_options.append(options) From 608d46497e3adf048da9e5371b304722036450e9 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 10 Jul 2022 22:24:34 +0200 Subject: [PATCH 13/38] Add more options. 
--- plugins/module_utils/module_container.py | 119 +++++++++++++++++++---- 1 file changed, 101 insertions(+), 18 deletions(-) diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container.py index 7ee2bde99..ef9a33058 100644 --- a/plugins/module_utils/module_container.py +++ b/plugins/module_utils/module_container.py @@ -2,16 +2,21 @@ # Copyright 2016 Red Hat | Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +import os +import re import shlex from functools import partial from ansible.module_utils.common.text.converters import to_native, to_text from ansible.module_utils.common.text.formatters import human_to_bytes +from ansible.module_utils.six import string_types from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion from ansible_collections.community.docker.plugins.module_utils.util import ( + clean_dict_booleans_for_docker_api, + omit_none_from_dict, parse_healthcheck, ) @@ -456,7 +461,9 @@ def _preprocess_env(module, values): formatted_env = [] for key, value in final_env: formatted_env.append('%s=%s' % (key, value)) - return formatted_env + return { + 'env': formatted_env, + } def _get_expected_env_value(module, client, api_version, image, value, sentry): @@ -686,7 +693,7 @@ def _preprocess_ulimits(module, values): if 'ulimits' not in values: return values result = [] - for value in values['ulimits']: + for limit in values['ulimits']: limits = dict() pieces = limit.split(':') if len(pieces) >= 2: @@ -695,7 +702,7 @@ def _preprocess_ulimits(module, values): limits['Hard'] = int(pieces[1]) if len(pieces) == 3: limits['Hard'] = int(pieces[2]) - results.append(limits) + result.append(limits) return { 'ulimits': result, } @@ -727,7 +734,7 @@ def check_collision(t, name): module.fail_json(msg='source must be specified for mount "{0}" of type "{1}"'.format(target, mount_type)) for option, req_mount_type in _MOUNT_OPTION_TYPES.items(): if 
mount[option] is not None and mount_type != req_mount_type: - self.client.fail('{0} cannot be specified for mount "{1}" of type "{2}" (needs type "{3}")'.format(option, target, mount_type, req_mount_type)) + module.fail_json(msg='{0} cannot be specified for mount "{1}" of type "{2}" (needs type "{3}")'.format(option, target, mount_type, req_mount_type)) # Streamline options volume_options = mount_dict.pop('volume_options') @@ -760,13 +767,13 @@ def check_collision(t, name): module.fail_json(msg='Found invalid volumes mode: {0}'.format(mode)) if re.match(r'[.~]', host): host = os.path.abspath(os.path.expanduser(host)) - f(container, 'volumes') + check_collision(container, 'volumes') new_vols.append("%s:%s:%s" % (host, container, mode)) continue elif len(parts) == 2: if not is_volume_permissions(parts[1]) and re.match(r'[.~]', parts[0]): host = os.path.abspath(os.path.expanduser(parts[0])) - f(parts[1], 'volumes') + check_collision(parts[1], 'volumes') new_vols.append("%s:%s:rw" % (host, parts[1])) continue check_collision(vol.split(':', 1)[0], 'volumes') @@ -947,6 +954,75 @@ def _set_values_mounts(module, data, api_version, options, values): data['HostConfig']['Binds'] = values['volume_binds'] +def _preprocess_log(module, values): + result = {} + if 'log_driver' not in values: + return result + result['log_driver'] = values['log_driver'] + if 'log_options' in values: + options = {} + for k, v in values['log_options'].items(): + if not isinstance(v, string_types): + module.warn( + "Non-string value found for log_options option '%s'. The value is automatically converted to '%s'. " + "If this is not correct, or you want to avoid such warnings, please quote the value." 
% ( + k, to_text(v, errors='surrogate_or_strict')) + ) + v = to_text(v, errors='surrogate_or_strict') + options[k] = v + result['log_options'] = options + return result + + +def _get_values_log(module, container, api_version, options): + log_config = container['HostConfig'].get('LogConfig') or {} + return { + 'log_driver': log_config.get('Type'), + 'log_options': log_config.get('Config'), + } + + +def _set_values_log(module, data, api_version, options, values): + if 'log_driver' not in values: + return + log_config = { + 'Type': values['log_driver'], + 'Config': values.get('log_options') or {}, + } + if 'HostConfig' not in data: + data['HostConfig'] = {} + data['HostConfig']['LogConfig'] = log_config + + +def _get_values_restart(module, container, api_version, options): + restart_policy = container['HostConfig'].get('RestartPolicy') or {} + return { + 'restart_policy': restart_policy.get('Name'), + 'restart_retries': restart_policy.get('MaximumRetryCount'), + } + + +def _set_values_restart(module, data, api_version, options, values): + if 'restart_policy' not in values: + return + restart_policy = { + 'Name': values['restart_policy'], + 'MaximumRetryCount': values.get('restart_retries'), + } + if 'HostConfig' not in data: + data['HostConfig'] = {} + data['HostConfig']['RestartPolicy'] = restart_policy + + +def _update_value_restart(module, data, api_version, options, values): + if 'restart_policy' not in values: + return + data['RestartPolicy'] = { + 'Name': values['restart_policy'], + 'MaximumRetryCount': values.get('restart_retries'), + } + + def _preprocess_container_names(module, client, api_version, value): if value is None or not value.startswith('container:'): return value @@ -1125,6 +1201,14 @@ def _preprocess_container_names(module, client, api_version, value): .add_option('links', type='set', elements='list', ansible_elements='str') .add_docker_api(DockerAPIEngine.config_value('Links', preprocess_value=_preprocess_links)), + 
OptionGroup(preprocess=_preprocess_log, ansible_required_by={'log_options': ['log_driver']}) + .add_option('log_driver', type='str') + .add_option('log_options', type='dict', ansible_aliases=['log_opt'], needs_no_suboptions=True) + .add_docker_api(DockerAPIEngine( + get_value=_get_values_log, + set_value=_set_values_log, + )), + OptionGroup(preprocess=_preprocess_mac_address) + .add_option('mac_address', type='str') + .add_docker_api(DockerAPIEngine.config_value('MacAddress')), @@ -1177,6 +1261,16 @@ def _preprocess_container_names(module, client, api_version, value): .add_option('read_only', type='bool') .add_docker_api(DockerAPIEngine.host_config_value('ReadonlyRootfs')), + OptionGroup(ansible_required_by={'restart_retries': ['restart_policy']}) + .add_option('restart_policy', type='str', ansible_choices=['no', 'on-failure', 'always', 'unless-stopped']) + .add_option('restart_retries', type='int') + .add_docker_api(DockerAPIEngine( + get_value=_get_values_restart, + set_value=_set_values_restart, + update_value=_update_value_restart, + )), + OptionGroup() + .add_option('runtime', type='str') + .add_docker_api(DockerAPIEngine.host_config_value('Runtime')), @@ -1252,23 +1346,12 @@ def _preprocess_container_names(module, client, api_version, value): .add_option('volume_binds', type='set', elements='str', not_an_ansible_option=True, copy_comparison_from='volumes') .add_docker_api(DockerAPIEngine( get_value=_get_values_mounts, - get_expected_values=_get_expected_values_mounts, set_value=_set_values_mounts, )), ] + # Options / option groups that are more complex: # exposed_ports=dict(type='list', elements='str', aliases=['exposed', 'expose']), # publish_all_ports=dict(type='bool'), # published_ports=dict(type='list', elements='str', aliases=['ports']), - -# OptionGroup(ansible_required_by={'log_options': ['log_driver']}) -# .add_option('log_driver', type='str') -# .add_option('log_options', type='dict', ansible_aliases=['log_opt']) -# 
.add_docker_api(...) - -# OptionGroup(ansible_required_by={'restart_retries': ['restart_policy']}) -# .add_option('restart_policy', type='str', ansible_choices=['no', 'on-failure', 'always', 'unless-stopped']) -# .add_option('restart_retries', type='int') -# .add_docker_api(..., ) -# ---------------- only for policy: update_parameter='RestartPolicy' From 062bb3f0cd0162f764849d46b18be1d8e9a951a7 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 10 Jul 2022 23:13:00 +0200 Subject: [PATCH 14/38] The last option. --- changelogs/fragments/docker_container.yml | 1 + plugins/module_utils/module_container.py | 264 ++++++++++++++++++++-- plugins/modules/docker_container2.py | 14 ++ 3 files changed, 254 insertions(+), 25 deletions(-) diff --git a/changelogs/fragments/docker_container.yml b/changelogs/fragments/docker_container.yml index f922092fd..d49aea56a 100644 --- a/changelogs/fragments/docker_container.yml +++ b/changelogs/fragments/docker_container.yml @@ -5,6 +5,7 @@ major_changes: - "docker_container - the module was completely rewritten from scratch (https://github.com/ansible-collections/community.docker/pull/...)." breaking_changes: - "docker_container - ``publish_all_ports`` is no longer ignored in ``comparisons`` (https://github.com/ansible-collections/community.docker/pull/...)." + - "docker_container - ``exposed_ports`` is no longer ignored in ``comparisons``. Before, its value was assumed to be identical with the value of ``published_ports`` (https://github.com/ansible-collections/community.docker/pull/...)." - "docker_container - ``log_options`` can no longer be specified when ``log_driver`` is not specified (https://github.com/ansible-collections/community.docker/pull/...)." - "docker_container - ``restart_retries`` can no longer be specified when ``restart_policy`` is not specified (https://github.com/ansible-collections/community.docker/pull/...)." 
- "docker_container - ``stop_timeout`` is no longer ignored for idempotency if told to be not ignored in ``comparisons``. So far it defaulted to ``ignore`` there, and setting it to ``strict`` had no effect (https://github.com/ansible-collections/community.docker/pull/...)." diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container.py index ef9a33058..549d4efea 100644 --- a/plugins/module_utils/module_container.py +++ b/plugins/module_utils/module_container.py @@ -20,15 +20,13 @@ parse_healthcheck, ) -from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import parse_env_file +from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import ( + parse_env_file, + convert_port_bindings, +) -def _get_ansible_type(type): - if type == 'set': - return 'list' - if type not in ('list', 'dict', 'bool', 'int', 'float', 'str'): - raise Exception('Invalid type "%s"' % (type, )) - return type +_DEFAULT_IP_REPLACEMENT_STRING = '[[DEFAULT_IP:iewahhaeB4Sae6Aen8IeShairoh4zeph7xaekoh8Geingunaesaeweiy3ooleiwi]]' _MOUNT_OPTION_TYPES = dict( @@ -42,6 +40,14 @@ def _get_ansible_type(type): ) +def _get_ansible_type(type): + if type == 'set': + return 'list' + if type not in ('list', 'dict', 'bool', 'int', 'float', 'str'): + raise Exception('Invalid type "%s"' % (type, )) + return type + + class Option(object): def __init__( self, @@ -331,14 +337,14 @@ def update_value(module, data, api_version, options, values): ) -def is_volume_permissions(mode): +def _is_volume_permissions(mode): for part in mode.split(','): if part not in ('rw', 'ro', 'z', 'Z', 'consistent', 'delegated', 'cached', 'rprivate', 'private', 'rshared', 'shared', 'rslave', 'slave', 'nocopy'): return False return True -def parse_port_range(range_or_port, client): +def _parse_port_range(range_or_port, module): ''' Parses a string containing either a single port or a range of ports. 
@@ -348,18 +354,18 @@ def parse_port_range(range_or_port, client): try: start, end = [int(port) for port in range_or_port.split('-')] except Exception: - client.fail('Invalid port range: "{0}"'.format(range_or_port)) + module.fail_json(msg='Invalid port range: "{0}"'.format(range_or_port)) if end < start: - client.fail('Invalid port range: "{0}"'.format(range_or_port)) + module.fail_json(msg='Invalid port range: "{0}"'.format(range_or_port)) return list(range(start, end + 1)) else: try: return [int(range_or_port)] except Exception: - client.fail('Invalid port: "{0}"'.format(range_or_port)) + module.fail_json(msg='Invalid port: "{0}"'.format(range_or_port)) -def split_colon_ipv6(text, client): +def _split_colon_ipv6(text, module): ''' Split string by ':', while keeping IPv6 addresses in square brackets in one component. ''' @@ -374,7 +380,7 @@ def split_colon_ipv6(text, client): break j = text.find(']', i) if j < 0: - client.fail('Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(text, i + 1)) + module.fail_json(msg='Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(text, i + 1)) result.extend(text[start:i].split(':')) k = text.find(':', j) if k < 0: @@ -389,6 +395,29 @@ def split_colon_ipv6(text, client): return result +def _normalize_port(port): + if '/' not in port: + return port + '/tcp' + return port + + +def _get_default_host_ip(module, client): + if module.params['default_host_ip'] is not None: + return module.params['default_host_ip'] + ip = '0.0.0.0' + for network_data in module.params['networks'] or []: + if network_data.get('name'): + network = client.get_network(network_data['name']) + if network is None: + module.fail_json( + msg="Cannot inspect the network '{0}' to determine the default IP".format(network_data['name']), + ) + if network.get('Driver') == 'bridge' and network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'): + ip = 
network['Options']['com.docker.network.bridge.host_binding_ipv4'] + break + return ip + + def _get_value_detach_interactive(module, container, api_version, options): attach_stdin = container.get('AttachStdin') attach_stderr = container.get('AttachStderr') @@ -763,7 +792,7 @@ def check_collision(t, name): parts = vol.split(':') if len(parts) == 3: host, container, mode = parts - if not is_volume_permissions(mode): + if not _is_volume_permissions(mode): module.fail_json(msg='Found invalid volumes mode: {0}'.format(mode)) if re.match(r'[.~]', host): host = os.path.abspath(os.path.expanduser(host)) @@ -771,7 +800,7 @@ def check_collision(t, name): new_vols.append("%s:%s:%s" % (host, container, mode)) continue elif len(parts) == 2: - if not is_volume_permissions(parts[1]) and re.match(r'[.~]', parts[0]): + if not _is_volume_permissions(parts[1]) and re.match(r'[.~]', parts[0]): host = os.path.abspath(os.path.expanduser(parts[0])) check_collision(parts[1], 'volumes') new_vols.append("%s:%s:rw" % (host, parts[1])) @@ -786,10 +815,10 @@ def check_collision(t, name): parts = vol.split(':') if len(parts) == 3: host, container, mode = parts - if not is_volume_permissions(mode): + if not _is_volume_permissions(mode): module.fail_json(msg='Found invalid volumes mode: {0}'.format(mode)) elif len(parts) == 2: - if not is_volume_permissions(parts[1]): + if not _is_volume_permissions(parts[1]): host, container, mode = (parts + ['rw']) if host is not None: new_binds.append('%s:%s:%s' % (host, container, mode)) @@ -880,7 +909,7 @@ def _get_expected_values_mounts(module, client, api_version, options, image, val if len(parts) == 3: continue if len(parts) == 2: - if not is_volume_permissions(parts[1]): + if not _is_volume_permissions(parts[1]): continue expected_vols[vol] = dict() if expected_vols: @@ -1023,6 +1052,184 @@ def _update_value_restart(module, data, api_version, options, values): } +def _preprocess_ports(module, values): + if 'published_ports' in values: + if 'all' in 
values['published_ports']: + module.fail_json( + msg='Specifying "all" in published_ports is no longer allowed. Set publish_all_ports to "true" instead ' + 'to randomly assign port mappings for those not specified by published_ports.') + + binds = {} + for port in values['published_ports']: + parts = _split_colon_ipv6(to_text(port, errors='surrogate_or_strict'), module) + container_port = parts[-1] + protocol = '' + if '/' in container_port: + container_port, protocol = parts[-1].split('/') + container_ports = _parse_port_range(container_port, module) + + p_len = len(parts) + if p_len == 1: + port_binds = len(container_ports) * [(_DEFAULT_IP_REPLACEMENT_STRING, )] + elif p_len == 2: + if len(container_ports) == 1: + port_binds = [(_DEFAULT_IP_REPLACEMENT_STRING, parts[0])] + else: + port_binds = [(_DEFAULT_IP_REPLACEMENT_STRING, port) for port in _parse_port_range(parts[0], module)] + elif p_len == 3: + # We only allow IPv4 and IPv6 addresses for the bind address + ipaddr = parts[0] + if not re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', parts[0]) and not re.match(r'^\[[0-9a-fA-F:]+(?:|%[^\]/]+)\]$', ipaddr): + module.fail_json( + msg='Bind addresses for published ports must be IPv4 or IPv6 addresses, not hostnames. ' + 'Use the dig lookup to resolve hostnames. (Found hostname: {0})'.format(ipaddr) + ) + if re.match(r'^\[[0-9a-fA-F:]+\]$', ipaddr): + ipaddr = ipaddr[1:-1] + if parts[1]: + if len(container_ports) == 1: + port_binds = [(ipaddr, parts[1])] + else: + port_binds = [(ipaddr, port) for port in _parse_port_range(parts[1], module)] + else: + port_binds = len(container_ports) * [(ipaddr,)] + else: + module.fail_json( + msg='Invalid port description "%s" - expected 1 to 3 colon-separated parts, but got %d. ' + 'Maybe you forgot to use square brackets ([...]) around an IPv6 address?' 
% (port, p_len) + ) + + for bind, container_port in zip(port_binds, container_ports): + idx = '{0}/{1}'.format(container_port, protocol) if protocol else container_port + if idx in binds: + old_bind = binds[idx] + if isinstance(old_bind, list): + old_bind.append(bind) + else: + binds[idx] = [old_bind, bind] + else: + binds[idx] = bind + values['published_ports'] = binds + + exposed = [] + if 'exposed_ports' in values: + for port in values['exposed_ports']: + port = to_text(port, errors='surrogate_or_strict').strip() + protocol = 'tcp' + match = re.search(r'(/.+$)', port) + if match: + protocol = match.group(1).replace('/', '') + port = re.sub(r'/.+$', '', port) + exposed.append((port, protocol)) + if 'published_ports' in values: + # Any published port should also be exposed + for publish_port in values['published_ports']: + match = False + if isinstance(publish_port, string_types) and '/' in publish_port: + port, protocol = publish_port.split('/') + port = int(port) + else: + protocol = 'tcp' + port = int(publish_port) + for exposed_port in exposed: + if exposed_port[1] != protocol: + continue + if isinstance(exposed_port[0], string_types) and '-' in exposed_port[0]: + start_port, end_port = exposed_port[0].split('-') + if int(start_port) <= port <= int(end_port): + match = True + elif exposed_port[0] == port: + match = True + if not match: + exposed.append((port, protocol)) + values['ports'] = exposed + return values + + +def _get_values_ports(module, container, api_version, options): + host_config = container['HostConfig'] + config = container['Config'] + + # "ExposedPorts": null returns None type & causes AttributeError - PR #5517 + if config.get('ExposedPorts') is not None: + expected_exposed = [_normalize_port(p) for p in config.get('ExposedPorts', dict()).keys()] + else: + expected_exposed = [] + + return { + 'published_ports': host_config.get('PortBindings'), + 'exposed_ports': expected_exposed, + 'publish_all_ports': host_config.get('PublishAllPorts'), + } 
+ + +def _get_expected_values_ports(module, client, api_version, options, image, values): + expected_values = {} + + if 'published_ports' in values: + expected_bound_ports = {} + for container_port, config in values['published_ports'].items(): + if isinstance(container_port, int): + container_port = "%s/tcp" % container_port + if len(config) == 1: + if isinstance(config[0], int): + expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': config[0]}] + else: + expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': ""}] + elif isinstance(config[0], tuple): + expected_bound_ports[container_port] = [] + for host_ip, host_port in config: + expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': to_text(host_port, errors='surrogate_or_strict')}) + else: + expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': to_text(config[1], errors='surrogate_or_strict')}] + expected_values['published_ports'] = expected_bound_ports + + image_ports = [] + if image: + image_exposed_ports = image['Config'].get('ExposedPorts') or {} + image_ports = [_normalize_port(p) for p in image_exposed_ports] + param_ports = [] + if 'ports' in values: + param_ports = [to_text(p[0], errors='surrogate_or_strict') + '/' + p[1] for p in values['ports']] + result = list(set(image_ports + param_ports)) + expected_values['exposed_ports'] = result + + if 'publish_all_ports' in values: + expected_values['publish_all_ports'] = values['publish_all_ports'] + + return expected_values + + +def _set_values_ports(module, data, api_version, options, values): + if 'ports' in values: + data['ExposedPorts'] = values['ports'] + if 'published_ports' in values: + if 'HostConfig' not in data: + data['HostConfig'] = {} + data['HostConfig']['PortBindings'] = convert_port_bindings(values['published_ports']) + if 'publish_all_ports' in values and values['publish_all_ports']: + if 'HostConfig' not in data: + data['HostConfig'] = {} + 
data['HostConfig']['PublishAllPorts'] = values['publish_all_ports'] + + +def _preprocess_value_ports(module, client, api_version, options, values): + if 'published_ports' not in values: + return values + found = False + for port_spec in values['published_ports'].values(): + if port_spec[0] == _DEFAULT_IP_REPLACEMENT_STRING: + found = True + break + if not found: + return values + default_ip = _get_default_host_ip(module, client) + for port, port_spec in values['published_ports'].items(): + if port_spec[0] == _DEFAULT_IP_REPLACEMENT_STRING: + values['published_ports'][port] = (default_ip, *port_spec[1:]) + return values + + def _preprocess_container_names(module, client, api_version, value): if value is None or not value.startswith('container:'): return value @@ -1346,12 +1553,19 @@ def _preprocess_container_names(module, client, api_version, value): .add_option('volume_binds', type='set', elements='str', not_an_ansible_option=True, copy_comparison_from='volumes') .add_docker_api(DockerAPIEngine( get_value=_get_values_mounts, + get_expected_values=_get_expected_values_mounts, set_value=_set_values_mounts, )), -] - -# Options / option groups that are more complex: -# exposed_ports=dict(type='list', elements='str', aliases=['exposed', 'expose']), -# publish_all_ports=dict(type='bool'), -# published_ports=dict(type='list', elements='str', aliases=['ports']), + OptionGroup(preprocess=_preprocess_ports) + .add_option('exposed_ports', type='set', elements='str', ansible_aliases=['exposed', 'expose']) + .add_option('publish_all_ports', type='bool') + .add_option('published_ports', type='set', elements='str', ansible_aliases=['ports']) + .add_option('ports', type='set', elements='str', not_an_ansible_option=True, default_comparison='ignore') + .add_docker_api(DockerAPIEngine( + get_value=_get_values_ports, + get_expected_values=_get_expected_values_ports, + set_value=_set_values_ports, + preprocess_value=_preprocess_value_ports, + )), +] diff --git 
a/plugins/modules/docker_container2.py b/plugins/modules/docker_container2.py index 3740dcd8e..1003276c7 100644 --- a/plugins/modules/docker_container2.py +++ b/plugins/modules/docker_container2.py @@ -1282,6 +1282,7 @@ def __init__(self, module, client, active_options): self.check_mode = self.module.check_mode self.param_cleanup = self.module.params['cleanup'] self.param_container_default_behavior = self.module.params['container_default_behavior'] + self.param_default_host_ip = self.module.params['default_host_ip'] self.param_debug = self.module.params['debug'] self.param_force_kill = self.module.params['force_kill'] self.param_image = self.module.params['image'] @@ -1305,6 +1306,19 @@ def __init__(self, module, client, active_options): self.diff = {} self.diff_tracker = DifferenceTracker() self.facts = {} + if self.param_default_host_ip: + valid_ip = False + if re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', self.param_default_host_ip): + valid_ip = True + if re.match(r'^\[[0-9a-fA-F:]+\]$', self.param_default_host_ip): + valid_ip = True + if re.match(r'^[0-9a-fA-F:]+$', self.param_default_host_ip): + self.param_default_host_ip = '[{0}]'.format(self.param_default_host_ip) + valid_ip = True + if not valid_ip: + self.fail('The value of default_host_ip must be an empty string, an IPv4 address, ' + 'or an IPv6 address. Got "{0}" instead.'.format(self.param_default_host_ip)) + def _collect_all_options(self, active_options): all_options = {} From 139ef71af0b4ade986ed478375b221313d9057c5 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 10 Jul 2022 23:15:31 +0200 Subject: [PATCH 15/38] Copy over. 
--- plugins/modules/docker_container.py | 2418 +++++--------------------- plugins/modules/docker_container2.py | 2175 ----------------------- 2 files changed, 481 insertions(+), 4112 deletions(-) delete mode 100644 plugins/modules/docker_container2.py diff --git a/plugins/modules/docker_container.py b/plugins/modules/docker_container.py index 474824896..beceaf804 100644 --- a/plugins/modules/docker_container.py +++ b/plugins/modules/docker_container.py @@ -670,8 +670,6 @@ pid_mode: description: - Set the PID namespace mode for the container. - - Note that Docker SDK for Python < 2.0 only supports C(host). Newer versions of the - Docker SDK for Python (docker) allow all values supported by the Docker daemon. type: str pids_limit: description: @@ -898,7 +896,6 @@ - "Felix Fontein (@felixfontein)" requirements: - - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0" - "Docker API >= 1.25" ''' @@ -1209,15 +1206,20 @@ from time import sleep from ansible.module_utils.common.text.formatters import human_to_bytes -from ansible.module_utils.six import string_types from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.module_utils.six import string_types from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion -from ansible_collections.community.docker.plugins.module_utils.common import ( +from ansible_collections.community.docker.plugins.module_utils.common_api import ( AnsibleDockerClient, RequestException, ) +from ansible_collections.community.docker.plugins.module_utils.module_container import ( + DockerAPIEngineDriver, + OPTIONS, + Option, +) from ansible_collections.community.docker.plugins.module_utils.util import ( DifferenceTracker, DockerBaseClass, @@ -1230,951 +1232,22 @@ DOCKER_COMMON_ARGS, ) -try: - from docker import utils - from ansible_collections.community.docker.plugins.module_utils.common import docker_version - if LooseVersion(docker_version) >= 
LooseVersion('1.10.0'): - from docker.types import Ulimit, LogConfig - from docker import types as docker_types - else: - from docker.utils.types import Ulimit, LogConfig - from docker.errors import DockerException, APIError, NotFound -except Exception: - # missing Docker SDK for Python handled in ansible.module_utils.docker.common - pass - - -REQUIRES_CONVERSION_TO_BYTES = [ - 'kernel_memory', - 'memory', - 'memory_reservation', - 'memory_swap', - 'shm_size' -] - - -def is_volume_permissions(mode): - for part in mode.split(','): - if part not in ('rw', 'ro', 'z', 'Z', 'consistent', 'delegated', 'cached', 'rprivate', 'private', 'rshared', 'shared', 'rslave', 'slave', 'nocopy'): - return False - return True - - -def parse_port_range(range_or_port, client): - ''' - Parses a string containing either a single port or a range of ports. - - Returns a list of integers for each port in the list. - ''' - if '-' in range_or_port: - try: - start, end = [int(port) for port in range_or_port.split('-')] - except Exception: - client.fail('Invalid port range: "{0}"'.format(range_or_port)) - if end < start: - client.fail('Invalid port range: "{0}"'.format(range_or_port)) - return list(range(start, end + 1)) - else: - try: - return [int(range_or_port)] - except Exception: - client.fail('Invalid port: "{0}"'.format(range_or_port)) - - -def split_colon_ipv6(text, client): - ''' - Split string by ':', while keeping IPv6 addresses in square brackets in one component. 
- ''' - if '[' not in text: - return text.split(':') - start = 0 - result = [] - while start < len(text): - i = text.find('[', start) - if i < 0: - result.extend(text[start:].split(':')) - break - j = text.find(']', i) - if j < 0: - client.fail('Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(text, i + 1)) - result.extend(text[start:i].split(':')) - k = text.find(':', j) - if k < 0: - result[-1] += text[i:] - start = len(text) - else: - result[-1] += text[i:k] - if k == len(text): - result.append('') - break - start = k + 1 - return result - - -class TaskParameters(DockerBaseClass): - ''' - Access and parse module parameters - ''' - - def __init__(self, client): - super(TaskParameters, self).__init__() - self.client = client - - self.auto_remove = None - self.blkio_weight = None - self.capabilities = None - self.cap_drop = None - self.cleanup = None - self.command = None - self.cpu_period = None - self.cpu_quota = None - self.cpus = None - self.cpuset_cpus = None - self.cpuset_mems = None - self.cpu_shares = None - self.debug = None - self.default_host_ip = None - self.detach = None - self.devices = None - self.device_read_bps = None - self.device_write_bps = None - self.device_read_iops = None - self.device_write_iops = None - self.device_requests = None - self.dns_servers = None - self.dns_opts = None - self.dns_search_domains = None - self.domainname = None - self.env = None - self.env_file = None - self.entrypoint = None - self.etc_hosts = None - self.exposed_ports = None - self.force_kill = None - self.groups = None - self.healthcheck = None - self.hostname = None - self.ignore_image = None - self.image = None - self.init = None - self.interactive = None - self.ipc_mode = None - self.keep_volumes = None - self.kernel_memory = None - self.kill_signal = None - self.labels = None - self.links = None - self.log_driver = None - self.output_logs = None - self.log_options = None - self.mac_address = None - self.memory = None - 
self.memory_reservation = None - self.memory_swap = None - self.memory_swappiness = None - self.mounts = None - self.name = None - self.network_mode = None - self.userns_mode = None - self.networks = None - self.networks_cli_compatible = None - self.oom_killer = None - self.oom_score_adj = None - self.paused = None - self.pid_mode = None - self.pids_limit = None - self.privileged = None - self.purge_networks = None - self.pull = None - self.read_only = None - self.recreate = None - self.removal_wait_timeout = None - self.restart = None - self.restart_retries = None - self.restart_policy = None - self.runtime = None - self.shm_size = None - self.security_opts = None - self.state = None - self.stop_signal = None - self.stop_timeout = None - self.storage_opts = None - self.tmpfs = None - self.tty = None - self.user = None - self.uts = None - self.volumes = None - self.volume_binds = dict() - self.volumes_from = None - self.volume_driver = None - self.working_dir = None - - for key, value in client.module.params.items(): - setattr(self, key, value) - self.comparisons = client.comparisons - - # If state is 'absent', parameters do not have to be parsed or interpreted. - # Only the container's name is needed. - if self.state == 'absent': - return - - if self.default_host_ip: - valid_ip = False - if re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', self.default_host_ip): - valid_ip = True - if re.match(r'^\[[0-9a-fA-F:]+\]$', self.default_host_ip): - valid_ip = True - if re.match(r'^[0-9a-fA-F:]+$', self.default_host_ip): - self.default_host_ip = '[{0}]'.format(self.default_host_ip) - valid_ip = True - if not valid_ip: - self.fail('The value of default_host_ip must be an empty string, an IPv4 address, ' - 'or an IPv6 address. 
Got "{0}" instead.'.format(self.default_host_ip)) - - if self.cpus is not None: - self.cpus = int(round(self.cpus * 1E9)) - - if self.groups: - # In case integers are passed as groups, we need to convert them to - # strings as docker internally treats them as strings. - self.groups = [to_text(g, errors='surrogate_or_strict') for g in self.groups] - - for param_name in REQUIRES_CONVERSION_TO_BYTES: - if client.module.params.get(param_name): - if param_name == 'memory_swap' and client.module.params.get(param_name) in ['unlimited', '-1']: - setattr(self, param_name, -1) - else: - try: - setattr(self, param_name, human_to_bytes(client.module.params.get(param_name))) - except ValueError as exc: - self.fail("Failed to convert %s to bytes: %s" % (param_name, to_native(exc))) - - self.published_ports = self._parse_publish_ports() - - self.ports = self._parse_exposed_ports(self.published_ports) - self.log("expose ports:") - self.log(self.ports, pretty_print=True) - - self.links = self._parse_links(self.links) - - if self.volumes: - self.volumes = self._expand_host_paths() - - self.tmpfs = self._parse_tmpfs() - self.env = self._get_environment() - self.ulimits = self._parse_ulimits() - self.sysctls = self._parse_sysctls() - self.log_config = self._parse_log_config() - try: - self.healthcheck, self.disable_healthcheck = parse_healthcheck(self.healthcheck) - except ValueError as e: - self.fail(to_native(e)) - - self.exp_links = None - self.volume_binds = self._get_volume_binds(self.volumes) - self.pid_mode = self._replace_container_names(self.pid_mode) - self.ipc_mode = self._replace_container_names(self.ipc_mode) - self.network_mode = self._replace_container_names(self.network_mode) - - self.log("volumes:") - self.log(self.volumes, pretty_print=True) - self.log("volume binds:") - self.log(self.volume_binds, pretty_print=True) - - if self.networks: - for network in self.networks: - network['id'] = self._get_network_id(network['name']) - if not network['id']: - 
self.fail("Parameter error: network named %s could not be found. Does it exist?" % network['name']) - if network.get('links'): - network['links'] = self._parse_links(network['links']) - - if self.mac_address: - # Ensure the MAC address uses colons instead of hyphens for later comparison - self.mac_address = self.mac_address.replace('-', ':') - - if client.module.params['command_handling'] == 'correct': - if self.entrypoint is not None: - self.entrypoint = [to_text(x, errors='surrogate_or_strict') for x in self.entrypoint] - - if self.command is not None: - if not isinstance(self.command, list): - # convert from str to list - self.command = shlex.split(to_text(self.command, errors='surrogate_or_strict')) - self.command = [to_text(x, errors='surrogate_or_strict') for x in self.command] - else: - if self.entrypoint: - # convert from list to str. - self.entrypoint = shlex.split(' '.join([to_text(x, errors='surrogate_or_strict') for x in self.entrypoint])) - self.entrypoint = [to_text(x, errors='surrogate_or_strict') for x in self.entrypoint] - - if self.command: - # convert from list to str - if isinstance(self.command, list): - self.command = ' '.join([to_text(x, errors='surrogate_or_strict') for x in self.command]) - else: - self.command = to_text(self.command, errors='surrogate_or_strict') - self.command = [to_text(x, errors='surrogate_or_strict') for x in shlex.split(self.command)] - - self.mounts_opt, self.expected_mounts = self._process_mounts() - - self._check_mount_target_collisions() - - for param_name in ["device_read_bps", "device_write_bps"]: - if client.module.params.get(param_name): - self._process_rate_bps(option=param_name) - - for param_name in ["device_read_iops", "device_write_iops"]: - if client.module.params.get(param_name): - self._process_rate_iops(option=param_name) - - if self.device_requests: - for dr_index, dr in enumerate(self.device_requests): - # Make sure that capabilities are lists of lists of strings - if dr['capabilities']: - for 
or_index, or_list in enumerate(dr['capabilities']): - for and_index, and_term in enumerate(or_list): - if not isinstance(and_term, string_types): - self.fail( - "device_requests[{0}].capabilities[{1}][{2}] is not a string".format( - dr_index, or_index, and_index)) - or_list[and_index] = to_native(and_term) - # Make sure that options is a dictionary mapping strings to strings - if dr['options']: - dr['options'] = clean_dict_booleans_for_docker_api(dr['options']) - - def fail(self, msg): - self.client.fail(msg) - - @property - def update_parameters(self): - ''' - Returns parameters used to update a container - ''' - - update_parameters = dict( - blkio_weight='blkio_weight', - cpu_period='cpu_period', - cpu_quota='cpu_quota', - cpu_shares='cpu_shares', - cpuset_cpus='cpuset_cpus', - cpuset_mems='cpuset_mems', - mem_limit='memory', - mem_reservation='memory_reservation', - memswap_limit='memory_swap', - kernel_memory='kernel_memory', - restart_policy='restart_policy', - ) - - result = dict() - for key, value in update_parameters.items(): - if getattr(self, value, None) is not None: - if key == 'restart_policy' and self.client.option_minimal_versions[value]['supported']: - restart_policy = dict(Name=self.restart_policy, - MaximumRetryCount=self.restart_retries) - result[key] = restart_policy - elif self.client.option_minimal_versions[value]['supported']: - result[key] = getattr(self, value) - return result - - @property - def create_parameters(self): - ''' - Returns parameters used to create a container - ''' - create_params = dict( - command='command', - domainname='domainname', - hostname='hostname', - user='user', - detach='detach', - stdin_open='interactive', - tty='tty', - ports='ports', - environment='env', - name='name', - entrypoint='entrypoint', - mac_address='mac_address', - labels='labels', - stop_signal='stop_signal', - working_dir='working_dir', - stop_timeout='stop_timeout', - healthcheck='healthcheck', - ) - - if self.client.docker_py_version < 
LooseVersion('3.0'): - # cpu_shares and volume_driver moved to create_host_config in > 3 - create_params['cpu_shares'] = 'cpu_shares' - create_params['volume_driver'] = 'volume_driver' - - result = dict( - host_config=self._host_config(), - volumes=self._get_mounts(), - ) - - for key, value in create_params.items(): - if getattr(self, value, None) is not None: - if self.client.option_minimal_versions[value]['supported']: - result[key] = getattr(self, value) - - if self.disable_healthcheck: - # Make sure image's health check is overridden - result['healthcheck'] = {'test': ['NONE']} - - if self.networks_cli_compatible and self.networks: - network = self.networks[0] - params = dict() - for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'): - if network.get(para): - params[para] = network[para] - network_config = dict() - network_config[network['name']] = self.client.create_endpoint_config(**params) - result['networking_config'] = self.client.create_networking_config(network_config) - return result - - def _expand_host_paths(self): - new_vols = [] - for vol in self.volumes: - if ':' in vol: - parts = vol.split(':') - if len(parts) == 3: - host, container, mode = parts - if not is_volume_permissions(mode): - self.fail('Found invalid volumes mode: {0}'.format(mode)) - if re.match(r'[.~]', host): - host = os.path.abspath(os.path.expanduser(host)) - new_vols.append("%s:%s:%s" % (host, container, mode)) - continue - elif len(parts) == 2: - if not is_volume_permissions(parts[1]) and re.match(r'[.~]', parts[0]): - host = os.path.abspath(os.path.expanduser(parts[0])) - new_vols.append("%s:%s:rw" % (host, parts[1])) - continue - new_vols.append(vol) - return new_vols - - def _get_mounts(self): - ''' - Return a list of container mounts. 
- :return: - ''' - result = [] - if self.volumes: - for vol in self.volumes: - # Only pass anonymous volumes to create container - if ':' in vol: - parts = vol.split(':') - if len(parts) == 3: - continue - if len(parts) == 2: - if not is_volume_permissions(parts[1]): - continue - result.append(vol) - self.log("mounts:") - self.log(result, pretty_print=True) - return result - - def _host_config(self): - ''' - Returns parameters used to create a HostConfig object - ''' - - host_config_params = dict( - port_bindings='published_ports', - publish_all_ports='publish_all_ports', - links='links', - privileged='privileged', - cgroup_parent='cgroup_parent', - dns='dns_servers', - dns_opt='dns_opts', - dns_search='dns_search_domains', - binds='volume_binds', - volumes_from='volumes_from', - network_mode='network_mode', - userns_mode='userns_mode', - cap_add='capabilities', - cap_drop='cap_drop', - extra_hosts='etc_hosts', - read_only='read_only', - ipc_mode='ipc_mode', - security_opt='security_opts', - ulimits='ulimits', - sysctls='sysctls', - log_config='log_config', - mem_limit='memory', - memswap_limit='memory_swap', - mem_swappiness='memory_swappiness', - oom_score_adj='oom_score_adj', - oom_kill_disable='oom_killer', - shm_size='shm_size', - group_add='groups', - devices='devices', - pid_mode='pid_mode', - tmpfs='tmpfs', - init='init', - uts_mode='uts', - runtime='runtime', - auto_remove='auto_remove', - device_read_bps='device_read_bps', - device_write_bps='device_write_bps', - device_read_iops='device_read_iops', - device_write_iops='device_write_iops', - pids_limit='pids_limit', - mounts='mounts', - nano_cpus='cpus', - storage_opt='storage_opts', - ) - - if self.client.docker_py_version >= LooseVersion('1.9'): - # blkio_weight can always be updated, but can only be set on creation - # when Docker SDK for Python and Docker API are new enough - host_config_params['blkio_weight'] = 'blkio_weight' - - if self.client.docker_py_version >= LooseVersion('3.0'): - # cpu_shares 
and volume_driver moved to create_host_config in > 3 - host_config_params['cpu_shares'] = 'cpu_shares' - host_config_params['volume_driver'] = 'volume_driver' - - params = dict() - for key, value in host_config_params.items(): - if getattr(self, value, None) is not None: - if self.client.option_minimal_versions[value]['supported']: - params[key] = getattr(self, value) - - if self.restart_policy: - params['restart_policy'] = dict(Name=self.restart_policy, - MaximumRetryCount=self.restart_retries) - - if 'mounts' in params: - params['mounts'] = self.mounts_opt - - if self.device_requests is not None: - params['device_requests'] = [dict((k, v) for k, v in dr.items() if v is not None) for dr in self.device_requests] - - return self.client.create_host_config(**params) - - def get_default_host_ip(self): - if self.default_host_ip is not None: - return self.default_host_ip - ip = '0.0.0.0' - if not self.networks: - return ip - for net in self.networks: - if net.get('name'): - try: - network = self.client.inspect_network(net['name']) - if network.get('Driver') == 'bridge' and \ - network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'): - ip = network['Options']['com.docker.network.bridge.host_binding_ipv4'] - break - except NotFound as nfe: - self.client.fail( - "Cannot inspect the network '{0}' to determine the default IP: {1}".format(net['name'], to_native(nfe)), - exception=traceback.format_exc() - ) - return ip - - def _parse_publish_ports(self): - ''' - Parse ports from docker CLI syntax - ''' - if self.published_ports is None: - return None - - if 'all' in self.published_ports: - self.client.module.fail_json( - msg='Specifying "all" in published_ports is no longer allowed. 
Set publish_all_ports to "true" instead ' - 'to randomly assign port mappings for those not specified by published_ports.') - - default_ip = self.get_default_host_ip() - - binds = {} - for port in self.published_ports: - parts = split_colon_ipv6(to_text(port, errors='surrogate_or_strict'), self.client) - container_port = parts[-1] - protocol = '' - if '/' in container_port: - container_port, protocol = parts[-1].split('/') - container_ports = parse_port_range(container_port, self.client) - - p_len = len(parts) - if p_len == 1: - port_binds = len(container_ports) * [(default_ip,)] - elif p_len == 2: - if len(container_ports) == 1: - port_binds = [(default_ip, parts[0])] - else: - port_binds = [(default_ip, port) for port in parse_port_range(parts[0], self.client)] - elif p_len == 3: - # We only allow IPv4 and IPv6 addresses for the bind address - ipaddr = parts[0] - if not re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', parts[0]) and not re.match(r'^\[[0-9a-fA-F:]+(?:|%[^\]/]+)\]$', ipaddr): - self.fail(('Bind addresses for published ports must be IPv4 or IPv6 addresses, not hostnames. ' - 'Use the dig lookup to resolve hostnames. (Found hostname: {0})').format(ipaddr)) - if re.match(r'^\[[0-9a-fA-F:]+\]$', ipaddr): - ipaddr = ipaddr[1:-1] - if parts[1]: - if len(container_ports) == 1: - port_binds = [(ipaddr, parts[1])] - else: - port_binds = [(ipaddr, port) for port in parse_port_range(parts[1], self.client)] - else: - port_binds = len(container_ports) * [(ipaddr,)] - else: - self.fail(('Invalid port description "%s" - expected 1 to 3 colon-separated parts, but got %d. 
' - 'Maybe you forgot to use square brackets ([...]) around an IPv6 address?') % (port, p_len)) - - for bind, container_port in zip(port_binds, container_ports): - idx = '{0}/{1}'.format(container_port, protocol) if protocol else container_port - if idx in binds: - old_bind = binds[idx] - if isinstance(old_bind, list): - old_bind.append(bind) - else: - binds[idx] = [old_bind, bind] - else: - binds[idx] = bind - return binds - - def _get_volume_binds(self, volumes): - ''' - Extract host bindings, if any, from list of volume mapping strings. - - :return: dictionary of bind mappings - ''' - result = dict() - if volumes: - for vol in volumes: - host = None - if ':' in vol: - parts = vol.split(':') - if len(parts) == 3: - host, container, mode = parts - if not is_volume_permissions(mode): - self.fail('Found invalid volumes mode: {0}'.format(mode)) - elif len(parts) == 2: - if not is_volume_permissions(parts[1]): - host, container, mode = (parts + ['rw']) - if host is not None: - result[host] = dict( - bind=container, - mode=mode - ) - return result - - def _parse_exposed_ports(self, published_ports): - ''' - Parse exposed ports from docker CLI-style ports syntax. 
- ''' - exposed = [] - if self.exposed_ports: - for port in self.exposed_ports: - port = to_text(port, errors='surrogate_or_strict').strip() - protocol = 'tcp' - match = re.search(r'(/.+$)', port) - if match: - protocol = match.group(1).replace('/', '') - port = re.sub(r'/.+$', '', port) - exposed.append((port, protocol)) - if published_ports: - # Any published port should also be exposed - for publish_port in published_ports: - match = False - if isinstance(publish_port, string_types) and '/' in publish_port: - port, protocol = publish_port.split('/') - port = int(port) - else: - protocol = 'tcp' - port = int(publish_port) - for exposed_port in exposed: - if exposed_port[1] != protocol: - continue - if isinstance(exposed_port[0], string_types) and '-' in exposed_port[0]: - start_port, end_port = exposed_port[0].split('-') - if int(start_port) <= port <= int(end_port): - match = True - elif exposed_port[0] == port: - match = True - if not match: - exposed.append((port, protocol)) - return exposed - - @staticmethod - def _parse_links(links): - ''' - Turn links into a dictionary - ''' - if links is None: - return None - - result = [] - for link in links: - parsed_link = link.split(':', 1) - if len(parsed_link) == 2: - result.append((parsed_link[0], parsed_link[1])) - else: - result.append((parsed_link[0], parsed_link[0])) - return result - - def _parse_ulimits(self): - ''' - Turn ulimits into an array of Ulimit objects - ''' - if self.ulimits is None: - return None - - results = [] - for limit in self.ulimits: - limits = dict() - pieces = limit.split(':') - if len(pieces) >= 2: - limits['name'] = pieces[0] - limits['soft'] = int(pieces[1]) - limits['hard'] = int(pieces[1]) - if len(pieces) == 3: - limits['hard'] = int(pieces[2]) - try: - results.append(Ulimit(**limits)) - except ValueError as exc: - self.fail("Error parsing ulimits value %s - %s" % (limit, to_native(exc))) - return results +from ansible_collections.community.docker.plugins.module_utils._api.errors 
import APIError, DockerException, NotFound - def _parse_sysctls(self): - ''' - Turn sysctls into an hash of Sysctl objects - ''' - return self.sysctls - - def _parse_log_config(self): - ''' - Create a LogConfig object - ''' - if self.log_driver is None: - return None - - options = dict( - Type=self.log_driver, - Config=dict() - ) - - if self.log_options is not None: - options['Config'] = dict() - for k, v in self.log_options.items(): - if not isinstance(v, string_types): - self.client.module.warn( - "Non-string value found for log_options option '%s'. The value is automatically converted to '%s'. " - "If this is not correct, or you want to avoid such warnings, please quote the value." % ( - k, to_text(v, errors='surrogate_or_strict')) - ) - v = to_text(v, errors='surrogate_or_strict') - self.log_options[k] = v - options['Config'][k] = v - - try: - return LogConfig(**options) - except ValueError as exc: - self.fail('Error parsing logging options - %s' % (to_native(exc), )) - - def _parse_tmpfs(self): - ''' - Turn tmpfs into a hash of Tmpfs objects - ''' - result = dict() - if self.tmpfs is None: - return result - - for tmpfs_spec in self.tmpfs: - split_spec = tmpfs_spec.split(":", 1) - if len(split_spec) > 1: - result[split_spec[0]] = split_spec[1] - else: - result[split_spec[0]] = "" - return result - - def _get_environment(self): - """ - If environment file is combined with explicit environment variables, the explicit environment variables - take precedence. - """ - final_env = {} - if self.env_file: - parsed_env_file = utils.parse_env_file(self.env_file) - for name, value in parsed_env_file.items(): - final_env[name] = to_text(value, errors='surrogate_or_strict') - if self.env: - for name, value in self.env.items(): - if not isinstance(value, string_types): - self.fail("Non-string value found for env option. Ambiguous env options must be " - "wrapped in quotes to avoid them being interpreted. 
Key: %s" % (name, )) - final_env[name] = to_text(value, errors='surrogate_or_strict') - return final_env - - def _get_network_id(self, network_name): - network_id = None - try: - for network in self.client.networks(names=[network_name]): - if network['Name'] == network_name: - network_id = network['Id'] - break - except Exception as exc: - self.fail("Error getting network id for %s - %s" % (network_name, to_native(exc))) - return network_id - - def _process_mounts(self): - if self.mounts is None: - return None, None - mounts_list = [] - mounts_expected = [] - for mount in self.mounts: - target = mount['target'] - datatype = mount['type'] - mount_dict = dict(mount) - # Sanity checks (so we don't wait for Docker SDK for Python to barf on input) - if mount_dict.get('source') is None and datatype not in ('tmpfs', 'volume'): - self.client.fail('source must be specified for mount "{0}" of type "{1}"'.format(target, datatype)) - mount_option_types = dict( - volume_driver='volume', - volume_options='volume', - propagation='bind', - no_copy='volume', - labels='volume', - tmpfs_size='tmpfs', - tmpfs_mode='tmpfs', - ) - for option, req_datatype in mount_option_types.items(): - if mount_dict.get(option) is not None and datatype != req_datatype: - self.client.fail('{0} cannot be specified for mount "{1}" of type "{2}" (needs type "{3}")'.format(option, target, datatype, req_datatype)) - # Handle volume_driver and volume_options - volume_driver = mount_dict.pop('volume_driver') - volume_options = mount_dict.pop('volume_options') - if volume_driver: - if volume_options: - volume_options = clean_dict_booleans_for_docker_api(volume_options) - mount_dict['driver_config'] = docker_types.DriverConfig(name=volume_driver, options=volume_options) - if mount_dict['labels']: - mount_dict['labels'] = clean_dict_booleans_for_docker_api(mount_dict['labels']) - if mount_dict.get('tmpfs_size') is not None: - try: - mount_dict['tmpfs_size'] = human_to_bytes(mount_dict['tmpfs_size']) - except 
ValueError as exc: - self.fail('Failed to convert tmpfs_size of mount "{0}" to bytes: {1}'.format(target, to_native(exc))) - if mount_dict.get('tmpfs_mode') is not None: - try: - mount_dict['tmpfs_mode'] = int(mount_dict['tmpfs_mode'], 8) - except Exception as dummy: - self.client.fail('tmp_fs mode of mount "{0}" is not an octal string!'.format(target)) - # Fill expected mount dict - mount_expected = dict(mount) - mount_expected['tmpfs_size'] = mount_dict['tmpfs_size'] - mount_expected['tmpfs_mode'] = mount_dict['tmpfs_mode'] - # Add result to lists - mounts_list.append(docker_types.Mount(**mount_dict)) - mounts_expected.append(omit_none_from_dict(mount_expected)) - return mounts_list, mounts_expected - - def _process_rate_bps(self, option): - """ - Format device_read_bps and device_write_bps option - """ - devices_list = [] - for v in getattr(self, option): - device_dict = dict((x.title(), y) for x, y in v.items()) - device_dict['Rate'] = human_to_bytes(device_dict['Rate']) - devices_list.append(device_dict) - - setattr(self, option, devices_list) - - def _process_rate_iops(self, option): - """ - Format device_read_iops and device_write_iops option - """ - devices_list = [] - for v in getattr(self, option): - device_dict = dict((x.title(), y) for x, y in v.items()) - devices_list.append(device_dict) - - setattr(self, option, devices_list) - - def _replace_container_names(self, mode): - """ - Parse IPC and PID modes. If they contain a container name, replace - with the container's ID. - """ - if mode is None or not mode.startswith('container:'): - return mode - container_name = mode[len('container:'):] - # Try to inspect container to see whether this is an ID or a - # name (and in the latter case, retrieve it's ID) - container = self.client.get_container(container_name) - if container is None: - # If we can't find the container, issue a warning and continue with - # what the user specified. 
- self.client.module.warn('Cannot find a container with name or ID "{0}"'.format(container_name)) - return mode - return 'container:{0}'.format(container['Id']) - - def _check_mount_target_collisions(self): - last = dict() - - def f(t, name): - if t in last: - if name == last[t]: - self.client.fail('The mount point "{0}" appears twice in the {1} option'.format(t, name)) - else: - self.client.fail('The mount point "{0}" appears both in the {1} and {2} option'.format(t, name, last[t])) - last[t] = name - - if self.expected_mounts: - for t in [m['target'] for m in self.expected_mounts]: - f(t, 'mounts') - if self.volumes: - for v in self.volumes: - vs = v.split(':') - f(vs[0 if len(vs) == 1 else 1], 'volumes') +from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import parse_repository_tag, normalize_links class Container(DockerBaseClass): - - def __init__(self, container, parameters): + def __init__(self, container): super(Container, self).__init__() self.raw = container self.Id = None + self.Image = None self.container = container if container: self.Id = container['Id'] self.Image = container['Image'] self.log(self.container, pretty_print=True) - self.parameters = parameters - self.parameters.expected_links = None - self.parameters.expected_ports = None - self.parameters.expected_exposed = None - self.parameters.expected_volumes = None - self.parameters.expected_ulimits = None - self.parameters.expected_sysctls = None - self.parameters.expected_etc_hosts = None - self.parameters.expected_env = None - self.parameters.expected_device_requests = None - self.parameters_map = dict() - self.parameters_map['expected_labels'] = 'labels' - self.parameters_map['expected_links'] = 'links' - self.parameters_map['expected_ports'] = 'expected_ports' - self.parameters_map['expected_exposed'] = 'exposed_ports' - self.parameters_map['expected_volumes'] = 'volumes' - self.parameters_map['expected_ulimits'] = 'ulimits' - 
self.parameters_map['expected_sysctls'] = 'sysctls' - self.parameters_map['expected_etc_hosts'] = 'etc_hosts' - self.parameters_map['expected_env'] = 'env' - self.parameters_map['expected_entrypoint'] = 'entrypoint' - self.parameters_map['expected_binds'] = 'volumes' - self.parameters_map['expected_labels'] = 'labels' - self.parameters_map['expected_cmd'] = 'command' - self.parameters_map['expected_devices'] = 'devices' - self.parameters_map['expected_healthcheck'] = 'healthcheck' - self.parameters_map['expected_mounts'] = 'mounts' - self.parameters_map['expected_device_requests'] = 'device_requests' - - def fail(self, msg): - self.parameters.client.fail(msg) @property def exists(self): @@ -2199,619 +1272,187 @@ def paused(self): return self.container['State'].get('Paused', False) return False - def _compare(self, a, b, compare): - ''' - Compare values a and b as described in compare. - ''' - return compare_generic(a, b, compare['comparison'], compare['type']) - - def _decode_mounts(self, mounts): - if not mounts: - return mounts - result = [] - empty_dict = dict() - for mount in mounts: - res = dict() - res['type'] = mount.get('Type') - res['source'] = mount.get('Source') - res['target'] = mount.get('Target') - res['read_only'] = mount.get('ReadOnly', False) # golang's omitempty for bool returns None for False - res['consistency'] = mount.get('Consistency') - res['propagation'] = mount.get('BindOptions', empty_dict).get('Propagation') - res['no_copy'] = mount.get('VolumeOptions', empty_dict).get('NoCopy', False) - res['labels'] = mount.get('VolumeOptions', empty_dict).get('Labels', empty_dict) - res['volume_driver'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Name') - res['volume_options'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Options', empty_dict) - res['tmpfs_size'] = mount.get('TmpfsOptions', empty_dict).get('SizeBytes') - res['tmpfs_mode'] = mount.get('TmpfsOptions', 
empty_dict).get('Mode') - result.append(res) - return result - - def has_different_configuration(self, image): - ''' - Diff parameters vs existing container config. Returns tuple: (True | False, List of differences) - ''' - self.log('Starting has_different_configuration') - self.parameters.expected_entrypoint = self._get_expected_entrypoint() - self.parameters.expected_links = self._get_expected_links() - self.parameters.expected_ports = self._get_expected_ports() - self.parameters.expected_exposed = self._get_expected_exposed(image) - self.parameters.expected_volumes = self._get_expected_volumes(image) - self.parameters.expected_binds = self._get_expected_binds(image) - self.parameters.expected_labels = self._get_expected_labels(image) - self.parameters.expected_ulimits = self._get_expected_ulimits(self.parameters.ulimits) - self.parameters.expected_sysctls = self._get_expected_sysctls(self.parameters.sysctls) - self.parameters.expected_etc_hosts = self._convert_simple_dict_to_list('etc_hosts') - self.parameters.expected_env = self._get_expected_env(image) - self.parameters.expected_cmd = self._get_expected_cmd() - self.parameters.expected_devices = self._get_expected_devices() - self.parameters.expected_healthcheck = self._get_expected_healthcheck() - self.parameters.expected_device_requests = self._get_expected_device_requests() - - if not self.container.get('HostConfig'): - self.fail("has_config_diff: Error parsing container properties. HostConfig missing.") - if not self.container.get('Config'): - self.fail("has_config_diff: Error parsing container properties. Config missing.") - if not self.container.get('NetworkSettings'): - self.fail("has_config_diff: Error parsing container properties. 
NetworkSettings missing.") - - host_config = self.container['HostConfig'] - log_config = host_config.get('LogConfig', dict()) - config = self.container['Config'] - network = self.container['NetworkSettings'] - - # The previous version of the docker module ignored the detach state by - # assuming if the container was running, it must have been detached. - detach = not (config.get('AttachStderr') and config.get('AttachStdout')) - - # "ExposedPorts": null returns None type & causes AttributeError - PR #5517 - if config.get('ExposedPorts') is not None: - expected_exposed = [self._normalize_port(p) for p in config.get('ExposedPorts', dict()).keys()] - else: - expected_exposed = [] - - # Map parameters to container inspect results - config_mapping = dict( - expected_cmd=config.get('Cmd'), - domainname=config.get('Domainname'), - hostname=config.get('Hostname'), - user=config.get('User'), - detach=detach, - init=host_config.get('Init'), - interactive=config.get('OpenStdin'), - capabilities=host_config.get('CapAdd'), - cap_drop=host_config.get('CapDrop'), - cgroup_parent=host_config.get('CgroupParent'), - expected_devices=host_config.get('Devices'), - dns_servers=host_config.get('Dns'), - dns_opts=host_config.get('DnsOptions'), - dns_search_domains=host_config.get('DnsSearch'), - expected_env=(config.get('Env') or []), - expected_entrypoint=config.get('Entrypoint'), - expected_etc_hosts=host_config['ExtraHosts'], - expected_exposed=expected_exposed, - groups=host_config.get('GroupAdd'), - ipc_mode=host_config.get("IpcMode"), - expected_labels=config.get('Labels'), - expected_links=host_config.get('Links'), - mac_address=config.get('MacAddress', network.get('MacAddress')), - memory_swappiness=host_config.get('MemorySwappiness'), - network_mode=host_config.get('NetworkMode'), - userns_mode=host_config.get('UsernsMode'), - oom_killer=host_config.get('OomKillDisable'), - oom_score_adj=host_config.get('OomScoreAdj'), - pid_mode=host_config.get('PidMode'), - 
privileged=host_config.get('Privileged'), - expected_ports=host_config.get('PortBindings'), - read_only=host_config.get('ReadonlyRootfs'), - runtime=host_config.get('Runtime'), - shm_size=host_config.get('ShmSize'), - security_opts=host_config.get("SecurityOpt"), - stop_signal=config.get("StopSignal"), - tmpfs=host_config.get('Tmpfs'), - tty=config.get('Tty'), - expected_ulimits=host_config.get('Ulimits'), - expected_sysctls=host_config.get('Sysctls'), - uts=host_config.get('UTSMode'), - expected_volumes=config.get('Volumes'), - expected_binds=host_config.get('Binds'), - volume_driver=host_config.get('VolumeDriver'), - volumes_from=host_config.get('VolumesFrom'), - working_dir=config.get('WorkingDir'), - publish_all_ports=host_config.get('PublishAllPorts'), - expected_healthcheck=config.get('Healthcheck'), - disable_healthcheck=(not config.get('Healthcheck') or config.get('Healthcheck').get('Test') == ['NONE']), - device_read_bps=host_config.get('BlkioDeviceReadBps'), - device_write_bps=host_config.get('BlkioDeviceWriteBps'), - device_read_iops=host_config.get('BlkioDeviceReadIOps'), - device_write_iops=host_config.get('BlkioDeviceWriteIOps'), - expected_device_requests=host_config.get('DeviceRequests'), - pids_limit=host_config.get('PidsLimit'), - storage_opts=host_config.get('StorageOpt'), - # According to https://github.com/moby/moby/, support for HostConfig.Mounts - # has been included at least since v17.03.0-ce, which has API version 1.26. - # The previous tag, v1.9.1, has API version 1.21 and does not have - # HostConfig.Mounts. I have no idea what about API 1.25... 
- expected_mounts=self._decode_mounts(host_config.get('Mounts')), - cpus=host_config.get('NanoCpus'), - ) - # Options which don't make sense without their accompanying option - if self.parameters.log_driver: - config_mapping['log_driver'] = log_config.get('Type') - config_mapping['log_options'] = log_config.get('Config') - - if self.parameters.client.option_minimal_versions['auto_remove']['supported']: - # auto_remove is only supported in Docker SDK for Python >= 2.0.0; unfortunately - # it has a default value, that's why we have to jump through the hoops here - config_mapping['auto_remove'] = host_config.get('AutoRemove') - - if self.parameters.client.option_minimal_versions['stop_timeout']['supported']: - # stop_timeout is only supported in Docker SDK for Python >= 2.1. Note that - # stop_timeout has a hybrid role, in that it used to be something only used - # for stopping containers, and is now also used as a container property. - # That's why it needs special handling here. - config_mapping['stop_timeout'] = config.get('StopTimeout') - - differences = DifferenceTracker() - for key, value in config_mapping.items(): - minimal_version = self.parameters.client.option_minimal_versions.get(key, {}) - if not minimal_version.get('supported', True): - continue - compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)] - self.log('check differences %s %s vs %s (%s)' % (key, getattr(self.parameters, key), to_text(value, errors='surrogate_or_strict'), compare)) - if getattr(self.parameters, key, None) is not None: - match = self._compare(getattr(self.parameters, key), value, compare) - - if not match: - if key == 'expected_healthcheck' and config_mapping['disable_healthcheck'] and self.parameters.disable_healthcheck: - # If the healthcheck is disabled (both in parameters and for the current container), and the user - # requested strict comparison for healthcheck, the comparison will fail. 
That's why we ignore the - # expected_healthcheck comparison in this case. - continue - - if key == 'expected_labels' and compare['comparison'] == 'strict' and self.parameters.image_label_mismatch == 'fail': - # If there are labels from the base image that should be removed and - # base_image_mismatch is fail we want raise an error. - image_labels = self._get_image_labels(image) - would_remove_labels = [] - for label in image_labels: - if label not in self.parameters.labels: - # Format label for error message - would_remove_labels.append(label) - if would_remove_labels: - msg = ("Some labels should be removed but are present in the base image. You can set image_label_mismatch to 'ignore' to ignore" - " this error. Labels: {0}") - self.fail(msg.format(', '.join(['"%s"' % label for label in would_remove_labels]))) - - # no match. record the differences - p = getattr(self.parameters, key) - c = value - if compare['type'] == 'set': - # Since the order does not matter, sort so that the diff output is better. - if p is not None: - p = sorted(p) - if c is not None: - c = sorted(c) - elif compare['type'] == 'set(dict)': - # Since the order does not matter, sort so that the diff output is better. - if key == 'expected_mounts': - # For selected values, use one entry as key - def sort_key_fn(x): - return x['target'] - else: - # We sort the list of dictionaries by using the sorted items of a dict as its key. - def sort_key_fn(x): - return sorted((a, to_text(b, errors='surrogate_or_strict')) for a, b in x.items()) - if p is not None: - p = sorted(p, key=sort_key_fn) - if c is not None: - c = sorted(c, key=sort_key_fn) - differences.add(key, parameter=p, active=c) - - has_differences = not differences.empty - return has_differences, differences - - def has_different_resource_limits(self): - ''' - Diff parameters and container resource limits - ''' - if not self.container.get('HostConfig'): - self.fail("limits_differ_from_container: Error parsing container properties. 
HostConfig missing.") - - host_config = self.container['HostConfig'] - - restart_policy = host_config.get('RestartPolicy') or dict() - - config_mapping = dict( - blkio_weight=host_config.get('BlkioWeight'), - cpu_period=host_config.get('CpuPeriod'), - cpu_quota=host_config.get('CpuQuota'), - cpu_shares=host_config.get('CpuShares'), - cpuset_cpus=host_config.get('CpusetCpus'), - cpuset_mems=host_config.get('CpusetMems'), - kernel_memory=host_config.get("KernelMemory"), - memory=host_config.get('Memory'), - memory_reservation=host_config.get('MemoryReservation'), - memory_swap=host_config.get('MemorySwap'), - restart_policy=restart_policy.get('Name') - ) - - # Options which don't make sense without their accompanying option - if self.parameters.restart_policy: - config_mapping['restart_retries'] = restart_policy.get('MaximumRetryCount') - - differences = DifferenceTracker() - for key, value in config_mapping.items(): - if getattr(self.parameters, key, None): - compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)] - match = self._compare(getattr(self.parameters, key), value, compare) - - if not match: - # no match. record the differences - differences.add(key, parameter=getattr(self.parameters, key), active=value) - different = not differences.empty - return different, differences - - def has_network_differences(self): - ''' - Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6 - ''' - different = False - differences = [] - - if not self.parameters.networks: - return different, differences - - if not self.container.get('NetworkSettings'): - self.fail("has_missing_networks: Error parsing container properties. 
NetworkSettings missing.") - - connected_networks = self.container['NetworkSettings']['Networks'] - for network in self.parameters.networks: - network_info = connected_networks.get(network['name']) - if network_info is None: - different = True - differences.append(dict( - parameter=network, - container=None - )) - else: - diff = False - network_info_ipam = network_info.get('IPAMConfig') or {} - if network.get('ipv4_address') and network['ipv4_address'] != network_info_ipam.get('IPv4Address'): - diff = True - if network.get('ipv6_address') and network['ipv6_address'] != network_info_ipam.get('IPv6Address'): - diff = True - if network.get('aliases'): - if not compare_generic(network['aliases'], network_info.get('Aliases'), 'allow_more_present', 'set'): - diff = True - if network.get('links'): - expected_links = [] - for link, alias in network['links']: - expected_links.append("%s:%s" % (link, alias)) - if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'): - diff = True - if diff: - different = True - differences.append(dict( - parameter=network, - container=dict( - name=network['name'], - ipv4_address=network_info_ipam.get('IPv4Address'), - ipv6_address=network_info_ipam.get('IPv6Address'), - aliases=network_info.get('Aliases'), - links=network_info.get('Links') - ) - )) - return different, differences - - def has_extra_networks(self): - ''' - Check if the container is connected to non-requested networks - ''' - extra_networks = [] - extra = False - - if not self.container.get('NetworkSettings'): - self.fail("has_extra_networks: Error parsing container properties. 
NetworkSettings missing.") - - connected_networks = self.container['NetworkSettings'].get('Networks') - if connected_networks: - for network, network_config in connected_networks.items(): - keep = False - if self.parameters.networks: - for expected_network in self.parameters.networks: - if expected_network['name'] == network: - keep = True - if not keep: - extra = True - extra_networks.append(dict(name=network, id=network_config['NetworkID'])) - return extra, extra_networks - - def _get_expected_devices(self): - if not self.parameters.devices: - return None - expected_devices = [] - for device in self.parameters.devices: - parts = device.split(':') - if len(parts) == 1: - expected_devices.append( - dict( - CgroupPermissions='rwm', - PathInContainer=parts[0], - PathOnHost=parts[0] - )) - elif len(parts) == 2: - parts = device.split(':') - expected_devices.append( - dict( - CgroupPermissions='rwm', - PathInContainer=parts[1], - PathOnHost=parts[0] - ) - ) - else: - expected_devices.append( - dict( - CgroupPermissions=parts[2], - PathInContainer=parts[1], - PathOnHost=parts[0] - )) - return expected_devices - - def _get_expected_entrypoint(self): - if self.parameters.client.module.params['command_handling'] != 'correct' and not self.parameters.entrypoint: - return None - return self.parameters.entrypoint - - def _get_expected_ports(self): - if self.parameters.published_ports is None: - return None - expected_bound_ports = {} - for container_port, config in self.parameters.published_ports.items(): - if isinstance(container_port, int): - container_port = "%s/tcp" % container_port - if len(config) == 1: - if isinstance(config[0], int): - expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': config[0]}] - else: - expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': ""}] - elif isinstance(config[0], tuple): - expected_bound_ports[container_port] = [] - for host_ip, host_port in config: - 
expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': to_text(host_port, errors='surrogate_or_strict')}) - else: - expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': to_text(config[1], errors='surrogate_or_strict')}] - return expected_bound_ports - - def _get_expected_links(self): - if self.parameters.links is None: - return None - self.log('parameter links:') - self.log(self.parameters.links, pretty_print=True) - exp_links = [] - for link, alias in self.parameters.links: - exp_links.append("/%s:%s/%s" % (link, ('/' + self.parameters.name), alias)) - return exp_links - - def _get_expected_binds(self, image): - self.log('_get_expected_binds') - image_vols = [] - if image: - image_vols = self._get_image_binds(image['Config'].get('Volumes')) - param_vols = [] - if self.parameters.volumes: - for vol in self.parameters.volumes: - host = None - if ':' in vol: - parts = vol.split(':') - if len(parts) == 3: - host, container, mode = parts - if not is_volume_permissions(mode): - self.fail('Found invalid volumes mode: {0}'.format(mode)) - if len(parts) == 2: - if not is_volume_permissions(parts[1]): - host, container, mode = parts + ['rw'] - if host: - param_vols.append("%s:%s:%s" % (host, container, mode)) - result = list(set(image_vols + param_vols)) - self.log("expected_binds:") - self.log(result, pretty_print=True) - return result - - def _get_expected_labels(self, image): - if self.parameters.labels is None: - return None - if self.parameters.image_label_mismatch == 'ignore': - expected_labels = dict(self._get_image_labels(image)) - else: - expected_labels = {} - expected_labels.update(self.parameters.labels) - return expected_labels - - def _get_image_labels(self, image): - if not image: - return {} - - # Can't use get('Labels', {}) because 'Labels' may be present and be None - return image['Config'].get('Labels') or {} - - def _get_expected_device_requests(self): - if self.parameters.device_requests is None: - return None - 
device_requests = [] - for dr in self.parameters.device_requests: - device_requests.append({ - 'Driver': dr['driver'], - 'Count': dr['count'], - 'DeviceIDs': dr['device_ids'], - 'Capabilities': dr['capabilities'], - 'Options': dr['options'], - }) - return device_requests - - def _get_image_binds(self, volumes): - ''' - Convert array of binds to array of strings with format host_path:container_path:mode - - :param volumes: array of bind dicts - :return: array of strings - ''' - results = [] - if isinstance(volumes, dict): - results += self._get_bind_from_dict(volumes) - elif isinstance(volumes, list): - for vol in volumes: - results += self._get_bind_from_dict(vol) - return results - - @staticmethod - def _get_bind_from_dict(volume_dict): - results = [] - if volume_dict: - for host_path, config in volume_dict.items(): - if isinstance(config, dict) and config.get('bind'): - container_path = config.get('bind') - mode = config.get('mode', 'rw') - results.append("%s:%s:%s" % (host_path, container_path, mode)) - return results - - def _get_expected_volumes(self, image): - self.log('_get_expected_volumes') - expected_vols = dict() - if image and image['Config'].get('Volumes'): - expected_vols.update(image['Config'].get('Volumes')) - - if self.parameters.volumes: - for vol in self.parameters.volumes: - # We only expect anonymous volumes to show up in the list - if ':' in vol: - parts = vol.split(':') - if len(parts) == 3: - continue - if len(parts) == 2: - if not is_volume_permissions(parts[1]): - continue - expected_vols[vol] = dict() - - if not expected_vols: - expected_vols = None - self.log("expected_volumes:") - self.log(expected_vols, pretty_print=True) - return expected_vols - - def _get_expected_env(self, image): - self.log('_get_expected_env') - expected_env = dict() - if image and image['Config'].get('Env'): - for env_var in image['Config']['Env']: - parts = env_var.split('=', 1) - expected_env[parts[0]] = parts[1] - if self.parameters.env: - 
expected_env.update(self.parameters.env) - param_env = [] - for key, value in expected_env.items(): - param_env.append("%s=%s" % (key, value)) - return param_env - - def _get_expected_exposed(self, image): - self.log('_get_expected_exposed') - image_ports = [] - if image: - image_exposed_ports = image['Config'].get('ExposedPorts') or {} - image_ports = [self._normalize_port(p) for p in image_exposed_ports.keys()] - param_ports = [] - if self.parameters.ports: - param_ports = [to_text(p[0], errors='surrogate_or_strict') + '/' + p[1] for p in self.parameters.ports] - result = list(set(image_ports + param_ports)) - self.log(result, pretty_print=True) - return result - - def _get_expected_ulimits(self, config_ulimits): - self.log('_get_expected_ulimits') - if config_ulimits is None: - return None - results = [] - for limit in config_ulimits: - results.append(dict( - Name=limit.name, - Soft=limit.soft, - Hard=limit.hard - )) - return results - - def _get_expected_sysctls(self, config_sysctls): - self.log('_get_expected_sysctls') - if config_sysctls is None: - return None - result = dict() - for key, value in config_sysctls.items(): - result[key] = to_text(value, errors='surrogate_or_strict') - return result - - def _get_expected_cmd(self): - self.log('_get_expected_cmd') - if self.parameters.client.module.params['command_handling'] != 'correct' and not self.parameters.command: - return None - return self.parameters.command - - def _convert_simple_dict_to_list(self, param_name, join_with=':'): - if getattr(self.parameters, param_name, None) is None: - return None - results = [] - for key, value in getattr(self.parameters, param_name).items(): - results.append("%s%s%s" % (key, join_with, value)) - return results - - def _normalize_port(self, port): - if '/' not in port: - return port + '/tcp' - return port - - def _get_expected_healthcheck(self): - self.log('_get_expected_healthcheck') - expected_healthcheck = dict() - - if self.parameters.healthcheck: - 
expected_healthcheck.update([(k.title().replace("_", ""), v) - for k, v in self.parameters.healthcheck.items()]) - - return expected_healthcheck - class ContainerManager(DockerBaseClass): - ''' - Perform container management tasks - ''' - - def __init__(self, client): - - super(ContainerManager, self).__init__() - - if client.module.params.get('log_options') and not client.module.params.get('log_driver'): - client.module.warn('log_options is ignored when log_driver is not specified') - if client.module.params.get('healthcheck') and not client.module.params.get('healthcheck').get('test'): - client.module.warn('healthcheck is ignored when test is not specified') - if client.module.params.get('restart_retries') is not None and not client.module.params.get('restart_policy'): - client.module.warn('restart_retries is ignored when restart_policy is not specified') - + def __init__(self, module, client, active_options): self.client = client - self.parameters = TaskParameters(client) - self.check_mode = self.client.check_mode + self.options = active_options + self.all_options = self._collect_all_options(active_options) + self.module = module + self.check_mode = self.module.check_mode + self.param_cleanup = self.module.params['cleanup'] + self.param_container_default_behavior = self.module.params['container_default_behavior'] + self.param_default_host_ip = self.module.params['default_host_ip'] + self.param_debug = self.module.params['debug'] + self.param_force_kill = self.module.params['force_kill'] + self.param_image = self.module.params['image'] + self.param_image_label_mismatch = self.module.params['image_label_mismatch'] + self.param_keep_volumes = self.module.params['keep_volumes'] + self.param_kill_signal = self.module.params['kill_signal'] + self.param_name = self.module.params['name'] + self.param_networks_cli_compatible = self.module.params['networks_cli_compatible'] + self.param_output_logs = self.module.params['output_logs'] + self.param_paused = 
self.module.params['paused'] + self.param_pull = self.module.params['pull'] + self.param_purge_networks = self.module.params['purge_networks'] + self.param_recreate = self.module.params['recreate'] + self.param_removal_wait_timeout = self.module.params['removal_wait_timeout'] + self.param_restart = self.module.params['restart'] + self.param_state = self.module.params['state'] + self._parse_comparisons() + self._update_params() + self.parameters = self._collect_params(active_options) self.results = {'changed': False, 'actions': []} self.diff = {} self.diff_tracker = DifferenceTracker() self.facts = {} + if self.param_default_host_ip: + valid_ip = False + if re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', self.param_default_host_ip): + valid_ip = True + if re.match(r'^\[[0-9a-fA-F:]+\]$', self.param_default_host_ip): + valid_ip = True + if re.match(r'^[0-9a-fA-F:]+$', self.param_default_host_ip): + self.param_default_host_ip = '[{0}]'.format(self.param_default_host_ip) + valid_ip = True + if not valid_ip: + self.fail('The value of default_host_ip must be an empty string, an IPv4 address, ' + 'or an IPv6 address. 
Got "{0}" instead.'.format(self.param_default_host_ip)) + + + def _collect_all_options(self, active_options): + all_options = {} + for options in active_options: + for option in options.options: + all_options[option.name] = option + for option in [ + Option('image', 'str', None), + Option('networks', 'set', None, elements='dict', ansible_suboptions={}), + ]: + all_options[option.name] = option + return all_options + + def _collect_all_module_params(self): + all_module_options = set() + for option, data in self.module.argument_spec.items(): + all_module_options.add(option) + if 'aliases' in data: + for alias in data['aliases']: + all_module_options.add(alias) + return all_module_options + + def _parse_comparisons(self): + # Keep track of all module params and all option aliases + all_module_options = self._collect_all_module_params() + comp_aliases = {} + for option_name, option in self.all_options.items(): + if option.not_an_ansible_option: + continue + comp_aliases[option_name] = option_name + for alias in option.ansible_aliases: + comp_aliases[alias] = option_name + # Process legacy ignore options + if self.module.params['ignore_image']: + self.all_options['image'].comparison = 'ignore' + if self.param_purge_networks: + self.all_options['networks'].comparison = 'strict' + # Process comparisons specified by user + if self.module.params.get('comparisons'): + # If '*' appears in comparisons, process it first + if '*' in self.module.params['comparisons']: + value = self.module.params['comparisons']['*'] + if value not in ('strict', 'ignore'): + self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!") + for option in self.all_options.values(): + if option.name == 'networks': + # `networks` is special: only update if + # some value is actually specified + if self.module.params['networks'] is None: + continue + option.comparison = value + # Now process all other comparisons. 
+ comp_aliases_used = {} + for key, value in self.module.params['comparisons'].items(): + if key == '*': + continue + # Find main key + key_main = comp_aliases.get(key) + if key_main is None: + if key in all_module_options: + self.fail("The module option '%s' cannot be specified in the comparisons dict, " + "since it does not correspond to container's state!" % key) + if key not in self.all_options or self.all_options[key].not_an_ansible_option: + self.fail("Unknown module option '%s' in comparisons dict!" % key) + key_main = key + if key_main in comp_aliases_used: + self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main)) + comp_aliases_used[key_main] = key + # Check value and update accordingly + if value in ('strict', 'ignore'): + self.all_options[key_main].comparison = value + elif value == 'allow_more_present': + if self.all_options[key_main].comparison_type == 'value': + self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value)) + self.all_options[key_main].comparison = value + else: + self.fail("Unknown comparison mode '%s'!" 
% value) + # Copy values + for option in self.all_options.values(): + if option.copy_comparison_from is not None: + option.comparison = self.all_options[option.copy_comparison_from].comparison + # Check legacy values + if self.module.params['ignore_image'] and self.all_options['image'].comparison != 'ignore': + self.module.warn('The ignore_image option has been overridden by the comparisons option!') + if self.param_purge_networks and self.all_options['networks'].comparison != 'strict': + self.module.warn('The purge_networks option has been overridden by the comparisons option!') + + def _update_params(self): + if self.param_networks_cli_compatible is True and self.module.params['networks'] and self.module.params['network_mode'] is None: + # Same behavior as Docker CLI: if networks are specified, use the name of the first network as the value for network_mode + # (assuming no explicit value is specified for network_mode) + self.module.params['network_mode'] = self.module.params['networks'][0]['name'] + if self.param_container_default_behavior == 'compatibility': + old_default_values = dict( + auto_remove=False, + detach=True, + init=False, + interactive=False, + memory='0', + paused=False, + privileged=False, + read_only=False, + tty=False, + ) + for param, value in old_default_values.items(): + if self.module.params[param] is None: + self.module.params[param] = value - state = self.parameters.state - if state in ('stopped', 'started', 'present'): - self.present(state) - elif state == 'absent': + def _collect_params(self, active_options): + parameters = [] + for options in active_options: + values = {} + engine = options.get_engine('docker_api') + for option in options.options: + if not option.not_an_ansible_option and self.module.params[option.name] is not None: + values[option.name] = self.module.params[option.name] + values = options.preprocess(self.module, values) + engine.preprocess_value(self.module, self.client, self.client.docker_api_version, 
options.options, values) + parameters.append((options, values)) + return parameters + + def fail(self, *args, **kwargs): + self.client.fail(*args, **kwargs) + + def run(self): + if self.param_state in ('stopped', 'started', 'present'): + self.present(self.param_state) + elif self.param_state == 'absent': self.absent() - if not self.check_mode and not self.parameters.debug: + if not self.check_mode and not self.param_debug: self.results.pop('actions') - if self.client.module._diff or self.parameters.debug: + if self.module._diff or self.param_debug: self.diff['before'], self.diff['after'] = self.diff_tracker.get_before_after() self.results['diff'] = self.diff @@ -2852,7 +1493,7 @@ def wait_for_state(self, container_id, complete_states=None, wait_states=None, a delay = min(delay * 1.1, 10) def present(self, state): - container = self._get_container(self.parameters.name) + container = self._get_container(self.param_name) was_running = container.running was_paused = container.paused container_created = False @@ -2870,31 +1511,31 @@ def present(self, state): self.log('Found container in removal phase') else: self.log('No container found') - if not self.parameters.image: + if not self.param_image: self.fail('Cannot create container when image is not specified!') self.diff_tracker.add('exists', parameter=True, active=False) if container.removing and not self.check_mode: # Wait for container to be removed before trying to create it self.wait_for_state( - container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.parameters.removal_wait_timeout) - new_container = self.container_create(self.parameters.image, self.parameters.create_parameters) + container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout) + new_container = self.container_create(self.param_image) if new_container: container = new_container container_created = True else: # Existing container - different, differences = 
container.has_different_configuration(image) + different, differences = self.has_different_configuration(container, image) image_different = False - if self.parameters.comparisons['image']['comparison'] == 'strict': + if self.all_options['image'].comparison == 'strict': image_different = self._image_is_different(image, container) - if image_different or different or self.parameters.recreate: + if image_different or different or self.param_recreate: self.diff_tracker.merge(differences) self.diff['differences'] = differences.get_legacy_docker_container_diffs() if image_different: self.diff['image_different'] = True self.log("differences") self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True) - image_to_use = self.parameters.image + image_to_use = self.param_image if not image_to_use and container and container.Image: image_to_use = container.Image if not image_to_use: @@ -2904,20 +1545,20 @@ def present(self, state): self.container_remove(container.Id) if not self.check_mode: self.wait_for_state( - container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.parameters.removal_wait_timeout) - new_container = self.container_create(image_to_use, self.parameters.create_parameters) + container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout) + new_container = self.container_create(image_to_use) if new_container: container = new_container container_created = True if container and container.exists: - container = self.update_limits(container) + container = self.update_limits(container, image) container = self.update_networks(container, container_created) if state == 'started' and not container.running: self.diff_tracker.add('running', parameter=True, active=was_running) container = self.container_start(container.Id) - elif state == 'started' and self.parameters.restart: + elif state == 'started' and self.param_restart: self.diff_tracker.add('running', parameter=True, active=was_running) 
self.diff_tracker.add('restarted', parameter=True, active=False) container = self.container_restart(container.Id) @@ -2926,26 +1567,26 @@ def present(self, state): self.container_stop(container.Id) container = self._get_container(container.Id) - if state == 'started' and self.parameters.paused is not None and container.paused != self.parameters.paused: - self.diff_tracker.add('paused', parameter=self.parameters.paused, active=was_paused) + if state == 'started' and self.param_paused is not None and container.paused != self.param_paused: + self.diff_tracker.add('paused', parameter=self.param_paused, active=was_paused) if not self.check_mode: try: - if self.parameters.paused: - self.client.pause(container=container.Id) + if self.param_paused: + self.client.post_call('/containers/{0}/pause', container.Id) else: - self.client.unpause(container=container.Id) + self.client.post_call('/containers/{0}/unpause', container.Id) except Exception as exc: self.fail("Error %s container %s: %s" % ( - "pausing" if self.parameters.paused else "unpausing", container.Id, to_native(exc) + "pausing" if self.param_paused else "unpausing", container.Id, to_native(exc) )) container = self._get_container(container.Id) self.results['changed'] = True - self.results['actions'].append(dict(set_paused=self.parameters.paused)) + self.results['actions'].append(dict(set_paused=self.param_paused)) self.facts = container.raw def absent(self): - container = self._get_container(self.parameters.name) + container = self._get_container(self.param_name) if container.exists: if container.running: self.diff_tracker.add('running', parameter=False, active=True) @@ -2953,30 +1594,28 @@ def absent(self): self.diff_tracker.add('exists', parameter=False, active=True) self.container_remove(container.Id) - def fail(self, msg, **kwargs): - self.client.fail(msg, **kwargs) - def _output_logs(self, msg): - self.client.module.log(msg=msg) + self.module.log(msg=msg) def _get_container(self, container): ''' Expects 
container ID or Name. Returns a container object ''' - return Container(self.client.get_container(container), self.parameters) + return Container(self.client.get_container(container)) def _get_image(self): - if not self.parameters.image: + image_parameter = self.param_image + if not image_parameter: self.log('No image specified') return None - if is_image_name_id(self.parameters.image): - image = self.client.find_image_by_id(self.parameters.image) + if is_image_name_id(image_parameter): + image = self.client.find_image_by_id(image_parameter) else: - repository, tag = utils.parse_repository_tag(self.parameters.image) + repository, tag = parse_repository_tag(image_parameter) if not tag: tag = "latest" image = self.client.find_image(repository, tag) - if not image or self.parameters.pull: + if not image or self.param_pull: if not self.check_mode: self.log("Pull the image.") image, alreadyToLatest = self.client.pull_image(repository, tag) @@ -3003,21 +1642,188 @@ def _image_is_different(self, image, container): return True return False - def update_limits(self, container): - limits_differ, different_limits = container.has_different_resource_limits() + def _compose_create_parameters(self, image): + params = { + 'Image': image, + } + for options, values in self.parameters: + engine = options.get_engine('docker_api') + if engine.can_set_value(self.client.docker_api_version): + engine.set_value(self.module, params, self.client.docker_api_version, options.options, values) + return params + + def has_different_configuration(self, container, image): + differences = DifferenceTracker() + for options, param_values in self.parameters: + engine = options.get_engine('docker_api') + container_values = engine.get_value(self.module, container.raw, self.client.docker_api_version, options.options) + expected_values = engine.get_expected_values(self.module, self.client, self.client.docker_api_version, options.options, image, param_values.copy()) + for option in options.options: + if 
option.name in expected_values: + param_value = expected_values[option.name] + container_value = container_values.get(option.name) + match = compare_generic(param_value, container_value, option.comparison, option.comparison_type) + + if not match: + if engine.ignore_mismatching_result(self.module, self.client, self.client.docker_api_version, option, image, container_value, param_value): + continue + # TODO + # if option.name == 'healthcheck' and config_mapping['disable_healthcheck'] and self.parameters.disable_healthcheck: + # # If the healthcheck is disabled (both in parameters and for the current container), and the user + # # requested strict comparison for healthcheck, the comparison will fail. That's why we ignore the + # # expected_healthcheck comparison in this case. + # continue + + # no match. record the differences + p = param_value + c = container_value + if option.comparison_type == 'set': + # Since the order does not matter, sort so that the diff output is better. + if p is not None: + p = sorted(p) + if c is not None: + c = sorted(c) + elif option.comparison_type == 'set(dict)': + # Since the order does not matter, sort so that the diff output is better. + if option.name == 'expected_mounts': + # For selected values, use one entry as key + def sort_key_fn(x): + return x['target'] + else: + # We sort the list of dictionaries by using the sorted items of a dict as its key. 
+ def sort_key_fn(x): + return sorted((a, to_text(b, errors='surrogate_or_strict')) for a, b in x.items()) + if p is not None: + p = sorted(p, key=sort_key_fn) + if c is not None: + c = sorted(c, key=sort_key_fn) + differences.add(option.name, parameter=p, active=c) + + has_differences = not differences.empty + return has_differences, differences + + def has_different_resource_limits(self, container, image): + ''' + Diff parameters and container resource limits + ''' + differences = DifferenceTracker() + for options, param_values in self.parameters: + engine = options.get_engine('docker_api') + if not engine.can_update_value(self.client.docker_api_version): + continue + container_values = engine.get_value(self.module, container.raw, self.client.docker_api_version, options.options) + expected_values = engine.get_expected_values(self.module, self.client, self.client.docker_api_version, options.options, image, param_values.copy()) + for option in options.options: + if option.name in expected_values: + param_value = expected_values[option.name] + container_value = container_values.get(option.name) + match = compare_generic(param_value, container_value, option.comparison, option.comparison_type) + + if not match: + # no match. 
record the differences + differences.add(option.name, parameter=param_value, active=container_value) + different = not differences.empty + return different, differences + + def _compose_update_parameters(self): + result = {} + for options, values in self.parameters: + engine = options.get_engine('docker_api') + if not engine.can_update_value(self.client.docker_api_version): + continue + engine.update_value(self.module, result, self.client.docker_api_version, options.options, values) + return result + + def update_limits(self, container, image): + limits_differ, different_limits = self.has_different_resource_limits(container, image) if limits_differ: self.log("limit differences:") self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True) self.diff_tracker.merge(different_limits) if limits_differ and not self.check_mode: - self.container_update(container.Id, self.parameters.update_parameters) + self.container_update(container.Id, self._compose_update_parameters()) return self._get_container(container.Id) return container + def has_network_differences(self, container): + ''' + Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6 + ''' + different = False + differences = [] + + if not self.module.params['networks']: + return different, differences + + if not container.container.get('NetworkSettings'): + self.fail("has_missing_networks: Error parsing container properties. 
NetworkSettings missing.") + + connected_networks = container.container['NetworkSettings']['Networks'] + for network in self.module.params['networks']: + network_info = connected_networks.get(network['name']) + if network_info is None: + different = True + differences.append(dict( + parameter=network, + container=None + )) + else: + diff = False + network_info_ipam = network_info.get('IPAMConfig') or {} + if network.get('ipv4_address') and network['ipv4_address'] != network_info_ipam.get('IPv4Address'): + diff = True + if network.get('ipv6_address') and network['ipv6_address'] != network_info_ipam.get('IPv6Address'): + diff = True + if network.get('aliases'): + if not compare_generic(network['aliases'], network_info.get('Aliases'), 'allow_more_present', 'set'): + diff = True + if network.get('links'): + expected_links = [] + for link, alias in network['links']: + expected_links.append("%s:%s" % (link, alias)) + if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'): + diff = True + if diff: + different = True + differences.append(dict( + parameter=network, + container=dict( + name=network['name'], + ipv4_address=network_info_ipam.get('IPv4Address'), + ipv6_address=network_info_ipam.get('IPv6Address'), + aliases=network_info.get('Aliases'), + links=network_info.get('Links') + ) + )) + return different, differences + + def has_extra_networks(self, container): + ''' + Check if the container is connected to non-requested networks + ''' + extra_networks = [] + extra = False + + if not container.container.get('NetworkSettings'): + self.fail("has_extra_networks: Error parsing container properties. 
NetworkSettings missing.") + + connected_networks = container.container['NetworkSettings'].get('Networks') + if connected_networks: + for network, network_config in connected_networks.items(): + keep = False + if self.module.params['networks']: + for expected_network in self.module.params['networks']: + if expected_network['name'] == network: + keep = True + if not keep: + extra = True + extra_networks.append(dict(name=network, id=network_config['NetworkID'])) + return extra, extra_networks + def update_networks(self, container, container_created): updated_container = container - if self.parameters.comparisons['networks']['comparison'] != 'ignore' or container_created: - has_network_differences, network_differences = container.has_network_differences() + if self.all_options['networks'].comparison != 'ignore' or container_created: + has_network_differences, network_differences = self.has_network_differences(container) if has_network_differences: if self.diff.get('differences'): self.diff['differences'].append(dict(network_differences=network_differences)) @@ -3032,8 +1838,8 @@ def update_networks(self, container, container_created): self.results['changed'] = True updated_container = self._add_networks(container, network_differences) - if (self.parameters.comparisons['networks']['comparison'] == 'strict' and self.parameters.networks is not None) or self.parameters.purge_networks: - has_extra_networks, extra_networks = container.has_extra_networks() + if (self.all_options['networks'].comparison == 'strict' and self.module.params['networks'] is not None) or self.param_purge_networks: + has_extra_networks, extra_networks = self.has_extra_networks(container) if has_extra_networks: if self.diff.get('differences'): self.diff['differences'].append(dict(purge_networks=extra_networks)) @@ -3055,21 +1861,28 @@ def _add_networks(self, container, differences): self.results['actions'].append(dict(removed_from_network=diff['parameter']['name'])) if not self.check_mode: try: - 
self.client.disconnect_container_from_network(container.Id, diff['parameter']['id']) + self.client.post_json('/networks/{0}/disconnect', diff['parameter']['id'], data={'Container': container.Id}) except Exception as exc: self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'], to_native(exc))) # connect to the network params = dict() - for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'): + for para, dest_para in {'ipv4_address': 'IPv4Address', 'ipv6_address': 'IPv6Address', 'links': 'Links', 'aliases': 'Aliases'}.items(): if diff['parameter'].get(para): - params[para] = diff['parameter'][para] + value = diff['parameter'][para] + if para == 'links': + value = normalize_links(value) + params[dest_para] = value self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params)) if not self.check_mode: try: self.log("Connecting container to network %s" % diff['parameter']['id']) self.log(params, pretty_print=True) - self.client.connect_container_to_network(container.Id, diff['parameter']['id'], **params) + data = { + 'Container': container.Id, + 'EndpointConfig': params, + } + self.client.post_json('/networks/{0}/connect', diff['parameter']['id'], data=data) except Exception as exc: self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], to_native(exc))) return self._get_container(container.Id) @@ -3079,13 +1892,14 @@ def _purge_networks(self, container, networks): self.results['actions'].append(dict(removed_from_network=network['name'])) if not self.check_mode: try: - self.client.disconnect_container_from_network(container.Id, network['name']) + self.client.post_json('/networks/{0}/disconnect', network['name'], data={'Container': container.Id}) except Exception as exc: self.fail("Error disconnecting container from network %s - %s" % (network['name'], to_native(exc))) return self._get_container(container.Id) - def container_create(self, image, 
create_parameters): + def container_create(self, image): + create_parameters = self._compose_create_parameters(image) self.log("create container") self.log("image: %s parameters:" % image) self.log(create_parameters, pretty_print=True) @@ -3094,7 +1908,8 @@ def container_create(self, image, create_parameters): new_container = None if not self.check_mode: try: - new_container = self.client.create_container(image, **create_parameters) + params = {'name': self.param_name} + new_container = self.client.post_json_to_json('/containers/create', data=create_parameters, params=params) self.client.report_warnings(new_container) except Exception as exc: self.fail("Error creating container: %s" % to_native(exc)) @@ -3107,34 +1922,39 @@ def container_start(self, container_id): self.results['changed'] = True if not self.check_mode: try: - self.client.start(container=container_id) + self.client.post_json('/containers/{0}/start', container_id) except Exception as exc: self.fail("Error starting container %s: %s" % (container_id, to_native(exc))) - if self.parameters.detach is False: - if self.client.docker_py_version >= LooseVersion('3.0'): - status = self.client.wait(container_id)['StatusCode'] - else: - status = self.client.wait(container_id) + if self.module.params['detach'] is False: + status = self.client.post_json_as_json('/containers/{0}/wait', container_id)['StatusCode'] self.client.fail_results['status'] = status self.results['status'] = status - if self.parameters.auto_remove: + if self.module.params['auto_remove']: output = "Cannot retrieve result as auto_remove is enabled" - if self.parameters.output_logs: - self.client.module.warn('Cannot output_logs if auto_remove is enabled!') + if self.param_output_logs: + self.module.warn('Cannot output_logs if auto_remove is enabled!') else: - config = self.client.inspect_container(container_id) + config = self.client.get_json('/containers/{0}/json', container_id) logging_driver = config['HostConfig']['LogConfig']['Type'] if 
logging_driver in ('json-file', 'journald', 'local'): - output = self.client.logs(container_id, stdout=True, stderr=True, stream=False, timestamps=False) - if self.parameters.output_logs: + params = { + 'stderr': 1, + 'stdout': 1, + 'timestamps': 0, + 'follow': 0, + 'tail': 'all', + } + res = self.client._get(self.client._url('/containers/{0}/logs', container_id), params=params) + output = self.client._get_result_tty(False, res, config['Config']['Tty']) + if self.param_output_logs: self._output_logs(msg=output) else: output = "Result logged using `%s` driver" % logging_driver - if self.parameters.cleanup: + if self.param_cleanup: self.container_remove(container_id, force=True) insp = self._get_container(container_id) if insp.raw: @@ -3149,16 +1969,16 @@ def container_start(self, container_id): return self._get_container(container_id) def container_remove(self, container_id, link=False, force=False): - volume_state = (not self.parameters.keep_volumes) + volume_state = (not self.param_keep_volumes) self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force)) self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force)) self.results['changed'] = True - response = None if not self.check_mode: count = 0 while True: try: - response = self.client.remove_container(container_id, v=volume_state, link=link, force=force) + params = {'v': volume_state, 'link': link, 'force': force} + self.client.delete_call('/containers/{0}', container_id, params=params) except NotFound as dummy: pass except APIError as exc: @@ -3170,7 +1990,7 @@ def container_remove(self, container_id, link=False, force=False): count += 1 # Unpause try: - self.client.unpause(container=container_id) + self.client.post_call('/containers/{0}/unpause', container_id) except Exception as exc2: self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2))) # Now try again @@ -3183,7 +2003,6 @@ 
def container_remove(self, container_id, link=False, force=False): self.fail("Error removing container %s: %s" % (container_id, to_native(exc))) # We only loop when explicitly requested by 'continue' break - return response def container_update(self, container_id, update_parameters): if update_parameters: @@ -3193,54 +2012,58 @@ def container_update(self, container_id, update_parameters): self.results['changed'] = True if not self.check_mode and callable(getattr(self.client, 'update_container')): try: - result = self.client.update_container(container_id, **update_parameters) + result = self.client.post_json_to_json('/containers/{0}/update', container_id, data=update_parameters) self.client.report_warnings(result) except Exception as exc: self.fail("Error updating container %s: %s" % (container_id, to_native(exc))) return self._get_container(container_id) def container_kill(self, container_id): - self.results['actions'].append(dict(killed=container_id, signal=self.parameters.kill_signal)) + self.results['actions'].append(dict(killed=container_id, signal=self.param_kill_signal)) self.results['changed'] = True - response = None if not self.check_mode: try: - if self.parameters.kill_signal: - response = self.client.kill(container_id, signal=self.parameters.kill_signal) - else: - response = self.client.kill(container_id) + params = {} + if self.param_kill_signal is not None: + params['signal'] = int(self.param_kill_signal) + self.client.post_call('/containers/{0}/kill', container_id, params=params) except Exception as exc: self.fail("Error killing container %s: %s" % (container_id, to_native(exc))) - return response def container_restart(self, container_id): - self.results['actions'].append(dict(restarted=container_id, timeout=self.parameters.stop_timeout)) + self.results['actions'].append(dict(restarted=container_id, timeout=self.module.params['stop_timeout'])) self.results['changed'] = True if not self.check_mode: try: - if self.parameters.stop_timeout: - dummy = 
self.client.restart(container_id, timeout=self.parameters.stop_timeout) - else: - dummy = self.client.restart(container_id) + timeout = self.module.params['stop_timeout'] or 10 + client_timeout = self.client.timeout + if client_timeout is not None: + client_timeout += timeout + self.client.post_call('/containers/{0}/restart', container_id, params={'t': timeout}, timeout=client_timeout) except Exception as exc: self.fail("Error restarting container %s: %s" % (container_id, to_native(exc))) return self._get_container(container_id) def container_stop(self, container_id): - if self.parameters.force_kill: + if self.param_force_kill: self.container_kill(container_id) return - self.results['actions'].append(dict(stopped=container_id, timeout=self.parameters.stop_timeout)) + self.results['actions'].append(dict(stopped=container_id, timeout=self.module.params['stop_timeout'])) self.results['changed'] = True - response = None if not self.check_mode: count = 0 while True: try: - if self.parameters.stop_timeout: - response = self.client.stop(container_id, timeout=self.parameters.stop_timeout) + timeout = self.module.params['stop_timeout'] + if timeout: + params = {'t': timeout} else: - response = self.client.stop(container_id) + params = {} + timeout = 10 + client_timeout = self.client.timeout + if client_timeout is not None: + client_timeout += timeout + self.client.post_call('/containers/{0}/stop', container_id, params=params, timeout=client_timeout) except APIError as exc: if 'Unpause the container before stopping or killing' in exc.explanation: # New docker daemon versions do not allow containers to be removed @@ -3250,7 +2073,7 @@ def container_stop(self, container_id): count += 1 # Unpause try: - self.client.unpause(container=container_id) + self.client.post_call('/containers/{0}/unpause', container_id) except Exception as exc2: self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2))) # Now try again @@ -3260,303 +2083,22 @@ def 
container_stop(self, container_id): self.fail("Error stopping container %s: %s" % (container_id, to_native(exc))) # We only loop when explicitly requested by 'continue' break - return response - - -def detect_ipvX_address_usage(client): - ''' - Helper function to detect whether any specified network uses ipv4_address or ipv6_address - ''' - for network in client.module.params.get("networks") or []: - if network.get('ipv4_address') is not None or network.get('ipv6_address') is not None: - return True - return False - - -class AnsibleDockerClientContainer(AnsibleDockerClient): - # A list of module options which are not docker container properties - __NON_CONTAINER_PROPERTY_OPTIONS = tuple([ - 'env_file', 'force_kill', 'keep_volumes', 'ignore_image', 'name', 'pull', 'purge_networks', - 'recreate', 'restart', 'state', 'networks', 'cleanup', 'kill_signal', - 'output_logs', 'paused', 'removal_wait_timeout', 'default_host_ip', 'command_handling', - ] + list(DOCKER_COMMON_ARGS.keys())) - - def _parse_comparisons(self): - comparisons = {} - comp_aliases = {} - # Put in defaults - explicit_types = dict( - command='list', - devices='set(dict)', - device_requests='set(dict)', - dns_search_domains='list', - dns_servers='list', - env='set', - entrypoint='list', - etc_hosts='set', - mounts='set(dict)', - networks='set(dict)', - ulimits='set(dict)', - device_read_bps='set(dict)', - device_write_bps='set(dict)', - device_read_iops='set(dict)', - device_write_iops='set(dict)', - ) - all_options = set() # this is for improving user feedback when a wrong option was specified for comparison - default_values = dict( - stop_timeout='ignore', - ) - for option, data in self.module.argument_spec.items(): - all_options.add(option) - for alias in data.get('aliases', []): - all_options.add(alias) - # Ignore options which aren't used as container properties - if option in self.__NON_CONTAINER_PROPERTY_OPTIONS and option != 'networks': - continue - # Determine option type - if option in 
explicit_types: - datatype = explicit_types[option] - elif data['type'] == 'list': - datatype = 'set' - elif data['type'] == 'dict': - datatype = 'dict' - else: - datatype = 'value' - # Determine comparison type - if option in default_values: - comparison = default_values[option] - elif datatype in ('list', 'value'): - comparison = 'strict' - else: - comparison = 'allow_more_present' - comparisons[option] = dict(type=datatype, comparison=comparison, name=option) - # Keep track of aliases - comp_aliases[option] = option - for alias in data.get('aliases', []): - comp_aliases[alias] = option - # Process legacy ignore options - if self.module.params['ignore_image']: - comparisons['image']['comparison'] = 'ignore' - if self.module.params['purge_networks']: - comparisons['networks']['comparison'] = 'strict' - # Process options - if self.module.params.get('comparisons'): - # If '*' appears in comparisons, process it first - if '*' in self.module.params['comparisons']: - value = self.module.params['comparisons']['*'] - if value not in ('strict', 'ignore'): - self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!") - for option, v in comparisons.items(): - if option == 'networks': - # `networks` is special: only update if - # some value is actually specified - if self.module.params['networks'] is None: - continue - v['comparison'] = value - # Now process all other comparisons. - comp_aliases_used = {} - for key, value in self.module.params['comparisons'].items(): - if key == '*': - continue - # Find main key - key_main = comp_aliases.get(key) - if key_main is None: - if key_main in all_options: - self.fail("The module option '%s' cannot be specified in the comparisons dict, " - "since it does not correspond to container's state!" % key) - self.fail("Unknown module option '%s' in comparisons dict!" % key) - if key_main in comp_aliases_used: - self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" 
% (key, comp_aliases_used[key_main], key_main)) - comp_aliases_used[key_main] = key - # Check value and update accordingly - if value in ('strict', 'ignore'): - comparisons[key_main]['comparison'] = value - elif value == 'allow_more_present': - if comparisons[key_main]['type'] == 'value': - self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value)) - comparisons[key_main]['comparison'] = value - else: - self.fail("Unknown comparison mode '%s'!" % value) - # Add implicit options - comparisons['publish_all_ports'] = dict(type='value', comparison='strict', name='published_ports') - comparisons['expected_ports'] = dict(type='dict', comparison=comparisons['published_ports']['comparison'], name='expected_ports') - comparisons['disable_healthcheck'] = dict(type='value', - comparison='ignore' if comparisons['healthcheck']['comparison'] == 'ignore' else 'strict', - name='disable_healthcheck') - # Check legacy values - if self.module.params['ignore_image'] and comparisons['image']['comparison'] != 'ignore': - self.module.warn('The ignore_image option has been overridden by the comparisons option!') - if self.module.params['purge_networks'] and comparisons['networks']['comparison'] != 'strict': - self.module.warn('The purge_networks option has been overridden by the comparisons option!') - self.comparisons = comparisons - - def _get_additional_minimal_versions(self): - stop_timeout_needed_for_update = self.module.params.get("stop_timeout") is not None and self.module.params.get('state') != 'absent' - stop_timeout_supported = self.docker_py_version >= LooseVersion('2.1') - if stop_timeout_needed_for_update and not stop_timeout_supported: - # We warn (instead of fail) since in older versions, stop_timeout was not used - # to update the container's configuration, but only when stopping a container. - self.module.warn("Docker SDK for Python's version is %s. 
Minimum version required is 2.1 to update " - "the container's stop_timeout configuration. " - "If you use the 'docker-py' module, you have to switch to the 'docker' Python package." % (docker_version,)) - self.option_minimal_versions['stop_timeout']['supported'] = stop_timeout_supported - - def __init__(self, **kwargs): - option_minimal_versions = dict( - # internal options - log_config=dict(), - publish_all_ports=dict(), - ports=dict(), - volume_binds=dict(), - name=dict(), - # normal options - device_read_bps=dict(docker_py_version='1.9.0'), - device_read_iops=dict(docker_py_version='1.9.0'), - device_write_bps=dict(docker_py_version='1.9.0'), - device_write_iops=dict(docker_py_version='1.9.0'), - device_requests=dict(docker_py_version='4.3.0', docker_api_version='1.40'), - dns_opts=dict(docker_py_version='1.10.0'), - auto_remove=dict(docker_py_version='2.1.0'), - healthcheck=dict(docker_py_version='2.0.0'), - init=dict(docker_py_version='2.2.0'), - runtime=dict(docker_py_version='2.4.0'), - sysctls=dict(docker_py_version='1.10.0'), - userns_mode=dict(docker_py_version='1.10.0'), - uts=dict(docker_py_version='3.5.0'), - pids_limit=dict(docker_py_version='1.10.0'), - mounts=dict(docker_py_version='2.6.0'), - cpus=dict(docker_py_version='2.3.0'), - storage_opts=dict(docker_py_version='2.1.0'), - # specials - ipvX_address_supported=dict(docker_py_version='1.9.0', - detect_usage=detect_ipvX_address_usage, - usage_msg='ipv4_address or ipv6_address in networks'), - stop_timeout=dict(), # see _get_additional_minimal_versions() - ) - - super(AnsibleDockerClientContainer, self).__init__( - option_minimal_versions=option_minimal_versions, - option_minimal_versions_ignore_params=self.__NON_CONTAINER_PROPERTY_OPTIONS, - **kwargs - ) - - self._get_additional_minimal_versions() - self._parse_comparisons() - - if self.module.params['container_default_behavior'] == 'compatibility': - old_default_values = dict( - auto_remove=False, - detach=True, - init=False, - 
interactive=False, - memory="0", - paused=False, - privileged=False, - read_only=False, - tty=False, - ) - for param, value in old_default_values.items(): - if self.module.params[param] is None: - self.module.params[param] = value def main(): argument_spec = dict( - auto_remove=dict(type='bool'), - blkio_weight=dict(type='int'), - capabilities=dict(type='list', elements='str'), - cap_drop=dict(type='list', elements='str'), - cgroup_parent=dict(type='str'), cleanup=dict(type='bool', default=False), - command=dict(type='raw'), comparisons=dict(type='dict'), container_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']), - command_handling=dict(type='str', choices=['compatibility', 'correct'], default='correct'), - cpu_period=dict(type='int'), - cpu_quota=dict(type='int'), - cpus=dict(type='float'), - cpuset_cpus=dict(type='str'), - cpuset_mems=dict(type='str'), - cpu_shares=dict(type='int'), + command_handling=dict(type='str', choices=['compatibility', 'correct']), default_host_ip=dict(type='str'), - detach=dict(type='bool'), - devices=dict(type='list', elements='str'), - device_read_bps=dict(type='list', elements='dict', options=dict( - path=dict(required=True, type='str'), - rate=dict(required=True, type='str'), - )), - device_write_bps=dict(type='list', elements='dict', options=dict( - path=dict(required=True, type='str'), - rate=dict(required=True, type='str'), - )), - device_read_iops=dict(type='list', elements='dict', options=dict( - path=dict(required=True, type='str'), - rate=dict(required=True, type='int'), - )), - device_write_iops=dict(type='list', elements='dict', options=dict( - path=dict(required=True, type='str'), - rate=dict(required=True, type='int'), - )), - device_requests=dict(type='list', elements='dict', options=dict( - capabilities=dict(type='list', elements='list'), - count=dict(type='int'), - device_ids=dict(type='list', elements='str'), - driver=dict(type='str'), - options=dict(type='dict'), - 
)), - dns_servers=dict(type='list', elements='str'), - dns_opts=dict(type='list', elements='str'), - dns_search_domains=dict(type='list', elements='str'), - domainname=dict(type='str'), - entrypoint=dict(type='list', elements='str'), - env=dict(type='dict'), - env_file=dict(type='path'), - etc_hosts=dict(type='dict'), - exposed_ports=dict(type='list', elements='str', aliases=['exposed', 'expose']), force_kill=dict(type='bool', default=False, aliases=['forcekill']), - groups=dict(type='list', elements='str'), - healthcheck=dict(type='dict', options=dict( - test=dict(type='raw'), - interval=dict(type='str'), - timeout=dict(type='str'), - start_period=dict(type='str'), - retries=dict(type='int'), - )), - hostname=dict(type='str'), ignore_image=dict(type='bool', default=False), image=dict(type='str'), image_label_mismatch=dict(type='str', choices=['ignore', 'fail'], default='ignore'), - init=dict(type='bool'), - interactive=dict(type='bool'), - ipc_mode=dict(type='str'), keep_volumes=dict(type='bool', default=True), - kernel_memory=dict(type='str'), kill_signal=dict(type='str'), - labels=dict(type='dict'), - links=dict(type='list', elements='str'), - log_driver=dict(type='str'), - log_options=dict(type='dict', aliases=['log_opt']), - mac_address=dict(type='str'), - memory=dict(type='str'), - memory_reservation=dict(type='str'), - memory_swap=dict(type='str'), - memory_swappiness=dict(type='int'), - mounts=dict(type='list', elements='dict', options=dict( - target=dict(type='str', required=True), - source=dict(type='str'), - type=dict(type='str', choices=['bind', 'volume', 'tmpfs', 'npipe'], default='volume'), - read_only=dict(type='bool'), - consistency=dict(type='str', choices=['default', 'consistent', 'cached', 'delegated']), - propagation=dict(type='str', choices=['private', 'rprivate', 'shared', 'rshared', 'slave', 'rslave']), - no_copy=dict(type='bool'), - labels=dict(type='dict'), - volume_driver=dict(type='str'), - volume_options=dict(type='dict'), - 
tmpfs_size=dict(type='str'), - tmpfs_mode=dict(type='str'), - )), name=dict(type='str', required=True), - network_mode=dict(type='str'), networks=dict(type='list', elements='dict', options=dict( name=dict(type='str', required=True), ipv4_address=dict(type='str'), @@ -3565,65 +2107,67 @@ def main(): links=dict(type='list', elements='str'), )), networks_cli_compatible=dict(type='bool', default=True), - oom_killer=dict(type='bool'), - oom_score_adj=dict(type='int'), output_logs=dict(type='bool', default=False), paused=dict(type='bool'), - pid_mode=dict(type='str'), - pids_limit=dict(type='int'), - privileged=dict(type='bool'), - publish_all_ports=dict(type='bool'), - published_ports=dict(type='list', elements='str', aliases=['ports']), pull=dict(type='bool', default=False), purge_networks=dict(type='bool', default=False), - read_only=dict(type='bool'), recreate=dict(type='bool', default=False), removal_wait_timeout=dict(type='float'), restart=dict(type='bool', default=False), - restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']), - restart_retries=dict(type='int'), - runtime=dict(type='str'), - security_opts=dict(type='list', elements='str'), - shm_size=dict(type='str'), state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']), stop_signal=dict(type='str'), - stop_timeout=dict(type='int'), - storage_opts=dict(type='dict'), - sysctls=dict(type='dict'), - tmpfs=dict(type='list', elements='str'), - tty=dict(type='bool'), - ulimits=dict(type='list', elements='str'), - user=dict(type='str'), - userns_mode=dict(type='str'), - uts=dict(type='str'), - volume_driver=dict(type='str'), - volumes=dict(type='list', elements='str'), - volumes_from=dict(type='list', elements='str'), - working_dir=dict(type='str'), ) + mutually_exclusive = [] + required_together = [] + required_one_of = [] required_if = [ ('state', 'present', ['image']) ] + required_by = {} + + option_minimal_versions = {} + + active_options 
= [] + for options in OPTIONS: + if not options.supports_engine('docker_api'): + continue + + mutually_exclusive.extend(options.ansible_mutually_exclusive) + required_together.extend(options.ansible_required_together) + required_one_of.extend(options.ansible_required_one_of) + required_if.extend(options.ansible_required_if) + required_by.update(options.ansible_required_by) + argument_spec.update(options.argument_spec) + + engine = options.get_engine('docker_api') + if engine.min_docker_api is not None: + for option in options.options: + if not option.not_an_ansible_option: + option_minimal_versions[option.name] = {'docker_api_version': engine.min_docker_api} + + active_options.append(options) - client = AnsibleDockerClientContainer( + client = AnsibleDockerClient( argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + required_together=required_together, + required_one_of=required_one_of, required_if=required_if, + required_by=required_by, + option_minimal_versions=option_minimal_versions, supports_check_mode=True, ) - if client.module.params['networks_cli_compatible'] is True and client.module.params['networks'] and client.module.params['network_mode'] is None: - # Same behavior as Docker CLI: if networks are specified, use the name of the first network as the value for network_mode - # (assuming no explicit value is specified for network_mode) - client.module.params['network_mode'] = client.module.params['networks'][0]['name'] try: - cm = ContainerManager(client) + cm = ContainerManager(client.module, client, active_options) + cm.run() client.module.exit_json(**sanitize_result(cm.results)) except DockerException as e: - client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when Docker SDK for 
Python tried to talk to the docker daemon: {0}'.format(to_native(e)), + 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), exception=traceback.format_exc()) diff --git a/plugins/modules/docker_container2.py b/plugins/modules/docker_container2.py deleted file mode 100644 index 1003276c7..000000000 --- a/plugins/modules/docker_container2.py +++ /dev/null @@ -1,2175 +0,0 @@ -#!/usr/bin/python -# -# Copyright 2016 Red Hat | Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: docker_container2 - -short_description: manage Docker containers - -description: - - Manage the life cycle of Docker containers. - - Supports check mode. Run with C(--check) and C(--diff) to view config difference and list of actions to be taken. - - -notes: - - For most config changes, the container needs to be recreated. This means that the existing container has to be destroyed and - a new one created. This can cause unexpected data loss and downtime. You can use the I(comparisons) option to - prevent this. - - If the module needs to recreate the container, it will only use the options provided to the module to create the - new container (except I(image)). Therefore, always specify B(all) options relevant to the container. - - When I(restart) is set to C(true), the module will only restart the container if no config changes are detected. - -options: - auto_remove: - description: - - Enable auto-removal of the container on daemon side when the container's process exits. - - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false). - type: bool - blkio_weight: - description: - - Block IO (relative weight), between 10 and 1000. - type: int - capabilities: - description: - - List of capabilities to add to the container. 
- - This is equivalent to C(docker run --cap-add), or the docker-compose option C(cap_add). - type: list - elements: str - cap_drop: - description: - - List of capabilities to drop from the container. - type: list - elements: str - cgroup_parent: - description: - - Specify the parent cgroup for the container. - type: str - version_added: 1.1.0 - cleanup: - description: - - Use with I(detach=false) to remove the container after successful execution. - type: bool - default: no - command: - description: - - Command to execute when the container starts. A command may be either a string or a list. - - Prior to version 2.4, strings were split on commas. - - See I(command_handling) for differences in how strings and lists are handled. - type: raw - comparisons: - description: - - Allows to specify how properties of existing containers are compared with - module options to decide whether the container should be recreated / updated - or not. - - Only options which correspond to the state of a container as handled by the - Docker daemon can be specified, as well as C(networks). - - Must be a dictionary specifying for an option one of the keys C(strict), C(ignore) - and C(allow_more_present). - - If C(strict) is specified, values are tested for equality, and changes always - result in updating or restarting. If C(ignore) is specified, changes are ignored. - - C(allow_more_present) is allowed only for lists, sets and dicts. If it is - specified for lists or sets, the container will only be updated or restarted if - the module option contains a value which is not present in the container's - options. If the option is specified for a dict, the container will only be updated - or restarted if the module option contains a key which is not present in the - container's option, or if the value of a key present differs. - - The wildcard option C(*) can be used to set one of the default values C(strict) - or C(ignore) to I(all) comparisons which are not explicitly set to other values. 
- - See the examples for details. - type: dict - container_default_behavior: - description: - - In older versions of this module, various module options used to have default values. - This caused problems with containers which use different values for these options. - - The default value is now C(no_defaults). To restore the old behavior, set it to - C(compatibility), which will ensure that the default values are used when the values - are not explicitly specified by the user. - - This affects the I(auto_remove), I(detach), I(init), I(interactive), I(memory), - I(paused), I(privileged), I(read_only) and I(tty) options. - type: str - choices: - - compatibility - - no_defaults - default: no_defaults - command_handling: - description: - - The default behavior for I(command) (when provided as a list) and I(entrypoint) is to - convert them to strings without considering shell quoting rules. (For comparing idempotency, - the resulting string is split considering shell quoting rules.) - - Also, setting I(command) to an empty list of string, and setting I(entrypoint) to an empty - list will be handled as if these options are not specified. This is different from idempotency - handling for other container-config related options. - - When this is set to C(compatibility), which was the default until community.docker 3.0.0, the - current behavior will be kept. - - When this is set to C(correct), these options are kept as lists, and an empty value or empty - list will be handled correctly for idempotency checks. This has been the default since - community.docker 3.0.0. - type: str - choices: - - compatibility - - correct - version_added: 1.9.0 - default: correct - cpu_period: - description: - - Limit CPU CFS (Completely Fair Scheduler) period. - - See I(cpus) for an easier to use alternative. - type: int - cpu_quota: - description: - - Limit CPU CFS (Completely Fair Scheduler) quota. - - See I(cpus) for an easier to use alternative. 
- type: int - cpus: - description: - - Specify how much of the available CPU resources a container can use. - - A value of C(1.5) means that at most one and a half CPU (core) will be used. - type: float - cpuset_cpus: - description: - - CPUs in which to allow execution C(1,3) or C(1-3). - type: str - cpuset_mems: - description: - - Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1). - type: str - cpu_shares: - description: - - CPU shares (relative weight). - type: int - default_host_ip: - description: - - Define the default host IP to use. - - Must be an empty string, an IPv4 address, or an IPv6 address. - - With Docker 20.10.2 or newer, this should be set to an empty string (C("")) to avoid the - port bindings without an explicit IP address to only bind to IPv4. - See U(https://github.com/ansible-collections/community.docker/issues/70) for details. - - By default, the module will try to auto-detect this value from the C(bridge) network's - C(com.docker.network.bridge.host_binding_ipv4) option. If it cannot auto-detect it, it - will fall back to C(0.0.0.0). - type: str - version_added: 1.2.0 - detach: - description: - - Enable detached mode to leave the container running in background. - - If disabled, the task will reflect the status of the container run (failed if the command failed). - - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(true). - type: bool - devices: - description: - - List of host device bindings to add to the container. - - "Each binding is a mapping expressed in the format C(::)." - type: list - elements: str - device_read_bps: - description: - - "List of device path and read rate (bytes per second) from device." - type: list - elements: dict - suboptions: - path: - description: - - Device path in the container. - type: str - required: yes - rate: - description: - - "Device read limit in format C([])." - - "Number is a positive integer. 
Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), - C(T) (tebibyte), or C(P) (pebibyte)." - - "Omitting the unit defaults to bytes." - type: str - required: yes - device_write_bps: - description: - - "List of device and write rate (bytes per second) to device." - type: list - elements: dict - suboptions: - path: - description: - - Device path in the container. - type: str - required: yes - rate: - description: - - "Device read limit in format C([])." - - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), - C(T) (tebibyte), or C(P) (pebibyte)." - - "Omitting the unit defaults to bytes." - type: str - required: yes - device_read_iops: - description: - - "List of device and read rate (IO per second) from device." - type: list - elements: dict - suboptions: - path: - description: - - Device path in the container. - type: str - required: yes - rate: - description: - - "Device read limit." - - "Must be a positive integer." - type: int - required: yes - device_write_iops: - description: - - "List of device and write rate (IO per second) to device." - type: list - elements: dict - suboptions: - path: - description: - - Device path in the container. - type: str - required: yes - rate: - description: - - "Device read limit." - - "Must be a positive integer." - type: int - required: yes - device_requests: - description: - - Allows to request additional resources, such as GPUs. - type: list - elements: dict - suboptions: - capabilities: - description: - - List of lists of strings to request capabilities. - - The top-level list entries are combined by OR, and for every list entry, - the entries in the list it contains are combined by AND. - - The driver tries to satisfy one of the sub-lists. - - Available capabilities for the C(nvidia) driver can be found at - U(https://github.com/NVIDIA/nvidia-container-runtime). 
- type: list - elements: list - count: - description: - - Number or devices to request. - - Set to C(-1) to request all available devices. - type: int - device_ids: - description: - - List of device IDs. - type: list - elements: str - driver: - description: - - Which driver to use for this device. - type: str - options: - description: - - Driver-specific options. - type: dict - version_added: 0.1.0 - dns_opts: - description: - - List of DNS options. - type: list - elements: str - dns_servers: - description: - - List of custom DNS servers. - type: list - elements: str - dns_search_domains: - description: - - List of custom DNS search domains. - type: list - elements: str - domainname: - description: - - Container domainname. - type: str - env: - description: - - Dictionary of key,value pairs. - - Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (for example C("true")) in order to avoid data loss. - - Please note that if you are passing values in with Jinja2 templates, like C("{{ value }}"), you need to add C(| string) to prevent Ansible to - convert strings such as C("true") back to booleans. The correct way is to use C("{{ value | string }}"). - type: dict - env_file: - description: - - Path to a file, present on the target, containing environment variables I(FOO=BAR). - - If variable also present in I(env), then the I(env) value will override. - type: path - entrypoint: - description: - - Command that overwrites the default C(ENTRYPOINT) of the image. - - See I(command_handling) for differences in how strings and lists are handled. - type: list - elements: str - etc_hosts: - description: - - Dict of host-to-IP mappings, where each host name is a key in the dictionary. - Each host name will be added to the container's C(/etc/hosts) file. - type: dict - exposed_ports: - description: - - List of additional container ports which informs Docker that the container - listens on the specified network ports at runtime. 
- - If the port is already exposed using C(EXPOSE) in a Dockerfile, it does not - need to be exposed again. - type: list - elements: str - aliases: - - exposed - - expose - force_kill: - description: - - Use the kill command when stopping a running container. - type: bool - default: no - aliases: - - forcekill - groups: - description: - - List of additional group names and/or IDs that the container process will run as. - type: list - elements: str - healthcheck: - description: - - Configure a check that is run to determine whether or not containers for this service are "healthy". - - "See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck) - for details on how healthchecks work." - - "I(interval), I(timeout) and I(start_period) are specified as durations. They accept duration as a string in a format - that look like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)." - type: dict - suboptions: - test: - description: - - Command to run to check health. - - Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL). - type: raw - interval: - description: - - Time between running the check. - - The default used by the Docker daemon is C(30s). - type: str - timeout: - description: - - Maximum time to allow one check to run. - - The default used by the Docker daemon is C(30s). - type: str - retries: - description: - - Consecutive number of failures needed to report unhealthy. - - The default used by the Docker daemon is C(3). - type: int - start_period: - description: - - Start period for the container to initialize before starting health-retries countdown. - - The default used by the Docker daemon is C(0s). - type: str - hostname: - description: - - The container's hostname. 
- type: str - ignore_image: - description: - - When I(state) is C(present) or C(started), the module compares the configuration of an existing - container to requested configuration. The evaluation includes the image version. If the image - version in the registry does not match the container, the container will be recreated. You can - stop this behavior by setting I(ignore_image) to C(True). - - "B(Warning:) This option is ignored if C(image: ignore) or C(*: ignore) is specified in the - I(comparisons) option." - type: bool - default: no - image: - description: - - Repository path and tag used to create the container. If an image is not found or pull is true, the image - will be pulled from the registry. If no tag is included, C(latest) will be used. - - Can also be an image ID. If this is the case, the image is assumed to be available locally. - The I(pull) option is ignored for this case. - type: str - image_label_mismatch: - description: - - How to handle labels inherited from the image that are not set explicitly. - - When C(ignore), labels that are present in the image but not specified in I(labels) will be - ignored. This is useful to avoid having to specify the image labels in I(labels) while keeping - labels I(comparisons) C(strict). - - When C(fail), if there are labels present in the image which are not set from I(labels), the - module will fail. This prevents introducing unexpected labels from the base image. - - "B(Warning:) This option is ignored unless C(labels: strict) or C(*: strict) is specified in - the I(comparisons) option." - type: str - choices: - - 'ignore' - - 'fail' - default: ignore - version_added: 2.6.0 - init: - description: - - Run an init inside the container that forwards signals and reaps processes. - - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false). - type: bool - interactive: - description: - - Keep stdin open after a container is launched, even if not attached. 
- - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false). - type: bool - ipc_mode: - description: - - Set the IPC mode for the container. - - Can be one of C(container:) to reuse another container's IPC namespace or C(host) to use - the host's IPC namespace within the container. - type: str - keep_volumes: - description: - - Retain anonymous volumes associated with a removed container. - type: bool - default: yes - kill_signal: - description: - - Override default signal used to kill a running container. - type: str - kernel_memory: - description: - - "Kernel memory limit in format C([]). Number is a positive integer. - Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), - C(T) (tebibyte), or C(P) (pebibyte). Minimum is C(4M)." - - Omitting the unit defaults to bytes. - type: str - labels: - description: - - Dictionary of key value pairs. - type: dict - links: - description: - - List of name aliases for linked containers in the format C(container_name:alias). - - Setting this will force container to be restarted. - type: list - elements: str - log_driver: - description: - - Specify the logging driver. Docker uses C(json-file) by default. - - See L(here,https://docs.docker.com/config/containers/logging/configure/) for possible choices. - type: str - log_options: - description: - - Dictionary of options specific to the chosen I(log_driver). - - See U(https://docs.docker.com/engine/admin/logging/overview/) for details. - - I(log_driver) needs to be specified for I(log_options) to take effect, even if using the default C(json-file) driver. - type: dict - aliases: - - log_opt - mac_address: - description: - - Container MAC address (for example, C(92:d0:c6:0a:29:33)). - type: str - memory: - description: - - "Memory limit in format C([]). Number is a positive integer. - Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), - C(T) (tebibyte), or C(P) (pebibyte)." 
- - Omitting the unit defaults to bytes. - - If I(container_default_behavior) is set to C(compatibility), this option has a default of C("0"). - type: str - memory_reservation: - description: - - "Memory soft limit in format C([]). Number is a positive integer. - Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), - C(T) (tebibyte), or C(P) (pebibyte)." - - Omitting the unit defaults to bytes. - type: str - memory_swap: - description: - - "Total memory limit (memory + swap) in format C([]), or - the special values C(unlimited) or C(-1) for unlimited swap usage. - Number is a positive integer. Unit can be C(B) (byte), C(K) (kibibyte, 1024B), - C(M) (mebibyte), C(G) (gibibyte), C(T) (tebibyte), or C(P) (pebibyte)." - - Omitting the unit defaults to bytes. - type: str - memory_swappiness: - description: - - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. - - If not set, the value will be remain the same if container exists and will be inherited - from the host machine if it is (re-)created. - type: int - mounts: - type: list - elements: dict - description: - - Specification for mounts to be added to the container. More powerful alternative to I(volumes). - suboptions: - target: - description: - - Path inside the container. - type: str - required: true - source: - description: - - Mount source. - - For example, this can be a volume name or a host path. - - If not supplied when I(type=volume) an anonymous volume will be created. - type: str - type: - description: - - The mount type. - - Note that C(npipe) is only supported by Docker for Windows. - type: str - choices: - - bind - - npipe - - tmpfs - - volume - default: volume - read_only: - description: - - Whether the mount should be read-only. - type: bool - consistency: - description: - - The consistency requirement for the mount. 
- type: str - choices: - - cached - - consistent - - default - - delegated - propagation: - description: - - Propagation mode. Only valid for the C(bind) type. - type: str - choices: - - private - - rprivate - - shared - - rshared - - slave - - rslave - no_copy: - description: - - False if the volume should be populated with the data from the target. Only valid for the C(volume) type. - - The default value is C(false). - type: bool - labels: - description: - - User-defined name and labels for the volume. Only valid for the C(volume) type. - type: dict - volume_driver: - description: - - Specify the volume driver. Only valid for the C(volume) type. - - See L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details. - type: str - volume_options: - description: - - Dictionary of options specific to the chosen volume_driver. See - L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details. - type: dict - tmpfs_size: - description: - - "The size for the tmpfs mount in bytes in format []." - - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), - C(T) (tebibyte), or C(P) (pebibyte)." - - "Omitting the unit defaults to bytes." - type: str - tmpfs_mode: - description: - - The permission mode for the tmpfs mount. - type: str - name: - description: - - Assign a name to a new container or match an existing container. - - When identifying an existing container name may be a name or a long or short container ID. - type: str - required: yes - network_mode: - description: - - Connect the container to a network. Choices are C(bridge), C(host), C(none), C(container:), C() or C(default). - - "Since community.docker 2.0.0, if I(networks_cli_compatible) is C(true) and I(networks) contains at least one network, - the default value for I(network_mode) is the name of the first network in the I(networks) list. 
You can prevent this - by explicitly specifying a value for I(network_mode), like the default value C(default) which will be used by Docker if - I(network_mode) is not specified." - type: str - userns_mode: - description: - - Set the user namespace mode for the container. Currently, the only valid value are C(host) and the empty string. - type: str - networks: - description: - - List of networks the container belongs to. - - For examples of the data structure and usage see EXAMPLES below. - - To remove a container from one or more networks, use the I(purge_networks) option. - - If I(networks_cli_compatible) is set to C(false), this will not remove the default network if I(networks) is specified. - This is different from the behavior of C(docker run ...). You need to explicitly use I(purge_networks) to enforce - the removal of the default network (and all other networks not explicitly mentioned in I(networks)) in that case. - type: list - elements: dict - suboptions: - name: - description: - - The network's name. - type: str - required: yes - ipv4_address: - description: - - The container's IPv4 address in this network. - type: str - ipv6_address: - description: - - The container's IPv6 address in this network. - type: str - links: - description: - - A list of containers to link to. - type: list - elements: str - aliases: - description: - - List of aliases for this container in this network. These names - can be used in the network to reach this container. - type: list - elements: str - networks_cli_compatible: - description: - - "If I(networks_cli_compatible) is set to C(yes) (default), this module will behave as - C(docker run --network) and will B(not) add the default network if I(networks) is - specified. If I(networks) is not specified, the default network will be attached." 
- - "When I(networks_cli_compatible) is set to C(no) and networks are provided to the module - via the I(networks) option, the module behaves differently than C(docker run --network): - C(docker run --network other) will create a container with network C(other) attached, - but the default network not attached. This module with I(networks: {name: other}) will - create a container with both C(default) and C(other) attached. If I(purge_networks) is - set to C(yes), the C(default) network will be removed afterwards." - type: bool - default: true - oom_killer: - description: - - Whether or not to disable OOM Killer for the container. - type: bool - oom_score_adj: - description: - - An integer value containing the score given to the container in order to tune - OOM killer preferences. - type: int - output_logs: - description: - - If set to true, output of the container command will be printed. - - Only effective when I(log_driver) is set to C(json-file), C(journald), or C(local). - type: bool - default: no - paused: - description: - - Use with the started state to pause running processes inside the container. - - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false). - type: bool - pid_mode: - description: - - Set the PID namespace mode for the container. - type: str - pids_limit: - description: - - Set PIDs limit for the container. It accepts an integer value. - - Set C(-1) for unlimited PIDs. - type: int - privileged: - description: - - Give extended privileges to the container. - - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false). - type: bool - publish_all_ports: - description: - - Publish all ports to the host. - - Any specified port bindings from I(published_ports) will remain intact when C(true). - type: bool - version_added: 1.8.0 - published_ports: - description: - - List of ports to publish from the container to the host. 
- - "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a - container port, 9000 is a host port, and 0.0.0.0 is a host interface." - - Port ranges can be used for source and destination ports. If two ranges with - different lengths are specified, the shorter range will be used. - Since community.general 0.2.0, if the source port range has length 1, the port will not be assigned - to the first port of the destination range, but to a free port in that range. This is the - same behavior as for C(docker) command line utility. - - "Bind addresses must be either IPv4 or IPv6 addresses. Hostnames are B(not) allowed. This - is different from the C(docker) command line utility. Use the R(dig lookup,ansible_collections.community.general.dig_lookup) - to resolve hostnames." - - If I(networks) parameter is provided, will inspect each network to see if there exists - a bridge network with optional parameter C(com.docker.network.bridge.host_binding_ipv4). - If such a network is found, then published ports where no host IP address is specified - will be bound to the host IP pointed to by C(com.docker.network.bridge.host_binding_ipv4). - Note that the first bridge network with a C(com.docker.network.bridge.host_binding_ipv4) - value encountered in the list of I(networks) is the one that will be used. - - The value C(all) was allowed in earlier versions of this module. Support for it was removed in - community.docker 3.0.0. Use the I(publish_all_ports) option instead. - type: list - elements: str - aliases: - - ports - pull: - description: - - If true, always pull the latest version of an image. Otherwise, will only pull an image - when missing. - - "B(Note:) images are only pulled when specified by name. If the image is specified - as a image ID (hash), it cannot be pulled." - type: bool - default: no - purge_networks: - description: - - Remove the container from ALL networks not included in I(networks) parameter. 
- - Any default networks such as C(bridge), if not found in I(networks), will be removed as well. - type: bool - default: no - read_only: - description: - - Mount the container's root file system as read-only. - - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false). - type: bool - recreate: - description: - - Use with present and started states to force the re-creation of an existing container. - type: bool - default: no - removal_wait_timeout: - description: - - When removing an existing container, the docker daemon API call exists after the container - is scheduled for removal. Removal usually is very fast, but it can happen that during high I/O - load, removal can take longer. By default, the module will wait until the container has been - removed before trying to (re-)create it, however long this takes. - - By setting this option, the module will wait at most this many seconds for the container to be - removed. If the container is still in the removal phase after this many seconds, the module will - fail. - type: float - restart: - description: - - Use with started state to force a matching container to be stopped and restarted. - type: bool - default: no - restart_policy: - description: - - Container restart policy. - - Place quotes around C(no) option. - type: str - choices: - - 'no' - - 'on-failure' - - 'always' - - 'unless-stopped' - restart_retries: - description: - - Use with restart policy to control maximum number of restart attempts. - type: int - runtime: - description: - - Runtime to use for the container. - type: str - shm_size: - description: - - "Size of C(/dev/shm) in format C([]). Number is positive integer. - Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), - C(T) (tebibyte), or C(P) (pebibyte)." - - Omitting the unit defaults to bytes. If you omit the size entirely, Docker daemon uses C(64M). 
- type: str - security_opts: - description: - - List of security options in the form of C("label:user:User"). - type: list - elements: str - state: - description: - - 'C(absent) - A container matching the specified name will be stopped and removed. Use I(force_kill) to kill the container - rather than stopping it. Use I(keep_volumes) to retain anonymous volumes associated with the removed container.' - - 'C(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no - container matches the name, a container will be created. If a container matches the name but the provided configuration - does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created - with the requested config.' - - 'C(started) - Asserts that the container is first C(present), and then if the container is not running moves it to a running - state. Use I(restart) to force a matching container to be stopped and restarted.' - - 'C(stopped) - Asserts that the container is first C(present), and then if the container is running moves it to a stopped - state.' - - To control what will be taken into account when comparing configuration, see the I(comparisons) option. To avoid that the - image version will be taken into account, you can also use the I(ignore_image) option. - - Use the I(recreate) option to always force re-creation of a matching container, even if it is running. - - If the container should be killed instead of stopped in case it needs to be stopped for recreation, or because I(state) is - C(stopped), please use the I(force_kill) option. Use I(keep_volumes) to retain anonymous volumes associated with a removed container. - - Use I(keep_volumes) to retain anonymous volumes associated with a removed container. - type: str - default: started - choices: - - absent - - present - - stopped - - started - stop_signal: - description: - - Override default signal used to stop the container. 
- type: str - stop_timeout: - description: - - Number of seconds to wait for the container to stop before sending C(SIGKILL). - When the container is created by this module, its C(StopTimeout) configuration - will be set to this value. - - When the container is stopped, will be used as a timeout for stopping the - container. In case the container has a custom C(StopTimeout) configuration, - the behavior depends on the version of the docker daemon. New versions of - the docker daemon will always use the container's configured C(StopTimeout) - value if it has been configured. - type: int - storage_opts: - description: - - Storage driver options for this container as a key-value mapping. - type: dict - version_added: 1.3.0 - tmpfs: - description: - - Mount a tmpfs directory. - type: list - elements: str - tty: - description: - - Allocate a pseudo-TTY. - - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false). - type: bool - ulimits: - description: - - "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)." - type: list - elements: str - sysctls: - description: - - Dictionary of key,value pairs. - type: dict - user: - description: - - Sets the username or UID used and optionally the groupname or GID for the specified command. - - "Can be of the forms C(user), C(user:group), C(uid), C(uid:gid), C(user:gid) or C(uid:group)." - type: str - uts: - description: - - Set the UTS namespace mode for the container. - type: str - volumes: - description: - - List of volumes to mount within the container. - - "Use docker CLI-style syntax: C(/host:/container[:mode])" - - "Mount modes can be a comma-separated list of various modes such as C(ro), C(rw), C(consistent), - C(delegated), C(cached), C(rprivate), C(private), C(rshared), C(shared), C(rslave), C(slave), and - C(nocopy). Note that the docker daemon might not support all modes and combinations of such modes." 
- - SELinux hosts can additionally use C(z) or C(Z) to use a shared or private label for the volume. - - "Note that Ansible 2.7 and earlier only supported one mode, which had to be one of C(ro), C(rw), - C(z), and C(Z)." - type: list - elements: str - volume_driver: - description: - - The container volume driver. - type: str - volumes_from: - description: - - List of container names or IDs to get volumes from. - type: list - elements: str - working_dir: - description: - - Path to the working directory. - type: str -extends_documentation_fragment: -- community.docker.docker -- community.docker.docker.docker_py_1_documentation - - -author: - - "Cove Schneider (@cove)" - - "Joshua Conner (@joshuaconner)" - - "Pavel Antonov (@softzilla)" - - "Thomas Steinbach (@ThomasSteinbach)" - - "Philippe Jandot (@zfil)" - - "Daan Oosterveld (@dusdanig)" - - "Chris Houseknecht (@chouseknecht)" - - "Kassian Sun (@kassiansun)" - - "Felix Fontein (@felixfontein)" - -requirements: - - "Docker API >= 1.25" -''' - -EXAMPLES = ''' -- name: Create a data container - community.docker.docker_container: - name: mydata - image: busybox - volumes: - - /data - -- name: Re-create a redis container - community.docker.docker_container: - name: myredis - image: redis - command: redis-server --appendonly yes - state: present - recreate: yes - exposed_ports: - - 6379 - volumes_from: - - mydata - -- name: Restart a container - community.docker.docker_container: - name: myapplication - image: someuser/appimage - state: started - restart: yes - links: - - "myredis:aliasedredis" - devices: - - "/dev/sda:/dev/xvda:rwm" - ports: - # Publish container port 9000 as host port 8080 - - "8080:9000" - # Publish container UDP port 9001 as host port 8081 on interface 127.0.0.1 - - "127.0.0.1:8081:9001/udp" - # Publish container port 9002 as a random host port - - "9002" - # Publish container port 9003 as a free host port in range 8000-8100 - # (the host port will be selected by the Docker daemon) - - 
"8000-8100:9003" - # Publish container ports 9010-9020 to host ports 7000-7010 - - "7000-7010:9010-9020" - env: - SECRET_KEY: "ssssh" - # Values which might be parsed as numbers, booleans or other types by the YAML parser need to be quoted - BOOLEAN_KEY: "yes" - -- name: Container present - community.docker.docker_container: - name: mycontainer - state: present - image: ubuntu:14.04 - command: sleep infinity - -- name: Stop a container - community.docker.docker_container: - name: mycontainer - state: stopped - -- name: Start 4 load-balanced containers - community.docker.docker_container: - name: "container{{ item }}" - recreate: yes - image: someuser/anotherappimage - command: sleep 1d - with_sequence: count=4 - -- name: Remove container - community.docker.docker_container: - name: ohno - state: absent - -- name: Syslogging output - community.docker.docker_container: - name: myservice - image: busybox - log_driver: syslog - log_options: - syslog-address: tcp://my-syslog-server:514 - syslog-facility: daemon - # NOTE: in Docker 1.13+ the "syslog-tag" option was renamed to "tag" for - # older docker installs, use "syslog-tag" instead - tag: myservice - -- name: Create db container and connect to network - community.docker.docker_container: - name: db_test - image: "postgres:latest" - networks: - - name: "{{ docker_network_name }}" - -- name: Start container, connect to network and link - community.docker.docker_container: - name: sleeper - image: ubuntu:14.04 - networks: - - name: TestingNet - ipv4_address: "172.16.1.100" - aliases: - - sleepyzz - links: - - db_test:db - - name: TestingNet2 - -- name: Start a container with a command - community.docker.docker_container: - name: sleepy - image: ubuntu:14.04 - command: ["sleep", "infinity"] - -- name: Add container to networks - community.docker.docker_container: - name: sleepy - networks: - - name: TestingNet - ipv4_address: 172.16.1.18 - links: - - sleeper - - name: TestingNet2 - ipv4_address: 172.16.10.20 - -- name: 
Update network with aliases - community.docker.docker_container: - name: sleepy - networks: - - name: TestingNet - aliases: - - sleepyz - - zzzz - -- name: Remove container from one network - community.docker.docker_container: - name: sleepy - networks: - - name: TestingNet2 - purge_networks: yes - -- name: Remove container from all networks - community.docker.docker_container: - name: sleepy - purge_networks: yes - -- name: Start a container and use an env file - community.docker.docker_container: - name: agent - image: jenkinsci/ssh-slave - env_file: /var/tmp/jenkins/agent.env - -- name: Create a container with limited capabilities - community.docker.docker_container: - name: sleepy - image: ubuntu:16.04 - command: sleep infinity - capabilities: - - sys_time - cap_drop: - - all - -- name: Finer container restart/update control - community.docker.docker_container: - name: test - image: ubuntu:18.04 - env: - arg1: "true" - arg2: "whatever" - volumes: - - /tmp:/tmp - comparisons: - image: ignore # do not restart containers with older versions of the image - env: strict # we want precisely this environment - volumes: allow_more_present # if there are more volumes, that's ok, as long as `/tmp:/tmp` is there - -- name: Finer container restart/update control II - community.docker.docker_container: - name: test - image: ubuntu:18.04 - env: - arg1: "true" - arg2: "whatever" - comparisons: - '*': ignore # by default, ignore *all* options (including image) - env: strict # except for environment variables; there, we want to be strict - -- name: Start container with healthstatus - community.docker.docker_container: - name: nginx-proxy - image: nginx:1.13 - state: started - healthcheck: - # Check if nginx server is healthy by curl'ing the server. - # If this fails or timeouts, the healthcheck fails. 
- test: ["CMD", "curl", "--fail", "http://nginx.host.com"] - interval: 1m30s - timeout: 10s - retries: 3 - start_period: 30s - -- name: Remove healthcheck from container - community.docker.docker_container: - name: nginx-proxy - image: nginx:1.13 - state: started - healthcheck: - # The "NONE" check needs to be specified - test: ["NONE"] - -- name: Start container with block device read limit - community.docker.docker_container: - name: test - image: ubuntu:18.04 - state: started - device_read_bps: - # Limit read rate for /dev/sda to 20 mebibytes per second - - path: /dev/sda - rate: 20M - device_read_iops: - # Limit read rate for /dev/sdb to 300 IO per second - - path: /dev/sdb - rate: 300 - -- name: Start container with GPUs - community.docker.docker_container: - name: test - image: ubuntu:18.04 - state: started - device_requests: - - # Add some specific devices to this container - device_ids: - - '0' - - 'GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a' - - # Add nVidia GPUs to this container - driver: nvidia - count: -1 # this means we want all - capabilities: - # We have one OR condition: 'gpu' AND 'utility' - - - gpu - - utility - # See https://github.com/NVIDIA/nvidia-container-runtime#supported-driver-capabilities - # for a list of capabilities supported by the nvidia driver - -- name: Start container with storage options - community.docker.docker_container: - name: test - image: ubuntu:18.04 - state: started - storage_opts: - # Limit root filesystem to 12 MB - note that this requires special storage backends - # (https://fabianlee.org/2020/01/15/docker-use-overlay2-with-an-xfs-backing-filesystem-to-limit-rootfs-size/) - size: 12m -''' - -RETURN = ''' -container: - description: - - Facts representing the current state of the container. Matches the docker inspection output. - - Empty if I(state) is C(absent). - - If I(detach=false), will include C(Output) attribute containing any output from container run. 
- returned: success; or when I(state=started) and I(detach=false), and when waiting for the container result did not fail - type: dict - sample: '{ - "AppArmorProfile": "", - "Args": [], - "Config": { - "AttachStderr": false, - "AttachStdin": false, - "AttachStdout": false, - "Cmd": [ - "/usr/bin/supervisord" - ], - "Domainname": "", - "Entrypoint": null, - "Env": [ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - ], - "ExposedPorts": { - "443/tcp": {}, - "80/tcp": {} - }, - "Hostname": "8e47bf643eb9", - "Image": "lnmp_nginx:v1", - "Labels": {}, - "OnBuild": null, - "OpenStdin": false, - "StdinOnce": false, - "Tty": false, - "User": "", - "Volumes": { - "/tmp/lnmp/nginx-sites/logs/": {} - }, - ... - }' -status: - description: - - In case a container is started without detaching, this contains the exit code of the process in the container. - - Before community.docker 1.1.0, this was only returned when non-zero. - returned: when I(state=started) and I(detach=false), and when waiting for the container result did not fail - type: int - sample: 0 -''' - -import os -import re -import shlex -import traceback -from time import sleep - -from ansible.module_utils.common.text.formatters import human_to_bytes -from ansible.module_utils.common.text.converters import to_native, to_text -from ansible.module_utils.six import string_types - -from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion - -from ansible_collections.community.docker.plugins.module_utils.common_api import ( - AnsibleDockerClient, - RequestException, -) -from ansible_collections.community.docker.plugins.module_utils.module_container import ( - DockerAPIEngineDriver, - OPTIONS, - Option, -) -from ansible_collections.community.docker.plugins.module_utils.util import ( - DifferenceTracker, - DockerBaseClass, - compare_generic, - is_image_name_id, - sanitize_result, - clean_dict_booleans_for_docker_api, - omit_none_from_dict, - parse_healthcheck, - 
DOCKER_COMMON_ARGS, -) - -from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, DockerException, NotFound - -from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import parse_repository_tag, normalize_links - - -class Container(DockerBaseClass): - def __init__(self, container): - super(Container, self).__init__() - self.raw = container - self.Id = None - self.Image = None - self.container = container - if container: - self.Id = container['Id'] - self.Image = container['Image'] - self.log(self.container, pretty_print=True) - - @property - def exists(self): - return True if self.container else False - - @property - def removing(self): - if self.container and self.container.get('State'): - return self.container['State'].get('Status') == 'removing' - return False - - @property - def running(self): - if self.container and self.container.get('State'): - if self.container['State'].get('Running') and not self.container['State'].get('Ghost', False): - return True - return False - - @property - def paused(self): - if self.container and self.container.get('State'): - return self.container['State'].get('Paused', False) - return False - - -class ContainerManager(DockerBaseClass): - def __init__(self, module, client, active_options): - self.client = client - self.options = active_options - self.all_options = self._collect_all_options(active_options) - self.module = module - self.check_mode = self.module.check_mode - self.param_cleanup = self.module.params['cleanup'] - self.param_container_default_behavior = self.module.params['container_default_behavior'] - self.param_default_host_ip = self.module.params['default_host_ip'] - self.param_debug = self.module.params['debug'] - self.param_force_kill = self.module.params['force_kill'] - self.param_image = self.module.params['image'] - self.param_image_label_mismatch = self.module.params['image_label_mismatch'] - self.param_keep_volumes = 
self.module.params['keep_volumes'] - self.param_kill_signal = self.module.params['kill_signal'] - self.param_name = self.module.params['name'] - self.param_networks_cli_compatible = self.module.params['networks_cli_compatible'] - self.param_output_logs = self.module.params['output_logs'] - self.param_paused = self.module.params['paused'] - self.param_pull = self.module.params['pull'] - self.param_purge_networks = self.module.params['purge_networks'] - self.param_recreate = self.module.params['recreate'] - self.param_removal_wait_timeout = self.module.params['removal_wait_timeout'] - self.param_restart = self.module.params['restart'] - self.param_state = self.module.params['state'] - self._parse_comparisons() - self._update_params() - self.parameters = self._collect_params(active_options) - self.results = {'changed': False, 'actions': []} - self.diff = {} - self.diff_tracker = DifferenceTracker() - self.facts = {} - if self.param_default_host_ip: - valid_ip = False - if re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', self.param_default_host_ip): - valid_ip = True - if re.match(r'^\[[0-9a-fA-F:]+\]$', self.param_default_host_ip): - valid_ip = True - if re.match(r'^[0-9a-fA-F:]+$', self.param_default_host_ip): - self.param_default_host_ip = '[{0}]'.format(self.param_default_host_ip) - valid_ip = True - if not valid_ip: - self.fail('The value of default_host_ip must be an empty string, an IPv4 address, ' - 'or an IPv6 address. 
Got "{0}" instead.'.format(self.param_default_host_ip)) - - - def _collect_all_options(self, active_options): - all_options = {} - for options in active_options: - for option in options.options: - all_options[option.name] = option - for option in [ - Option('image', 'str', None), - Option('networks', 'set', None, elements='dict', ansible_suboptions={}), - ]: - all_options[option.name] = option - return all_options - - def _collect_all_module_params(self): - all_module_options = set() - for option, data in self.module.argument_spec.items(): - all_module_options.add(option) - if 'aliases' in data: - for alias in data['aliases']: - all_module_options.add(alias) - return all_module_options - - def _parse_comparisons(self): - # Keep track of all module params and all option aliases - all_module_options = self._collect_all_module_params() - comp_aliases = {} - for option_name, option in self.all_options.items(): - if option.not_an_ansible_option: - continue - comp_aliases[option_name] = option_name - for alias in option.ansible_aliases: - comp_aliases[alias] = option_name - # Process legacy ignore options - if self.module.params['ignore_image']: - self.all_options['image'].comparison = 'ignore' - if self.param_purge_networks: - self.all_options['networks'].comparison = 'strict' - # Process comparsions specified by user - if self.module.params.get('comparisons'): - # If '*' appears in comparisons, process it first - if '*' in self.module.params['comparisons']: - value = self.module.params['comparisons']['*'] - if value not in ('strict', 'ignore'): - self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!") - for option in self.all_options.values(): - if option.name == 'networks': - # `networks` is special: only update if - # some value is actually specified - if self.module.params['networks'] is None: - continue - option.comparison = value - # Now process all other comparisons. 
- comp_aliases_used = {} - for key, value in self.module.params['comparisons'].items(): - if key == '*': - continue - # Find main key - key_main = comp_aliases.get(key) - if key_main is None: - if key_main in all_module_options: - self.fail("The module option '%s' cannot be specified in the comparisons dict, " - "since it does not correspond to container's state!" % key) - if key not in self.all_options or self.all_options[key].not_an_ansible_option: - self.fail("Unknown module option '%s' in comparisons dict!" % key) - key_main = key - if key_main in comp_aliases_used: - self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main)) - comp_aliases_used[key_main] = key - # Check value and update accordingly - if value in ('strict', 'ignore'): - self.all_options[key_main].comparison = value - elif value == 'allow_more_present': - if self.all_options[key_main].comparison_type == 'value': - self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value)) - self.all_options[key_main].comparison = value - else: - self.fail("Unknown comparison mode '%s'!" 
% value) - # Copy values - for option in self.all_options.values(): - if option.copy_comparison_from is not None: - option.comparison = self.all_options[option.copy_comparison_from].comparison - # Check legacy values - if self.module.params['ignore_image'] and self.all_options['image'].comparison != 'ignore': - self.module.warn('The ignore_image option has been overridden by the comparisons option!') - if self.param_purge_networks and self.all_options['networks'].comparison != 'strict': - self.module.warn('The purge_networks option has been overridden by the comparisons option!') - - def _update_params(self): - if self.param_networks_cli_compatible is True and self.module.params['networks'] and self.module.params['network_mode'] is None: - # Same behavior as Docker CLI: if networks are specified, use the name of the first network as the value for network_mode - # (assuming no explicit value is specified for network_mode) - self.module.params['network_mode'] = self.module.params['networks'][0]['name'] - if self.param_container_default_behavior == 'compatibility': - old_default_values = dict( - auto_remove=False, - detach=True, - init=False, - interactive=False, - memory='0', - paused=False, - privileged=False, - read_only=False, - tty=False, - ) - for param, value in old_default_values.items(): - if self.module.params[param] is None: - self.module.params[param] = value - - def _collect_params(self, active_options): - parameters = [] - for options in active_options: - values = {} - engine = options.get_engine('docker_api') - for option in options.options: - if not option.not_an_ansible_option and self.module.params[option.name] is not None: - values[option.name] = self.module.params[option.name] - values = options.preprocess(self.module, values) - engine.preprocess_value(self.module, self.client, self.client.docker_api_version, options.options, values) - parameters.append((options, values)) - return parameters - - def fail(self, *args, **kwargs): - 
self.client.fail(*args, **kwargs) - - def run(self): - if self.param_state in ('stopped', 'started', 'present'): - self.present(self.param_state) - elif self.param_state == 'absent': - self.absent() - - if not self.check_mode and not self.param_debug: - self.results.pop('actions') - - if self.module._diff or self.param_debug: - self.diff['before'], self.diff['after'] = self.diff_tracker.get_before_after() - self.results['diff'] = self.diff - - if self.facts: - self.results['container'] = self.facts - - def wait_for_state(self, container_id, complete_states=None, wait_states=None, accept_removal=False, max_wait=None): - delay = 1.0 - total_wait = 0 - while True: - # Inspect container - result = self.client.get_container_by_id(container_id) - if result is None: - if accept_removal: - return - msg = 'Encontered vanished container while waiting for container "{0}"' - self.fail(msg.format(container_id)) - # Check container state - state = result.get('State', {}).get('Status') - if complete_states is not None and state in complete_states: - return - if wait_states is not None and state not in wait_states: - msg = 'Encontered unexpected state "{1}" while waiting for container "{0}"' - self.fail(msg.format(container_id, state)) - # Wait - if max_wait is not None: - if total_wait > max_wait: - msg = 'Timeout of {1} seconds exceeded while waiting for container "{0}"' - self.fail(msg.format(container_id, max_wait)) - if total_wait + delay > max_wait: - delay = max_wait - total_wait - sleep(delay) - total_wait += delay - # Exponential backoff, but never wait longer than 10 seconds - # (1.1**24 < 10, 1.1**25 > 10, so it will take 25 iterations - # until the maximal 10 seconds delay is reached. By then, the - # code will have slept for ~1.5 minutes.) 
- delay = min(delay * 1.1, 10) - - def present(self, state): - container = self._get_container(self.param_name) - was_running = container.running - was_paused = container.paused - container_created = False - - # If the image parameter was passed then we need to deal with the image - # version comparison. Otherwise we handle this depending on whether - # the container already runs or not; in the former case, in case the - # container needs to be restarted, we use the existing container's - # image ID. - image = self._get_image() - self.log(image, pretty_print=True) - if not container.exists or container.removing: - # New container - if container.removing: - self.log('Found container in removal phase') - else: - self.log('No container found') - if not self.param_image: - self.fail('Cannot create container when image is not specified!') - self.diff_tracker.add('exists', parameter=True, active=False) - if container.removing and not self.check_mode: - # Wait for container to be removed before trying to create it - self.wait_for_state( - container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout) - new_container = self.container_create(self.param_image) - if new_container: - container = new_container - container_created = True - else: - # Existing container - different, differences = self.has_different_configuration(container, image) - image_different = False - if self.all_options['image'].comparison == 'strict': - image_different = self._image_is_different(image, container) - if image_different or different or self.param_recreate: - self.diff_tracker.merge(differences) - self.diff['differences'] = differences.get_legacy_docker_container_diffs() - if image_different: - self.diff['image_different'] = True - self.log("differences") - self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True) - image_to_use = self.param_image - if not image_to_use and container and container.Image: - image_to_use = container.Image 
- if not image_to_use: - self.fail('Cannot recreate container when image is not specified or cannot be extracted from current container!') - if container.running: - self.container_stop(container.Id) - self.container_remove(container.Id) - if not self.check_mode: - self.wait_for_state( - container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout) - new_container = self.container_create(image_to_use) - if new_container: - container = new_container - container_created = True - - if container and container.exists: - container = self.update_limits(container, image) - container = self.update_networks(container, container_created) - - if state == 'started' and not container.running: - self.diff_tracker.add('running', parameter=True, active=was_running) - container = self.container_start(container.Id) - elif state == 'started' and self.param_restart: - self.diff_tracker.add('running', parameter=True, active=was_running) - self.diff_tracker.add('restarted', parameter=True, active=False) - container = self.container_restart(container.Id) - elif state == 'stopped' and container.running: - self.diff_tracker.add('running', parameter=False, active=was_running) - self.container_stop(container.Id) - container = self._get_container(container.Id) - - if state == 'started' and self.param_paused is not None and container.paused != self.param_paused: - self.diff_tracker.add('paused', parameter=self.param_paused, active=was_paused) - if not self.check_mode: - try: - if self.param_paused: - self.client.post_call('/containers/{0}/pause', container.Id) - else: - self.client.post_call('/containers/{0}/unpause', container.Id) - except Exception as exc: - self.fail("Error %s container %s: %s" % ( - "pausing" if self.param_paused else "unpausing", container.Id, to_native(exc) - )) - container = self._get_container(container.Id) - self.results['changed'] = True - self.results['actions'].append(dict(set_paused=self.param_paused)) - - self.facts = 
container.raw - - def absent(self): - container = self._get_container(self.param_name) - if container.exists: - if container.running: - self.diff_tracker.add('running', parameter=False, active=True) - self.container_stop(container.Id) - self.diff_tracker.add('exists', parameter=False, active=True) - self.container_remove(container.Id) - - def _output_logs(self, msg): - self.module.log(msg=msg) - - def _get_container(self, container): - ''' - Expects container ID or Name. Returns a container object - ''' - return Container(self.client.get_container(container)) - - def _get_image(self): - image_parameter = self.param_image - if not image_parameter: - self.log('No image specified') - return None - if is_image_name_id(image_parameter): - image = self.client.find_image_by_id(image_parameter) - else: - repository, tag = parse_repository_tag(image_parameter) - if not tag: - tag = "latest" - image = self.client.find_image(repository, tag) - if not image or self.param_pull: - if not self.check_mode: - self.log("Pull the image.") - image, alreadyToLatest = self.client.pull_image(repository, tag) - if alreadyToLatest: - self.results['changed'] = False - else: - self.results['changed'] = True - self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag))) - elif not image: - # If the image isn't there, claim we'll pull. - # (Implicitly: if the image is there, claim it already was latest.) 
- self.results['changed'] = True - self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag))) - - self.log("image") - self.log(image, pretty_print=True) - return image - - def _image_is_different(self, image, container): - if image and image.get('Id'): - if container and container.Image: - if image.get('Id') != container.Image: - self.diff_tracker.add('image', parameter=image.get('Id'), active=container.Image) - return True - return False - - def _compose_create_parameters(self, image): - params = { - 'Image': image, - } - for options, values in self.parameters: - engine = options.get_engine('docker_api') - if engine.can_set_value(self.client.docker_api_version): - engine.set_value(self.module, params, self.client.docker_api_version, options.options, values) - return params - - def has_different_configuration(self, container, image): - differences = DifferenceTracker() - for options, param_values in self.parameters: - engine = options.get_engine('docker_api') - container_values = engine.get_value(self.module, container.raw, self.client.docker_api_version, options.options) - expected_values = engine.get_expected_values(self.module, self.client, self.client.docker_api_version, options.options, image, param_values.copy()) - for option in options.options: - if option.name in expected_values: - param_value = expected_values[option.name] - container_value = container_values.get(option.name) - match = compare_generic(param_value, container_value, option.comparison, option.comparison_type) - - if not match: - if engine.ignore_mismatching_result(self.module, self.client, self.client.docker_api_version, option, image, container_value, param_value): - continue - # TODO - # if option.name == 'healthcheck' and config_mapping['disable_healthcheck'] and self.parameters.disable_healthcheck: - # # If the healthcheck is disabled (both in parameters and for the current container), and the user - # # requested strict comparison for healthcheck, the comparison will 
fail. That's why we ignore the - # # expected_healthcheck comparison in this case. - # continue - - # no match. record the differences - p = param_value - c = container_value - if option.comparison_type == 'set': - # Since the order does not matter, sort so that the diff output is better. - if p is not None: - p = sorted(p) - if c is not None: - c = sorted(c) - elif option.comparison_type == 'set(dict)': - # Since the order does not matter, sort so that the diff output is better. - if option.name == 'expected_mounts': - # For selected values, use one entry as key - def sort_key_fn(x): - return x['target'] - else: - # We sort the list of dictionaries by using the sorted items of a dict as its key. - def sort_key_fn(x): - return sorted((a, to_text(b, errors='surrogate_or_strict')) for a, b in x.items()) - if p is not None: - p = sorted(p, key=sort_key_fn) - if c is not None: - c = sorted(c, key=sort_key_fn) - differences.add(option.name, parameter=p, active=c) - - has_differences = not differences.empty - return has_differences, differences - - def has_different_resource_limits(self, container, image): - ''' - Diff parameters and container resource limits - ''' - differences = DifferenceTracker() - for options, param_values in self.parameters: - engine = options.get_engine('docker_api') - if not engine.can_update_value(self.client.docker_api_version): - continue - container_values = engine.get_value(self.module, container.raw, self.client.docker_api_version, options.options) - expected_values = engine.get_expected_values(self.module, self.client, self.client.docker_api_version, options.options, image, param_values.copy()) - for option in options.options: - if option.name in expected_values: - param_value = expected_values[option.name] - container_value = container_values.get(option.name) - match = compare_generic(param_value, container_value, option.comparison, option.comparison_type) - - if not match: - # no match. 
record the differences - differences.add(option.name, parameter=param_value, active=container_value) - different = not differences.empty - return different, differences - - def _compose_update_parameters(self): - result = {} - for options, values in self.parameters: - engine = options.get_engine('docker_api') - if not engine.can_update_value(self.client.docker_api_version): - continue - engine.update_value(self.module, result, self.client.docker_api_version, options.options, values) - return result - - def update_limits(self, container, image): - limits_differ, different_limits = self.has_different_resource_limits(container, image) - if limits_differ: - self.log("limit differences:") - self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True) - self.diff_tracker.merge(different_limits) - if limits_differ and not self.check_mode: - self.container_update(container.Id, self._compose_update_parameters()) - return self._get_container(container.Id) - return container - - def has_network_differences(self, container): - ''' - Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6 - ''' - different = False - differences = [] - - if not self.module.params['networks']: - return different, differences - - if not container.container.get('NetworkSettings'): - self.fail("has_missing_networks: Error parsing container properties. 
NetworkSettings missing.") - - connected_networks = container.container['NetworkSettings']['Networks'] - for network in self.module.params['networks']: - network_info = connected_networks.get(network['name']) - if network_info is None: - different = True - differences.append(dict( - parameter=network, - container=None - )) - else: - diff = False - network_info_ipam = network_info.get('IPAMConfig') or {} - if network.get('ipv4_address') and network['ipv4_address'] != network_info_ipam.get('IPv4Address'): - diff = True - if network.get('ipv6_address') and network['ipv6_address'] != network_info_ipam.get('IPv6Address'): - diff = True - if network.get('aliases'): - if not compare_generic(network['aliases'], network_info.get('Aliases'), 'allow_more_present', 'set'): - diff = True - if network.get('links'): - expected_links = [] - for link, alias in network['links']: - expected_links.append("%s:%s" % (link, alias)) - if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'): - diff = True - if diff: - different = True - differences.append(dict( - parameter=network, - container=dict( - name=network['name'], - ipv4_address=network_info_ipam.get('IPv4Address'), - ipv6_address=network_info_ipam.get('IPv6Address'), - aliases=network_info.get('Aliases'), - links=network_info.get('Links') - ) - )) - return different, differences - - def has_extra_networks(self, container): - ''' - Check if the container is connected to non-requested networks - ''' - extra_networks = [] - extra = False - - if not container.container.get('NetworkSettings'): - self.fail("has_extra_networks: Error parsing container properties. 
NetworkSettings missing.") - - connected_networks = container.container['NetworkSettings'].get('Networks') - if connected_networks: - for network, network_config in connected_networks.items(): - keep = False - if self.module.params['networks']: - for expected_network in self.module.params['networks']: - if expected_network['name'] == network: - keep = True - if not keep: - extra = True - extra_networks.append(dict(name=network, id=network_config['NetworkID'])) - return extra, extra_networks - - def update_networks(self, container, container_created): - updated_container = container - if self.all_options['networks'].comparison != 'ignore' or container_created: - has_network_differences, network_differences = self.has_network_differences(container) - if has_network_differences: - if self.diff.get('differences'): - self.diff['differences'].append(dict(network_differences=network_differences)) - else: - self.diff['differences'] = [dict(network_differences=network_differences)] - for netdiff in network_differences: - self.diff_tracker.add( - 'network.{0}'.format(netdiff['parameter']['name']), - parameter=netdiff['parameter'], - active=netdiff['container'] - ) - self.results['changed'] = True - updated_container = self._add_networks(container, network_differences) - - if (self.all_options['networks'].comparison == 'strict' and self.module.params['networks'] is not None) or self.param_purge_networks: - has_extra_networks, extra_networks = self.has_extra_networks(container) - if has_extra_networks: - if self.diff.get('differences'): - self.diff['differences'].append(dict(purge_networks=extra_networks)) - else: - self.diff['differences'] = [dict(purge_networks=extra_networks)] - for extra_network in extra_networks: - self.diff_tracker.add( - 'network.{0}'.format(extra_network['name']), - active=extra_network - ) - self.results['changed'] = True - updated_container = self._purge_networks(container, extra_networks) - return updated_container - - def _add_networks(self, 
container, differences): - for diff in differences: - # remove the container from the network, if connected - if diff.get('container'): - self.results['actions'].append(dict(removed_from_network=diff['parameter']['name'])) - if not self.check_mode: - try: - self.client.post_json('/networks/{0}/disconnect', diff['parameter']['id'], data={'Container': container.Id}) - except Exception as exc: - self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'], - to_native(exc))) - # connect to the network - params = dict() - for para, dest_para in {'ipv4_address': 'IPv4Address', 'ipv6_address': 'IPv6Address', 'links': 'Links', 'aliases': 'Aliases'}.items(): - if diff['parameter'].get(para): - value = diff['parameter'][para] - if para == 'links': - value = normalize_links(value) - params[dest_para] = value - self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params)) - if not self.check_mode: - try: - self.log("Connecting container to network %s" % diff['parameter']['id']) - self.log(params, pretty_print=True) - data = { - 'Container': container.Id, - 'EndpointConfig': params, - } - self.client.post_json('/networks/{0}/connect', diff['parameter']['id'], data=data) - except Exception as exc: - self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], to_native(exc))) - return self._get_container(container.Id) - - def _purge_networks(self, container, networks): - for network in networks: - self.results['actions'].append(dict(removed_from_network=network['name'])) - if not self.check_mode: - try: - self.client.post_json('/networks/{0}/disconnect', network['name'], data={'Container': container.Id}) - except Exception as exc: - self.fail("Error disconnecting container from network %s - %s" % (network['name'], - to_native(exc))) - return self._get_container(container.Id) - - def container_create(self, image): - create_parameters = self._compose_create_parameters(image) - 
self.log("create container") - self.log("image: %s parameters:" % image) - self.log(create_parameters, pretty_print=True) - self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters)) - self.results['changed'] = True - new_container = None - if not self.check_mode: - try: - params = {'name': self.param_name} - new_container = self.client.post_json_to_json('/containers/create', data=create_parameters, params=params) - self.client.report_warnings(new_container) - except Exception as exc: - self.fail("Error creating container: %s" % to_native(exc)) - return self._get_container(new_container['Id']) - return new_container - - def container_start(self, container_id): - self.log("start container %s" % (container_id)) - self.results['actions'].append(dict(started=container_id)) - self.results['changed'] = True - if not self.check_mode: - try: - self.client.post_json('/containers/{0}/start', container_id) - except Exception as exc: - self.fail("Error starting container %s: %s" % (container_id, to_native(exc))) - - if self.module.params['detach'] is False: - status = self.client.post_json_as_json('/containers/{0}/wait', container_id)['StatusCode'] - self.client.fail_results['status'] = status - self.results['status'] = status - - if self.module.params['auto_remove']: - output = "Cannot retrieve result as auto_remove is enabled" - if self.param_output_logs: - self.module.warn('Cannot output_logs if auto_remove is enabled!') - else: - config = self.client.get_json('/containers/{0}/json', container_id) - logging_driver = config['HostConfig']['LogConfig']['Type'] - - if logging_driver in ('json-file', 'journald', 'local'): - params = { - 'stderr': 1, - 'stdout': 1, - 'timestamps': 0, - 'follow': 0, - 'tail': 'all', - } - res = self.client._get(self.client._url('/containers/{0}/logs', container_id), params=params) - output = self.client._get_result_tty(False, res, config['Config']['Tty']) - if self.param_output_logs: - 
self._output_logs(msg=output) - else: - output = "Result logged using `%s` driver" % logging_driver - - if self.param_cleanup: - self.container_remove(container_id, force=True) - insp = self._get_container(container_id) - if insp.raw: - insp.raw['Output'] = output - else: - insp.raw = dict(Output=output) - if status != 0: - # Set `failed` to True and return output as msg - self.results['failed'] = True - self.results['msg'] = output - return insp - return self._get_container(container_id) - - def container_remove(self, container_id, link=False, force=False): - volume_state = (not self.param_keep_volumes) - self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force)) - self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force)) - self.results['changed'] = True - if not self.check_mode: - count = 0 - while True: - try: - params = {'v': volume_state, 'link': link, 'force': force} - self.client.delete_call('/containers/{0}', container_id, params=params) - except NotFound as dummy: - pass - except APIError as exc: - if 'Unpause the container before stopping or killing' in exc.explanation: - # New docker daemon versions do not allow containers to be removed - # if they are paused. Make sure we don't end up in an infinite loop. 
- if count == 3: - self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, to_native(exc))) - count += 1 - # Unpause - try: - self.client.post_call('/containers/{0}/unpause', container_id) - except Exception as exc2: - self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2))) - # Now try again - continue - if 'removal of container ' in exc.explanation and ' is already in progress' in exc.explanation: - pass - else: - self.fail("Error removing container %s: %s" % (container_id, to_native(exc))) - except Exception as exc: - self.fail("Error removing container %s: %s" % (container_id, to_native(exc))) - # We only loop when explicitly requested by 'continue' - break - - def container_update(self, container_id, update_parameters): - if update_parameters: - self.log("update container %s" % (container_id)) - self.log(update_parameters, pretty_print=True) - self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters)) - self.results['changed'] = True - if not self.check_mode and callable(getattr(self.client, 'update_container')): - try: - result = self.client.post_json_to_json('/containers/{0}/update', container_id, data=update_parameters) - self.client.report_warnings(result) - except Exception as exc: - self.fail("Error updating container %s: %s" % (container_id, to_native(exc))) - return self._get_container(container_id) - - def container_kill(self, container_id): - self.results['actions'].append(dict(killed=container_id, signal=self.param_kill_signal)) - self.results['changed'] = True - if not self.check_mode: - try: - params = {} - if self.param_kill_signal is not None: - params['signal'] = int(self.param_kill_signal) - self.client.post_call('/containers/{0}/kill', container_id, params=params) - except Exception as exc: - self.fail("Error killing container %s: %s" % (container_id, to_native(exc))) - - def container_restart(self, container_id): - 
self.results['actions'].append(dict(restarted=container_id, timeout=self.module.params['stop_timeout'])) - self.results['changed'] = True - if not self.check_mode: - try: - timeout = self.module.params['stop_timeout'] or 10 - client_timeout = self.client.timeout - if client_timeout is not None: - client_timeout += timeout - self.client.post_call('/containers/{0}/restart', container_id, params={'t': timeout}, timeout=client_timeout) - except Exception as exc: - self.fail("Error restarting container %s: %s" % (container_id, to_native(exc))) - return self._get_container(container_id) - - def container_stop(self, container_id): - if self.param_force_kill: - self.container_kill(container_id) - return - self.results['actions'].append(dict(stopped=container_id, timeout=self.module.params['stop_timeout'])) - self.results['changed'] = True - if not self.check_mode: - count = 0 - while True: - try: - timeout = self.module.params['stop_timeout'] - if timeout: - params = {'t': timeout} - else: - params = {} - timeout = 10 - client_timeout = self.client.timeout - if client_timeout is not None: - client_timeout += timeout - self.client.post_call('/containers/{0}/stop', container_id, params=params, timeout=client_timeout) - except APIError as exc: - if 'Unpause the container before stopping or killing' in exc.explanation: - # New docker daemon versions do not allow containers to be removed - # if they are paused. Make sure we don't end up in an infinite loop. 
- if count == 3: - self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, to_native(exc))) - count += 1 - # Unpause - try: - self.client.post_call('/containers/{0}/unpause', container_id) - except Exception as exc2: - self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2))) - # Now try again - continue - self.fail("Error stopping container %s: %s" % (container_id, to_native(exc))) - except Exception as exc: - self.fail("Error stopping container %s: %s" % (container_id, to_native(exc))) - # We only loop when explicitly requested by 'continue' - break - - -def main(): - argument_spec = dict( - cleanup=dict(type='bool', default=False), - comparisons=dict(type='dict'), - container_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']), - command_handling=dict(type='str', choices=['compatibility', 'correct']), - default_host_ip=dict(type='str'), - force_kill=dict(type='bool', default=False, aliases=['forcekill']), - ignore_image=dict(type='bool', default=False), - image=dict(type='str'), - image_label_mismatch=dict(type='str', choices=['ignore', 'fail'], default='ignore'), - keep_volumes=dict(type='bool', default=True), - kill_signal=dict(type='str'), - name=dict(type='str', required=True), - networks=dict(type='list', elements='dict', options=dict( - name=dict(type='str', required=True), - ipv4_address=dict(type='str'), - ipv6_address=dict(type='str'), - aliases=dict(type='list', elements='str'), - links=dict(type='list', elements='str'), - )), - networks_cli_compatible=dict(type='bool', default=True), - output_logs=dict(type='bool', default=False), - paused=dict(type='bool'), - pull=dict(type='bool', default=False), - purge_networks=dict(type='bool', default=False), - recreate=dict(type='bool', default=False), - removal_wait_timeout=dict(type='float'), - restart=dict(type='bool', default=False), - state=dict(type='str', default='started', 
choices=['absent', 'present', 'started', 'stopped']), - stop_signal=dict(type='str'), - ) - - mutually_exclusive = [] - required_together = [] - required_one_of = [] - required_if = [ - ('state', 'present', ['image']) - ] - required_by = {} - - option_minimal_versions = {} - - active_options = [] - for options in OPTIONS: - if not options.supports_engine('docker_api'): - continue - - mutually_exclusive.extend(options.ansible_mutually_exclusive) - required_together.extend(options.ansible_required_together) - required_one_of.extend(options.ansible_required_one_of) - required_if.extend(options.ansible_required_if) - required_by.update(options.ansible_required_by) - argument_spec.update(options.argument_spec) - - engine = options.get_engine('docker_api') - if engine.min_docker_api is not None: - for option in options.options: - if not option.not_an_ansible_option: - option_minimal_versions[option.name] = {'docker_api_version': engine.min_docker_api} - - active_options.append(options) - - client = AnsibleDockerClient( - argument_spec=argument_spec, - mutually_exclusive=mutually_exclusive, - required_together=required_together, - required_one_of=required_one_of, - required_if=required_if, - required_by=required_by, - option_minimal_versions=option_minimal_versions, - supports_check_mode=True, - ) - - try: - cm = ContainerManager(client.module, client, active_options) - cm.run() - client.module.exit_json(**sanitize_result(cm.results)) - except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) - except RequestException as e: - client.fail( - 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), - exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() From 16f63eee8de61187407ccbf8a3a7cc73167ebc94 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 10 Jul 2022 23:18:23 +0200 Subject: [PATCH 16/38] Fix exposed 
ports. --- plugins/module_utils/module_container.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container.py index 549d4efea..c62a34e26 100644 --- a/plugins/module_utils/module_container.py +++ b/plugins/module_utils/module_container.py @@ -1202,7 +1202,16 @@ def _get_expected_values_ports(module, client, api_version, options, image, valu def _set_values_ports(module, data, api_version, options, values): if 'ports' in values: - data['ExposedPorts'] = values['ports'] + exposed_ports = {} + for port_definition in values['ports']: + port = port_definition + proto = 'tcp' + if isinstance(port_definition, tuple): + if len(port_definition) == 2: + proto = port_definition[1] + port = port_definition[0] + exposed_ports['%s/%s' % (port, proto)] = {} + data['ExposedPorts'] = exposed_ports if 'published_ports' in values: if 'HostConfig' not in data: data['HostConfig'] = {} From 42178cf2483cc9dacbd293ea5459c96a461b49ce Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 10 Jul 2022 23:21:17 +0200 Subject: [PATCH 17/38] Fix bugs. 
--- plugins/module_utils/module_container.py | 2 +- plugins/modules/docker_container.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container.py index c62a34e26..ff9f08ad4 100644 --- a/plugins/module_utils/module_container.py +++ b/plugins/module_utils/module_container.py @@ -1261,7 +1261,7 @@ def _preprocess_container_names(module, client, api_version, value): OptionGroup() .add_option('blkio_weight', type='int') - .add_docker_api(DockerAPIEngine.config_value('BlkioWeight', update_parameter='BlkioWeight')), + .add_docker_api(DockerAPIEngine.host_config_value('BlkioWeight', update_parameter='BlkioWeight')), OptionGroup() .add_option('capabilities', type='set', elements='str') diff --git a/plugins/modules/docker_container.py b/plugins/modules/docker_container.py index beceaf804..718f98c1a 100644 --- a/plugins/modules/docker_container.py +++ b/plugins/modules/docker_container.py @@ -2010,7 +2010,7 @@ def container_update(self, container_id, update_parameters): self.log(update_parameters, pretty_print=True) self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters)) self.results['changed'] = True - if not self.check_mode and callable(getattr(self.client, 'update_container')): + if not self.check_mode: try: result = self.client.post_json_to_json('/containers/{0}/update', container_id, data=update_parameters) self.client.report_warnings(result) From 235afbb31872c9abf2642abef0a728b060cbeb70 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 11 Jul 2022 06:44:57 +0200 Subject: [PATCH 18/38] Fix command and entrypoint. 
--- plugins/module_utils/module_container.py | 42 ++++++++++++++++++------ 1 file changed, 32 insertions(+), 10 deletions(-) diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container.py index ff9f08ad4..452f69cf3 100644 --- a/plugins/module_utils/module_container.py +++ b/plugins/module_utils/module_container.py @@ -206,7 +206,11 @@ def preprocess_value_(module, client, api_version, options, values): if len(options) != 1: raise AssertionError('config_value can only be used for a single option') if preprocess_value is not None and options[0].name in values: - values[options[0].name] = preprocess_value(module, client, api_version, values[options[0].name]) + value = preprocess_value(module, client, api_version, values[options[0].name]) + if value is None: + del values[options[0].name] + else: + values[options[0].name] = value return values def get_value(module, container, api_version, options): @@ -278,7 +282,11 @@ def preprocess_value_(module, client, api_version, options, values): if len(options) != 1: raise AssertionError('host_config_value can only be used for a single option') if preprocess_value is not None and options[0].name in values: - values[options[0].name] = preprocess_value(module, client, api_version, values[options[0].name]) + value = preprocess_value(module, client, api_version, values[options[0].name]) + if value is None: + del values[options[0].name] + else: + values[options[0].name] = value return values def get_value(module, container, api_version, options): @@ -444,7 +452,10 @@ def _set_value_detach_interactive(module, data, api_version, options, values): data['StdinOnce'] = True -def _preprocess_command(module, client, api_version, value): +def _preprocess_command(module, values): + if 'command' not in values: + return values + value = values['command'] if module.params['command_handling'] == 'correct': if value is not None: if not isinstance(value, list): @@ -459,10 +470,17 @@ def _preprocess_command(module, 
client, api_version, value): else: value = shlex.split(to_text(value, errors='surrogate_or_strict')) value = [to_text(x, errors='surrogate_or_strict') for x in value] - return value + else: + return {} + return { + 'command': value, + } -def _preprocess_entrypoint(module, api_version, value): +def _preprocess_entrypoint(module, values): + if 'entrypoint' not in values: + return values + value = values['entrypoint'] if module.params['command_handling'] == 'correct': if value is not None: value = [to_text(x, errors='surrogate_or_strict') for x in value] @@ -470,7 +488,11 @@ def _preprocess_entrypoint(module, api_version, value): # convert from list to str. value = shlex.split(' '.join([to_text(x, errors='surrogate_or_strict') for x in value])) value = [to_text(x, errors='surrogate_or_strict') for x in value] - return value + else: + return {} + return { + 'entrypoint': value, + } def _preprocess_env(module, values): @@ -1275,9 +1297,9 @@ def _preprocess_container_names(module, client, api_version, value): .add_option('cgroup_parent', type='str') .add_docker_api(DockerAPIEngine.host_config_value('CgroupParent')), - OptionGroup() + OptionGroup(preprocess=_preprocess_command) .add_option('command', type='list', elements='str', ansible_type='raw') - .add_docker_api(DockerAPIEngine.config_value('Cmd', preprocess_value=_preprocess_command)), + .add_docker_api(DockerAPIEngine.config_value('Cmd')), OptionGroup() .add_option('cpu_period', type='int') @@ -1299,9 +1321,9 @@ def _preprocess_container_names(module, client, api_version, value): .add_option('cpu_shares', type='int') .add_docker_api(DockerAPIEngine.config_value('CpusetMems', update_parameter='CpusetMems')), - OptionGroup() + OptionGroup(preprocess=_preprocess_entrypoint) .add_option('entrypoint', type='list', elements='str') - .add_docker_api(DockerAPIEngine.config_value('Entrypoint', preprocess_value=_preprocess_command)), + .add_docker_api(DockerAPIEngine.config_value('Entrypoint')), OptionGroup() 
.add_option('cpus', type='int', ansible_type='float') From a49cbb5ba1a6d05b86e048a50629b1e4c7d65409 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 11 Jul 2022 07:11:05 +0200 Subject: [PATCH 19/38] More fixes. --- plugins/module_utils/module_container.py | 17 +++++++++-------- plugins/modules/docker_container.py | 4 ++-- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container.py index 452f69cf3..bd49348f0 100644 --- a/plugins/module_utils/module_container.py +++ b/plugins/module_utils/module_container.py @@ -510,7 +510,7 @@ def _preprocess_env(module, values): 'wrapped in quotes to avoid them being interpreted. Key: %s' % (name, )) final_env[name] = to_text(value, errors='surrogate_or_strict') formatted_env = [] - for key, value in final_env: + for key, value in final_env.items(): formatted_env.append('%s=%s' % (key, value)) return { 'env': formatted_env, @@ -697,8 +697,9 @@ def _ignore_mismatching_label_result(module, client, api_version, option, image, # base_image_mismatch is fail we want raise an error. 
image_labels = _get_image_labels(image) would_remove_labels = [] + labels_param = module.params['labels'] or {} for label in image_labels: - if label not in module.params['labels'] or {}: + if label not in labels_param: # Format label for error message would_remove_labels.append('"%s"' % (label, )) if would_remove_labels: @@ -1303,23 +1304,23 @@ def _preprocess_container_names(module, client, api_version, value): OptionGroup() .add_option('cpu_period', type='int') - .add_docker_api(DockerAPIEngine.config_value('CpuPeriod', update_parameter='CpuPeriod')), + .add_docker_api(DockerAPIEngine.host_config_value('CpuPeriod', update_parameter='CpuPeriod')), OptionGroup() .add_option('cpu_quota', type='int') - .add_docker_api(DockerAPIEngine.config_value('CpuQuota', update_parameter='CpuQuota')), + .add_docker_api(DockerAPIEngine.host_config_value('CpuQuota', update_parameter='CpuQuota')), OptionGroup() .add_option('cpuset_cpus', type='str') - .add_docker_api(DockerAPIEngine.config_value('CpuShares', update_parameter='CpuShares')), + .add_docker_api(DockerAPIEngine.host_config_value('CpusetCpus', update_parameter='CpusetCpus')), OptionGroup() .add_option('cpuset_mems', type='str') - .add_docker_api(DockerAPIEngine.config_value('CpusetCpus', update_parameter='CpusetCpus')), + .add_docker_api(DockerAPIEngine.host_config_value('CpusetMems', update_parameter='CpusetMems')), OptionGroup() .add_option('cpu_shares', type='int') - .add_docker_api(DockerAPIEngine.config_value('CpusetMems', update_parameter='CpusetMems')), + .add_docker_api(DockerAPIEngine.host_config_value('CpuShares', update_parameter='CpuShares')), OptionGroup(preprocess=_preprocess_entrypoint) .add_option('entrypoint', type='list', elements='str') @@ -1335,7 +1336,7 @@ def _preprocess_container_names(module, client, api_version, value): .add_docker_api(DockerAPIEngine(get_value=_get_value_detach_interactive, set_value=_set_value_detach_interactive)), OptionGroup() - .add_option('devices', type='set', 
elements='str') + .add_option('devices', type='set', elements='dict', ansible_elements='str') .add_docker_api(DockerAPIEngine.host_config_value('Devices', preprocess_value=_preprocess_devices)), OptionGroup() diff --git a/plugins/modules/docker_container.py b/plugins/modules/docker_container.py index 718f98c1a..7cfa07db2 100644 --- a/plugins/modules/docker_container.py +++ b/plugins/modules/docker_container.py @@ -1927,7 +1927,7 @@ def container_start(self, container_id): self.fail("Error starting container %s: %s" % (container_id, to_native(exc))) if self.module.params['detach'] is False: - status = self.client.post_json_as_json('/containers/{0}/wait', container_id)['StatusCode'] + status = self.client.post_json_to_json('/containers/{0}/wait', container_id)['StatusCode'] self.client.fail_results['status'] = status self.results['status'] = status @@ -2090,7 +2090,7 @@ def main(): cleanup=dict(type='bool', default=False), comparisons=dict(type='dict'), container_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']), - command_handling=dict(type='str', choices=['compatibility', 'correct']), + command_handling=dict(type='str', choices=['compatibility', 'correct'], default='correct'), default_host_ip=dict(type='str'), force_kill=dict(type='bool', default=False, aliases=['forcekill']), ignore_image=dict(type='bool', default=False), From e04017fb9b12efb3501a67f142f6322f9c33eeff Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 11 Jul 2022 12:08:22 +0200 Subject: [PATCH 20/38] Fix more bugs. 
--- plugins/module_utils/module_container.py | 152 ++++++++++++++++++----- plugins/modules/docker_container.py | 64 +++++----- 2 files changed, 155 insertions(+), 61 deletions(-) diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container.py index bd49348f0..3c4539f02 100644 --- a/plugins/module_utils/module_container.py +++ b/plugins/module_utils/module_container.py @@ -2,6 +2,7 @@ # Copyright 2016 Red Hat | Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +import json import os import re import shlex @@ -184,7 +185,8 @@ def __init__( self.get_value = get_value self.set_value = set_value self.get_expected_values = get_expected_values or (lambda module, client, api_version, options, image, values: values) - self.ignore_mismatching_result = ignore_mismatching_result or (lambda module, client, api_version, option, image, container_value, expected_value: False) + self.ignore_mismatching_result = ignore_mismatching_result or \ + (lambda module, client, api_version, option, image, container_value, expected_value: False) self.preprocess_value = preprocess_value or (lambda module, client, api_version, options, values: values) self.update_value = update_value self.can_set_value = can_set_value or (lambda api_version: set_value is not None) @@ -427,9 +429,9 @@ def _get_default_host_ip(module, client): def _get_value_detach_interactive(module, container, api_version, options): - attach_stdin = container.get('AttachStdin') - attach_stderr = container.get('AttachStderr') - attach_stdout = container.get('AttachStdout') + attach_stdin = container['Config'].get('OpenStdin') + attach_stderr = container['Config'].get('AttachStderr') + attach_stdout = container['Config'].get('AttachStdout') return { 'interactive': bool(attach_stdin), 'detach': not (attach_stderr and attach_stdout), @@ -444,6 +446,7 @@ def _set_value_detach_interactive(module, data, api_version, options, values): 
data['AttachStderr'] = False data['AttachStdin'] = False data['StdinOnce'] = False + data['OpenStdin'] = interactive if not detach: data['AttachStdout'] = True data['AttachStderr'] = True @@ -627,13 +630,13 @@ def _preprocess_healthcheck(module, client, api_version, value): healthcheck = {'test': ['NONE']} if not healthcheck: return None - return { + return omit_none_from_dict({ 'Test': healthcheck.get('test'), 'Interval': healthcheck.get('interval'), 'Timeout': healthcheck.get('timeout'), 'StartPeriod': healthcheck.get('start_period'), 'Retries': healthcheck.get('retries'), - } + }) def _postprocess_healthcheck_get_value(module, api_version, value, sentry): @@ -647,7 +650,7 @@ def _preprocess_convert_to_bytes(module, values, name, unlimited_value=None): return values try: value = values[name] - if unlimited_value is not None and value == 'unlimited': + if unlimited_value is not None and value in ('unlimited', str(unlimited_value)): value = unlimited_value else: value = human_to_bytes(value) @@ -717,13 +720,80 @@ def _preprocess_mac_address(module, values): } -def _get_expected_sysctls_value(module, client, api_version, image, value, sentry): - if value is sentry: - return value - result = {} - for key, sysctl_value in value: - result[key] = to_text(sysctl_value, errors='surrogate_or_strict') - return result +def _preprocess_networks(module, values): + if module.params['networks_cli_compatible'] is True and values.get('networks') and 'network_mode' not in values: + # Same behavior as Docker CLI: if networks are specified, use the name of the first network as the value for network_mode + # (assuming no explicit value is specified for network_mode) + values['network_mode'] = values['networks'][0]['name'] + + if 'networks' in values: + for network in values['networks']: + if network['links']: + parsed_links = [] + for link in network['links']: + parsed_link = link.split(':', 1) + if len(parsed_link) == 1: + parsed_link = (link, link) + 
parsed_links.append(tuple(parsed_link)) + network['links'] = parsed_links + + return values + + +def _ignore_mismatching_network_result(module, client, api_version, option, image, container_value, expected_value): + # 'networks' is handled out-of-band + if option.name == 'networks': + return True + return False + + +def _preprocess_network_values(module, client, api_version, options, values): + if 'networks' in values: + for network in values['networks']: + network['id'] = _get_network_id(module, client, network['name']) + if not network['id']: + module.fail_json(msg="Parameter error: network named %s could not be found. Does it exist?" % (network['name'], )) + + if 'network_mode' in values: + values['network_mode'] = _preprocess_container_names(module, client, api_version, values['network_mode']) + + return values + + +def _get_network_id(module, client, network_name): + try: + network_id = None + params = {'filters': json.dumps({'name': [network_name]})} + for network in client.get_json('/networks', params=params): + if network['Name'] == network_name: + network_id = network['Id'] + break + return network_id + except Exception as exc: + module.fail_json(msg="Error getting network id for %s - %s" % (network_name, to_native(exc))) + + +def _get_values_network(module, container, api_version, options): + value = container['HostConfig'].get('NetworkMode', _SENTRY) + if value is _SENTRY: + return {} + return {'network_mode': value} + + +def _set_values_network(module, data, api_version, options, values): + if 'network_mode' not in values: + return + if 'HostConfig' not in data: + data['HostConfig'] = {} + value = values['network_mode'] + data['HostConfig']['NetworkMode'] = value + + +def _preprocess_sysctls(module, values): + if 'sysctls' in values: + for key, value in values['sysctls'].items(): + values['sysctls'][key] = to_text(value, errors='surrogate_or_strict') + return values def _preprocess_tmpfs(module, values): @@ -786,7 +856,9 @@ def check_collision(t, name): 
module.fail_json(msg='source must be specified for mount "{0}" of type "{1}"'.format(target, mount_type)) for option, req_mount_type in _MOUNT_OPTION_TYPES.items(): if mount[option] is not None and mount_type != req_mount_type: - module.fail_json(msg='{0} cannot be specified for mount "{1}" of type "{2}" (needs type "{3}")'.format(option, target, mount_type, req_mount_type)) + module.fail_json( + msg='{0} cannot be specified for mount "{1}" of type "{2}" (needs type "{3}")'.format(option, target, mount_type, req_mount_type) + ) # Streamline options volume_options = mount_dict.pop('volume_options') @@ -811,8 +883,8 @@ def check_collision(t, name): if 'volumes' in values: new_vols = [] for vol in values['volumes']: + parts = vol.split(':') if ':' in vol: - parts = vol.split(':') if len(parts) == 3: host, container, mode = parts if not _is_volume_permissions(mode): @@ -828,7 +900,7 @@ def check_collision(t, name): check_collision(parts[1], 'volumes') new_vols.append("%s:%s:rw" % (host, parts[1])) continue - check_collision(vol.split(':', 1)[0], 'volumes') + check_collision(parts[min(1, len(parts) - 1)], 'volumes') new_vols.append(vol) values['volumes'] = new_vols new_binds = [] @@ -934,7 +1006,7 @@ def _get_expected_values_mounts(module, client, api_version, options, image, val if len(parts) == 2: if not _is_volume_permissions(parts[1]): continue - expected_vols[vol] = dict() + expected_vols[vol] = {} if expected_vols: expected_values['volumes'] = expected_vols @@ -998,6 +1070,14 @@ def _set_values_mounts(module, data, api_version, options, values): if 'volumes' in values: volumes = {} for volume in values['volumes']: + # Only pass anonymous volumes to create container + if ':' in volume: + parts = volume.split(':') + if len(parts) == 3: + continue + if len(parts) == 2: + if not _is_volume_permissions(parts[1]): + continue volumes[volume] = {} data['Volumes'] = volumes if 'volume_binds' in values: @@ -1404,7 +1484,7 @@ def _preprocess_container_names(module, client, 
api_version, value): OptionGroup() .add_option('groups', type='set', elements='str') - .add_docker_api(DockerAPIEngine.config_value('GroupAdd')), + .add_docker_api(DockerAPIEngine.host_config_value('GroupAdd')), OptionGroup() .add_option('healthcheck', type='dict', ansible_suboptions=dict( @@ -1414,7 +1494,7 @@ def _preprocess_container_names(module, client, api_version, value): start_period=dict(type='str'), retries=dict(type='int'), )) - .add_docker_api(DockerAPIEngine.config_value('GroupAdd', preprocess_value=_preprocess_healthcheck, postprocess_for_get=_postprocess_healthcheck_get_value)), + .add_docker_api(DockerAPIEngine.config_value('Healthcheck', preprocess_value=_preprocess_healthcheck, postprocess_for_get=_postprocess_healthcheck_get_value)), OptionGroup() .add_option('hostname', type='str') @@ -1434,11 +1514,12 @@ def _preprocess_container_names(module, client, api_version, value): OptionGroup() .add_option('labels', type='dict', needs_no_suboptions=True) - .add_docker_api(DockerAPIEngine.config_value('Labels', get_expected_value=_get_expected_labels_value, ignore_mismatching_result=_ignore_mismatching_label_result)), + .add_docker_api(DockerAPIEngine.config_value( + 'Labels', get_expected_value=_get_expected_labels_value, ignore_mismatching_result=_ignore_mismatching_label_result)), OptionGroup() .add_option('links', type='set', elements='list', ansible_elements='str') - .add_docker_api(DockerAPIEngine.config_value('Links', preprocess_value=_preprocess_links)), + .add_docker_api(DockerAPIEngine.host_config_value('Links', preprocess_value=_preprocess_links)), OptionGroup(preprocess=_preprocess_log, ansible_required_by={'log_options': ['log_driver']}) .add_option('log_driver', type='str') @@ -1472,9 +1553,21 @@ def _preprocess_container_names(module, client, api_version, value): .add_option('stop_timeout', type='int', default_comparison='ignore') .add_docker_api(DockerAPIEngine.config_value('StopTimeout')), - OptionGroup() + 
OptionGroup(preprocess=_preprocess_networks) .add_option('network_mode', type='str') - .add_docker_api(DockerAPIEngine.host_config_value('NetworkMode', preprocess_value=_preprocess_container_names)), + .add_option('networks', type='set', elements='dict', ansible_suboptions=dict( + name=dict(type='str', required=True), + ipv4_address=dict(type='str'), + ipv6_address=dict(type='str'), + aliases=dict(type='list', elements='str'), + links=dict(type='list', elements='str'), + )) + .add_docker_api(DockerAPIEngine( + preprocess_value=_preprocess_network_values, + get_value=_get_values_network, + set_value=_set_values_network, + ignore_mismatching_result=_ignore_mismatching_network_result, + )), OptionGroup() .add_option('oom_killer', type='bool') @@ -1503,7 +1596,6 @@ def _preprocess_container_names(module, client, api_version, value): OptionGroup(ansible_required_by={'restart_retries': ['restart_policy']}) .add_option('restart_policy', type='str', ansible_choices=['no', 'on-failure', 'always', 'unless-stopped']) .add_option('restart_retries', type='int') - .add_docker_api(..., ) .add_docker_api(DockerAPIEngine( get_value=_get_values_restart, set_value=_set_values_restart, @@ -1522,13 +1614,17 @@ def _preprocess_container_names(module, client, api_version, value): .add_option('shm_size', type='int', ansible_type='str') .add_docker_api(DockerAPIEngine.host_config_value('ShmSize')), + OptionGroup() + .add_option('stop_signal', type='str') + .add_docker_api(DockerAPIEngine.config_value('StopSignal')), + OptionGroup() .add_option('storage_opts', type='dict', needs_no_suboptions=True) .add_docker_api(DockerAPIEngine.host_config_value('StorageOpt')), - OptionGroup() + OptionGroup(preprocess=_preprocess_sysctls) .add_option('sysctls', type='dict', needs_no_suboptions=True) - .add_docker_api(DockerAPIEngine.host_config_value('Sysctls', get_expected_value=_get_expected_sysctls_value)), + .add_docker_api(DockerAPIEngine.host_config_value('Sysctls')), 
OptionGroup(preprocess=_preprocess_tmpfs) .add_option('tmpfs', type='dict', ansible_type='list', ansible_elements='str') @@ -1592,7 +1688,7 @@ def _preprocess_container_names(module, client, api_version, value): OptionGroup(preprocess=_preprocess_ports) .add_option('exposed_ports', type='set', elements='str', ansible_aliases=['exposed', 'expose']) .add_option('publish_all_ports', type='bool') - .add_option('published_ports', type='set', elements='str', ansible_aliases=['ports']) + .add_option('published_ports', type='dict', ansible_type='list', ansible_elements='str', ansible_aliases=['ports']) .add_option('ports', type='set', elements='str', not_an_ansible_option=True, default_comparison='ignore') .add_docker_api(DockerAPIEngine( get_value=_get_values_ports, diff --git a/plugins/modules/docker_container.py b/plugins/modules/docker_container.py index 7cfa07db2..02fa22d55 100644 --- a/plugins/modules/docker_container.py +++ b/plugins/modules/docker_container.py @@ -1301,7 +1301,6 @@ def __init__(self, module, client, active_options): self.param_state = self.module.params['state'] self._parse_comparisons() self._update_params() - self.parameters = self._collect_params(active_options) self.results = {'changed': False, 'actions': []} self.diff = {} self.diff_tracker = DifferenceTracker() @@ -1319,7 +1318,6 @@ def __init__(self, module, client, active_options): self.fail('The value of default_host_ip must be an empty string, an IPv4 address, ' 'or an IPv6 address. 
Got "{0}" instead.'.format(self.param_default_host_ip)) - def _collect_all_options(self, active_options): all_options = {} for options in active_options: @@ -1427,19 +1425,6 @@ def _update_params(self): if self.module.params[param] is None: self.module.params[param] = value - def _collect_params(self, active_options): - parameters = [] - for options in active_options: - values = {} - engine = options.get_engine('docker_api') - for option in options.options: - if not option.not_an_ansible_option and self.module.params[option.name] is not None: - values[option.name] = self.module.params[option.name] - values = options.preprocess(self.module, values) - engine.preprocess_value(self.module, self.client, self.client.docker_api_version, options.options, values) - parameters.append((options, values)) - return parameters - def fail(self, *args, **kwargs): self.client.fail(*args, **kwargs) @@ -1492,7 +1477,21 @@ def wait_for_state(self, container_id, complete_states=None, wait_states=None, a # code will have slept for ~1.5 minutes.) 
delay = min(delay * 1.1, 10) + def _collect_params(self, active_options): + parameters = [] + for options in active_options: + values = {} + engine = options.get_engine('docker_api') + for option in options.options: + if not option.not_an_ansible_option and self.module.params[option.name] is not None: + values[option.name] = self.module.params[option.name] + values = options.preprocess(self.module, values) + engine.preprocess_value(self.module, self.client, self.client.docker_api_version, options.options, values) + parameters.append((options, values)) + return parameters + def present(self, state): + self.parameters = self._collect_params(self.options) container = self._get_container(self.param_name) was_running = container.running was_paused = container.paused @@ -1665,7 +1664,8 @@ def has_different_configuration(self, container, image): match = compare_generic(param_value, container_value, option.comparison, option.comparison_type) if not match: - if engine.ignore_mismatching_result(self.module, self.client, self.client.docker_api_version, option, image, container_value, param_value): + if engine.ignore_mismatching_result(self.module, self.client, self.client.docker_api_version, + option, image, container_value, param_value): continue # TODO # if option.name == 'healthcheck' and config_mapping['disable_healthcheck'] and self.parameters.disable_healthcheck: @@ -1866,18 +1866,24 @@ def _add_networks(self, container, differences): self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'], to_native(exc))) # connect to the network - params = dict() - for para, dest_para in {'ipv4_address': 'IPv4Address', 'ipv6_address': 'IPv6Address', 'links': 'Links', 'aliases': 'Aliases'}.items(): - if diff['parameter'].get(para): - value = diff['parameter'][para] - if para == 'links': - value = normalize_links(value) - params[dest_para] = value - self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], 
network_parameters=params)) + self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=diff['parameter'])) if not self.check_mode: try: self.log("Connecting container to network %s" % diff['parameter']['id']) - self.log(params, pretty_print=True) + self.log(diff['parameter'], pretty_print=True) + params = {} + for para, dest_para in {'ipv4_address': 'IPv4Address', 'ipv6_address': 'IPv6Address', 'links': 'Links', 'aliases': 'Aliases'}.items(): + if diff['parameter'].get(para): + value = diff['parameter'][para] + if para == 'links': + value = normalize_links(value) + params[dest_para] = value + ipam_config = {} + for param in ('IPv4Address', 'IPv6Address'): + if param in params: + ipam_config[param] = params.pop(param) + if ipam_config: + params['IPAMConfig'] = ipam_config data = { 'Container': container.Id, 'EndpointConfig': params, @@ -2099,13 +2105,6 @@ def main(): keep_volumes=dict(type='bool', default=True), kill_signal=dict(type='str'), name=dict(type='str', required=True), - networks=dict(type='list', elements='dict', options=dict( - name=dict(type='str', required=True), - ipv4_address=dict(type='str'), - ipv6_address=dict(type='str'), - aliases=dict(type='list', elements='str'), - links=dict(type='list', elements='str'), - )), networks_cli_compatible=dict(type='bool', default=True), output_logs=dict(type='bool', default=False), paused=dict(type='bool'), @@ -2115,7 +2114,6 @@ def main(): removal_wait_timeout=dict(type='float'), restart=dict(type='bool', default=False), state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']), - stop_signal=dict(type='str'), ) mutually_exclusive = [] From 2a17ab7e1e21713c6c2e384f51452ad15aa70460 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 11 Jul 2022 12:12:06 +0200 Subject: [PATCH 21/38] ci_complete From c6476d4e51ed36314e2b8b0d431ff606ddb05420 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 11 Jul 2022 12:26:33 +0200 Subject: 
[PATCH 22/38] Lint, fix Python 2.7 bugs, work around ansible-test bug. ci_complete --- plugins/module_utils/common.py | 2 +- plugins/module_utils/common_api.py | 2 +- plugins/module_utils/module_container.py | 8 ++++++-- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/plugins/module_utils/common.py b/plugins/module_utils/common.py index 01ee330f3..e889baee8 100644 --- a/plugins/module_utils/common.py +++ b/plugins/module_utils/common.py @@ -588,7 +588,7 @@ def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclu required_together=required_together_params, required_if=required_if, required_one_of=required_one_of, - required_by=required_by, + required_by=required_by or {}, ) self.debug = self.module.params.get('debug') diff --git a/plugins/module_utils/common_api.py b/plugins/module_utils/common_api.py index e0eaeded1..0ab2111f4 100644 --- a/plugins/module_utils/common_api.py +++ b/plugins/module_utils/common_api.py @@ -498,7 +498,7 @@ def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclu required_together=required_together_params, required_if=required_if, required_one_of=required_one_of, - required_by=required_by, + required_by=required_by or {}, ) self.debug = self.module.params.get('debug') diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container.py index 3c4539f02..e7f6e94ae 100644 --- a/plugins/module_utils/module_container.py +++ b/plugins/module_utils/module_container.py @@ -2,6 +2,9 @@ # Copyright 2016 Red Hat | Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + import json import os import re @@ -1338,7 +1341,7 @@ def _preprocess_value_ports(module, client, api_version, options, values): default_ip = _get_default_host_ip(module, client) for port, port_spec in values['published_ports'].items(): if port_spec[0] == 
_DEFAULT_IP_REPLACEMENT_STRING: - values['published_ports'][port] = (default_ip, *port_spec[1:]) + values['published_ports'][port] = tuple([default_ip] + list(port_spec[1:])) return values @@ -1494,7 +1497,8 @@ def _preprocess_container_names(module, client, api_version, value): start_period=dict(type='str'), retries=dict(type='int'), )) - .add_docker_api(DockerAPIEngine.config_value('Healthcheck', preprocess_value=_preprocess_healthcheck, postprocess_for_get=_postprocess_healthcheck_get_value)), + .add_docker_api(DockerAPIEngine.config_value( + 'Healthcheck', preprocess_value=_preprocess_healthcheck, postprocess_for_get=_postprocess_healthcheck_get_value)), OptionGroup() .add_option('hostname', type='str') From 322c024566796edaad6abcf61efe1f1c99bf744d Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 11 Jul 2022 12:29:42 +0200 Subject: [PATCH 23/38] Remove no longer applicable test. ci_complete --- .../plugins/modules/test_docker_container.py | 22 ------------------- 1 file changed, 22 deletions(-) delete mode 100644 tests/unit/plugins/modules/test_docker_container.py diff --git a/tests/unit/plugins/modules/test_docker_container.py b/tests/unit/plugins/modules/test_docker_container.py deleted file mode 100644 index 00701961f..000000000 --- a/tests/unit/plugins/modules/test_docker_container.py +++ /dev/null @@ -1,22 +0,0 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import unittest - -from ansible_collections.community.docker.plugins.modules.docker_container import TaskParameters - - -class TestTaskParameters(unittest.TestCase): - """Unit tests for TaskParameters.""" - - def test_parse_exposed_ports_tcp_udp(self): - """ - Ensure _parse_exposed_ports does not cancel ports with the same - number but different protocol. 
- """ - task_params = TaskParameters.__new__(TaskParameters) - task_params.exposed_ports = None - result = task_params._parse_exposed_ports([80, '443', '443/udp']) - self.assertTrue((80, 'tcp') in result) - self.assertTrue((443, 'tcp') in result) - self.assertTrue((443, 'udp') in result) From 325e451e982919aac43091dfd98098861bf16a8e Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 11 Jul 2022 14:05:55 +0200 Subject: [PATCH 24/38] Remove unnecessary ignore. ci_complete --- tests/sanity/ignore-2.10.txt | 1 - tests/sanity/ignore-2.11.txt | 1 - tests/sanity/ignore-2.12.txt | 1 - tests/sanity/ignore-2.13.txt | 1 - tests/sanity/ignore-2.14.txt | 1 - tests/sanity/ignore-2.9.txt | 1 - 6 files changed, 6 deletions(-) diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index fc12c3046..f999281cb 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -5,4 +5,3 @@ .azure-pipelines/scripts/publish-codecov.py future-import-boilerplate .azure-pipelines/scripts/publish-codecov.py metaclass-boilerplate plugins/modules/current_container_facts.py validate-modules:return-syntax-error -plugins/modules/docker_container.py use-argspec-type-path # uses colon-separated paths, can't use type=path diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index fc12c3046..f999281cb 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -5,4 +5,3 @@ .azure-pipelines/scripts/publish-codecov.py future-import-boilerplate .azure-pipelines/scripts/publish-codecov.py metaclass-boilerplate plugins/modules/current_container_facts.py validate-modules:return-syntax-error -plugins/modules/docker_container.py use-argspec-type-path # uses colon-separated paths, can't use type=path diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 9be213a02..5d5b2fd4c 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -1,3 +1,2 @@ .azure-pipelines/scripts/publish-codecov.py 
replace-urlopen plugins/modules/current_container_facts.py validate-modules:return-syntax-error -plugins/modules/docker_container.py use-argspec-type-path # uses colon-separated paths, can't use type=path diff --git a/tests/sanity/ignore-2.13.txt b/tests/sanity/ignore-2.13.txt index 878e37963..2dc9aec2e 100644 --- a/tests/sanity/ignore-2.13.txt +++ b/tests/sanity/ignore-2.13.txt @@ -1,2 +1 @@ .azure-pipelines/scripts/publish-codecov.py replace-urlopen -plugins/modules/docker_container.py use-argspec-type-path # uses colon-separated paths, can't use type=path diff --git a/tests/sanity/ignore-2.14.txt b/tests/sanity/ignore-2.14.txt index 878e37963..2dc9aec2e 100644 --- a/tests/sanity/ignore-2.14.txt +++ b/tests/sanity/ignore-2.14.txt @@ -1,2 +1 @@ .azure-pipelines/scripts/publish-codecov.py replace-urlopen -plugins/modules/docker_container.py use-argspec-type-path # uses colon-separated paths, can't use type=path diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 8cdc50bd0..a0dc80661 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -4,4 +4,3 @@ .azure-pipelines/scripts/publish-codecov.py compile-3.5!skip # Uses Python 3.6+ syntax .azure-pipelines/scripts/publish-codecov.py future-import-boilerplate .azure-pipelines/scripts/publish-codecov.py metaclass-boilerplate -plugins/modules/docker_container.py use-argspec-type-path # uses colon-separated paths, can't use type=path From d43b4a379fd497c7f562c65a531ccfd45604defa Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 11 Jul 2022 14:45:00 +0200 Subject: [PATCH 25/38] Start with engine driver. 
--- plugins/module_utils/module_container.py | 119 ++++++++++++++++++++--- plugins/modules/docker_container.py | 112 ++++++++------------- 2 files changed, 145 insertions(+), 86 deletions(-) diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container.py index e7f6e94ae..30d793575 100644 --- a/plugins/module_utils/module_container.py +++ b/plugins/module_utils/module_container.py @@ -5,6 +5,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type +import abc import json import os import re @@ -16,7 +17,10 @@ from ansible.module_utils.common.text.formatters import human_to_bytes from ansible.module_utils.six import string_types -from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion +from ansible_collections.community.docker.plugins.module_utils.common_api import ( + AnsibleDockerClient, + RequestException, +) from ansible_collections.community.docker.plugins.module_utils.util import ( clean_dict_booleans_for_docker_api, @@ -24,6 +28,8 @@ parse_healthcheck, ) +from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion + from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import ( parse_env_file, convert_port_bindings, @@ -166,11 +172,100 @@ def add_docker_api(self, docker_api): _SENTRY = object() -class DockerAPIEngineDriver(object): - pass +class Engine(object): + min_api_version = None # string or None + min_api_version_obj = None # LooseVersion object or None + + + @abc.abstractmethod + def get_value(self, module, container, api_version, options): + pass + + @abc.abstractmethod + def set_value(self, module, data, api_version, options, values): + pass + + @abc.abstractmethod + def get_expected_values(self, module, client, api_version, options, image, values): + pass + + @abc.abstractmethod + def ignore_mismatching_result(self, module, client, api_version, option, image, container_value, expected_value): 
+ pass + + @abc.abstractmethod + def preprocess_value(self, module, client, api_version, options, values): + pass + + @abc.abstractmethod + def update_value(self, module, data, api_version, options, values): + pass + + @abc.abstractmethod + def can_set_value(self, api_version): + pass + + @abc.abstractmethod + def can_update_value(self, api_version): + pass + + +class EngineDriver(object): + name = None # string + + @abc.abstractmethod + def setup(self, argument_spec, mutually_exclusive=None, required_together=None, required_one_of=None, required_if=None, required_by=None): + # Return (module, active_options, client) + pass + + +class DockerAPIEngineDriver(EngineDriver): + name = 'docker_api' + + def setup(self, argument_spec, mutually_exclusive=None, required_together=None, required_one_of=None, required_if=None, required_by=None): + argument_spec = argument_spec or {} + mutually_exclusive = mutually_exclusive or [] + required_together = required_together or [] + required_one_of = required_one_of or [] + required_if = required_if or [] + required_by = required_by or {} + + active_options = [] + option_minimal_versions = {} + for options in OPTIONS: + if not options.supports_engine(self.name): + continue + + mutually_exclusive.extend(options.ansible_mutually_exclusive) + required_together.extend(options.ansible_required_together) + required_one_of.extend(options.ansible_required_one_of) + required_if.extend(options.ansible_required_if) + required_by.update(options.ansible_required_by) + argument_spec.update(options.argument_spec) + + engine = options.get_engine(self.name) + if engine.min_api_version is not None: + for option in options.options: + if not option.not_an_ansible_option: + option_minimal_versions[option.name] = {'docker_api_version': engine.min_api_version} + + active_options.append(options) + + client = AnsibleDockerClient( + argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + required_together=required_together, + 
required_one_of=required_one_of, + required_if=required_if, + required_by=required_by, + option_minimal_versions=option_minimal_versions, + supports_check_mode=True, + ) + + return client.module, active_options, client -class DockerAPIEngine(object): +class DockerAPIEngine(Engine): def __init__( self, get_value, @@ -181,10 +276,10 @@ def __init__( update_value=None, can_set_value=None, can_update_value=None, - min_docker_api=None, + min_api_version=None, ): - self.min_docker_api = min_docker_api - self.min_docker_api_obj = None if min_docker_api is None else LooseVersion(min_docker_api) + self.min_api_version = min_api_version + self.min_api_version_obj = None if min_api_version is None else LooseVersion(min_api_version) self.get_value = get_value self.set_value = set_value self.get_expected_values = get_expected_values or (lambda module, client, api_version, options, image, values: values) @@ -203,7 +298,7 @@ def config_value( preprocess_for_set=None, get_expected_value=None, ignore_mismatching_result=None, - min_docker_api=None, + min_api_version=None, preprocess_value=None, update_parameter=None, ): @@ -267,7 +362,7 @@ def update_value(module, data, api_version, options, values): get_expected_values=get_expected_values_, ignore_mismatching_result=ignore_mismatching_result, set_value=set_value, - min_docker_api=min_docker_api, + min_api_version=min_api_version, update_value=update_value, ) @@ -279,7 +374,7 @@ def host_config_value( preprocess_for_set=None, get_expected_value=None, ignore_mismatching_result=None, - min_docker_api=None, + min_api_version=None, preprocess_value=None, update_parameter=None, ): @@ -345,7 +440,7 @@ def update_value(module, data, api_version, options, values): get_expected_values=get_expected_values_, ignore_mismatching_result=ignore_mismatching_result, set_value=set_value, - min_docker_api=min_docker_api, + min_api_version=min_api_version, update_value=update_value, ) @@ -1458,7 +1553,7 @@ def _preprocess_container_names(module, 
client, api_version, value): driver=dict(type='str'), options=dict(type='dict'), )) - .add_docker_api(DockerAPIEngine.host_config_value('DeviceRequests', min_docker_api='1.40', preprocess_value=_preprocess_device_requests)), + .add_docker_api(DockerAPIEngine.host_config_value('DeviceRequests', min_api_version='1.40', preprocess_value=_preprocess_device_requests)), OptionGroup() .add_option('dns_servers', type='list', elements='str') diff --git a/plugins/modules/docker_container.py b/plugins/modules/docker_container.py index 02fa22d55..bcba28225 100644 --- a/plugins/modules/docker_container.py +++ b/plugins/modules/docker_container.py @@ -1212,12 +1212,10 @@ from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion from ansible_collections.community.docker.plugins.module_utils.common_api import ( - AnsibleDockerClient, RequestException, ) from ansible_collections.community.docker.plugins.module_utils.module_container import ( DockerAPIEngineDriver, - OPTIONS, Option, ) from ansible_collections.community.docker.plugins.module_utils.util import ( @@ -1274,11 +1272,12 @@ def paused(self): class ContainerManager(DockerBaseClass): - def __init__(self, module, client, active_options): + def __init__(self, module, engine_driver, client, active_options): + self.module = module + self.engine_driver = engine_driver self.client = client self.options = active_options self.all_options = self._collect_all_options(active_options) - self.module = module self.check_mode = self.module.check_mode self.param_cleanup = self.module.params['cleanup'] self.param_container_default_behavior = self.module.params['container_default_behavior'] @@ -1325,7 +1324,6 @@ def _collect_all_options(self, active_options): all_options[option.name] = option for option in [ Option('image', 'str', None), - Option('networks', 'set', None, elements='dict', ansible_suboptions={}), ]: all_options[option.name] = option return all_options @@ -1481,7 +1479,7 @@ def 
_collect_params(self, active_options): parameters = [] for options in active_options: values = {} - engine = options.get_engine('docker_api') + engine = options.get_engine(self.engine_driver.name) for option in options.options: if not option.not_an_ansible_option and self.module.params[option.name] is not None: values[option.name] = self.module.params[option.name] @@ -1646,7 +1644,7 @@ def _compose_create_parameters(self, image): 'Image': image, } for options, values in self.parameters: - engine = options.get_engine('docker_api') + engine = options.get_engine(self.engine_driver.name) if engine.can_set_value(self.client.docker_api_version): engine.set_value(self.module, params, self.client.docker_api_version, options.options, values) return params @@ -1654,7 +1652,7 @@ def _compose_create_parameters(self, image): def has_different_configuration(self, container, image): differences = DifferenceTracker() for options, param_values in self.parameters: - engine = options.get_engine('docker_api') + engine = options.get_engine(self.engine_driver.name) container_values = engine.get_value(self.module, container.raw, self.client.docker_api_version, options.options) expected_values = engine.get_expected_values(self.module, self.client, self.client.docker_api_version, options.options, image, param_values.copy()) for option in options.options: @@ -1708,7 +1706,7 @@ def has_different_resource_limits(self, container, image): ''' differences = DifferenceTracker() for options, param_values in self.parameters: - engine = options.get_engine('docker_api') + engine = options.get_engine(self.engine_driver.name) if not engine.can_update_value(self.client.docker_api_version): continue container_values = engine.get_value(self.module, container.raw, self.client.docker_api_version, options.options) @@ -1728,7 +1726,7 @@ def has_different_resource_limits(self, container, image): def _compose_update_parameters(self): result = {} for options, values in self.parameters: - engine = 
options.get_engine('docker_api') + engine = options.get_engine(self.engine_driver.name) if not engine.can_update_value(self.client.docker_api_version): continue engine.update_value(self.module, result, self.client.docker_api_version, options.options, values) @@ -2092,73 +2090,39 @@ def container_stop(self, container_id): def main(): - argument_spec = dict( - cleanup=dict(type='bool', default=False), - comparisons=dict(type='dict'), - container_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']), - command_handling=dict(type='str', choices=['compatibility', 'correct'], default='correct'), - default_host_ip=dict(type='str'), - force_kill=dict(type='bool', default=False, aliases=['forcekill']), - ignore_image=dict(type='bool', default=False), - image=dict(type='str'), - image_label_mismatch=dict(type='str', choices=['ignore', 'fail'], default='ignore'), - keep_volumes=dict(type='bool', default=True), - kill_signal=dict(type='str'), - name=dict(type='str', required=True), - networks_cli_compatible=dict(type='bool', default=True), - output_logs=dict(type='bool', default=False), - paused=dict(type='bool'), - pull=dict(type='bool', default=False), - purge_networks=dict(type='bool', default=False), - recreate=dict(type='bool', default=False), - removal_wait_timeout=dict(type='float'), - restart=dict(type='bool', default=False), - state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']), - ) - - mutually_exclusive = [] - required_together = [] - required_one_of = [] - required_if = [ - ('state', 'present', ['image']) - ] - required_by = {} - - option_minimal_versions = {} - - active_options = [] - for options in OPTIONS: - if not options.supports_engine('docker_api'): - continue - - mutually_exclusive.extend(options.ansible_mutually_exclusive) - required_together.extend(options.ansible_required_together) - required_one_of.extend(options.ansible_required_one_of) - 
required_if.extend(options.ansible_required_if) - required_by.update(options.ansible_required_by) - argument_spec.update(options.argument_spec) - - engine = options.get_engine('docker_api') - if engine.min_docker_api is not None: - for option in options.options: - if not option.not_an_ansible_option: - option_minimal_versions[option.name] = {'docker_api_version': engine.min_docker_api} - - active_options.append(options) - - client = AnsibleDockerClient( - argument_spec=argument_spec, - mutually_exclusive=mutually_exclusive, - required_together=required_together, - required_one_of=required_one_of, - required_if=required_if, - required_by=required_by, - option_minimal_versions=option_minimal_versions, - supports_check_mode=True, + engine_driver = DockerAPIEngineDriver() + + module, active_options, client = engine_driver.setup( + argument_spec = dict( + cleanup=dict(type='bool', default=False), + comparisons=dict(type='dict'), + container_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']), + command_handling=dict(type='str', choices=['compatibility', 'correct'], default='correct'), + default_host_ip=dict(type='str'), + force_kill=dict(type='bool', default=False, aliases=['forcekill']), + ignore_image=dict(type='bool', default=False), + image=dict(type='str'), + image_label_mismatch=dict(type='str', choices=['ignore', 'fail'], default='ignore'), + keep_volumes=dict(type='bool', default=True), + kill_signal=dict(type='str'), + name=dict(type='str', required=True), + networks_cli_compatible=dict(type='bool', default=True), + output_logs=dict(type='bool', default=False), + paused=dict(type='bool'), + pull=dict(type='bool', default=False), + purge_networks=dict(type='bool', default=False), + recreate=dict(type='bool', default=False), + removal_wait_timeout=dict(type='float'), + restart=dict(type='bool', default=False), + state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']), + ), + 
required_if = [ + ('state', 'present', ['image']) + ], ) try: - cm = ContainerManager(client.module, client, active_options) + cm = ContainerManager(module, engine_driver, client, active_options) cm.run() client.module.exit_json(**sanitize_result(cm.results)) except DockerException as e: From 7d820bbcfd0e86e2e1b8fc249eb693e20de281ec Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 11 Jul 2022 22:15:51 +0200 Subject: [PATCH 26/38] Refactoring. --- plugins/module_utils/module_container.py | 278 +++++++++++++++++- plugins/modules/docker_container.py | 353 +++++++---------------- 2 files changed, 388 insertions(+), 243 deletions(-) diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container.py index 30d793575..e01829ea0 100644 --- a/plugins/module_utils/module_container.py +++ b/plugins/module_utils/module_container.py @@ -30,9 +30,13 @@ from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion +from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, NotFound + from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import ( - parse_env_file, convert_port_bindings, + normalize_links, + parse_env_file, + parse_repository_tag, ) @@ -176,7 +180,6 @@ class Engine(object): min_api_version = None # string or None min_api_version_obj = None # LooseVersion object or None - @abc.abstractmethod def get_value(self, module, container, api_version, options): pass @@ -218,6 +221,98 @@ def setup(self, argument_spec, mutually_exclusive=None, required_together=None, # Return (module, active_options, client) pass + @abc.abstractmethod + def get_container_id(self, container): + pass + + @abc.abstractmethod + def get_image_from_container(self, container): + pass + + @abc.abstractmethod + def is_container_removing(self, container): + pass + + @abc.abstractmethod + def is_container_running(self, container): + pass + + @abc.abstractmethod + def 
is_container_paused(self, container): + pass + + @abc.abstractmethod + def inspect_container_by_name(self, client, container_name): + pass + + @abc.abstractmethod + def inspect_container_by_id(self, client, container_id): + pass + + @abc.abstractmethod + def inspect_image_by_id(self, client, image_id): + pass + + @abc.abstractmethod + def inspect_image_by_name(self, client, repository, tag): + pass + + @abc.abstractmethod + def pull_image(self, client, repository, tag): + pass + + @abc.abstractmethod + def pause_container(self, client, container_id): + pass + + @abc.abstractmethod + def unpause_container(self, client, container_id): + pass + + @abc.abstractmethod + def disconnect_container_from_network(self, client, container_id, network_id): + pass + + @abc.abstractmethod + def connect_container_to_network(self, client, container_id, network_id, parameters=None): + pass + + @abc.abstractmethod + def create_container(self, client, container_name, create_parameters): + pass + + @abc.abstractmethod + def start_container(self, client, container_id): + pass + + @abc.abstractmethod + def wait_for_container(self, client, container_id): + pass + + @abc.abstractmethod + def get_container_output(self, client, container_id): + pass + + @abc.abstractmethod + def update_container(self, client, container_id, update_parameters): + pass + + @abc.abstractmethod + def restart_container(self, client, container_id, timeout=None): + pass + + @abc.abstractmethod + def kill_container(self, client, container_id, kill_signal=None): + pass + + @abc.abstractmethod + def stop_container(self, client, container_id, timeout=None): + pass + + @abc.abstractmethod + def remove_container(self, client, container_id, remove_volumes=False, link=False, force=False): + pass + class DockerAPIEngineDriver(EngineDriver): name = 'docker_api' @@ -264,6 +359,180 @@ def setup(self, argument_spec, mutually_exclusive=None, required_together=None, return client.module, active_options, client + def 
get_container_id(self, container): + return container['Id'] + + def get_image_from_container(self, container): + return container['Image'] + + def is_container_removing(self, container): + if container.get('State'): + return container['State'].get('Status') == 'removing' + return False + + def is_container_running(self, container): + if container.get('State'): + if container['State'].get('Running') and not container['State'].get('Ghost', False): + return True + return False + + def is_container_paused(self, container): + if container.get('State'): + return container['State'].get('Paused', False) + return False + + def inspect_container_by_name(self, client, container_name): + return client.get_container(container_name) + + def inspect_container_by_id(self, client, container_id): + return client.get_container_by_id(container_id) + + def inspect_image_by_id(self, client, image_id): + return client.find_image_by_id(image_id) + + def inspect_image_by_name(self, client, repository, tag): + return client.find_image(repository, tag) + + def pull_image(self, client, repository, tag): + return client.pull_image(repository, tag) + + def pause_container(self, client, container_id): + client.post_call('/containers/{0}/pause', container_id) + + def unpause_container(self, client, container_id): + client.post_call('/containers/{0}/unpause', container_id) + + def disconnect_container_from_network(self, client, container_id, network_id): + client.post_json('/networks/{0}/disconnect', network_id, data={'Container': container_id}) + + def connect_container_to_network(self, client, container_id, network_id, parameters=None): + parameters = (parameters or {}).copy() + params = {} + for para, dest_para in {'ipv4_address': 'IPv4Address', 'ipv6_address': 'IPv6Address', 'links': 'Links', 'aliases': 'Aliases'}.items(): + value = parameters.pop(para, None) + if value: + if para == 'links': + value = normalize_links(value) + params[dest_para] = value + if parameters: + raise Exception( + 
'Unknown parameter(s) for connect_container_to_network for Docker API driver: %s' % (', '.join(['"%s"' % p for p in sorted(parameters)]))) + ipam_config = {} + for param in ('IPv4Address', 'IPv6Address'): + if param in params: + ipam_config[param] = params.pop(param) + if ipam_config: + params['IPAMConfig'] = ipam_config + data = { + 'Container': container_id, + 'EndpointConfig': params, + } + client.post_json('/networks/{0}/connect', network_id, data=data) + + def create_container(self, client, container_name, create_parameters): + params = {'name': container_name} + new_container = client.post_json_to_json('/containers/create', data=create_parameters, params=params) + client.report_warnings(new_container) + return new_container['Id'] + + def start_container(self, client, container_id): + client.post_json('/containers/{0}/start', container_id) + + def wait_for_container(self, client, container_id): + return client.post_json_to_json('/containers/{0}/wait', container_id)['StatusCode'] + + def get_container_output(self, client, container_id): + config = client.get_json('/containers/{0}/json', container_id) + logging_driver = config['HostConfig']['LogConfig']['Type'] + if logging_driver in ('json-file', 'journald', 'local'): + params = { + 'stderr': 1, + 'stdout': 1, + 'timestamps': 0, + 'follow': 0, + 'tail': 'all', + } + res = client._get(client._url('/containers/{0}/logs', container_id), params=params) + output = client._get_result_tty(False, res, config['Config']['Tty']) + return output, True + else: + return "Result logged using `%s` driver" % logging_driver, False + + def update_container(self, client, container_id, update_parameters): + result = client.post_json_to_json('/containers/{0}/update', container_id, data=update_parameters) + client.report_warnings(result) + + def restart_container(self, client, container_id, timeout=None): + client_timeout = client.timeout + if client_timeout is not None: + client_timeout += timeout or 10 + 
client.post_call('/containers/{0}/restart', container_id, params={'t': timeout}, timeout=client_timeout) + + def kill_container(self, client, container_id, kill_signal=None): + params = {} + if kill_signal is not None: + params['signal'] = int(kill_signal) + client.post_call('/containers/{0}/kill', container_id, params=params) + + def stop_container(self, client, container_id, timeout=None): + if timeout: + params = {'t': timeout} + else: + params = {} + timeout = 10 + client_timeout = client.timeout + if client_timeout is not None: + client_timeout += timeout + count = 0 + while True: + try: + client.post_call('/containers/{0}/stop', container_id, params=params, timeout=client_timeout) + except APIError as exc: + if 'Unpause the container before stopping or killing' in exc.explanation: + # New docker daemon versions do not allow containers to be removed + # if they are paused. Make sure we don't end up in an infinite loop. + if count == 3: + raise Exception('%s [tried to unpause three times]' % to_native(exc)) + count += 1 + # Unpause + try: + self.unpause_container(client, container_id) + except Exception as exc2: + raise Exception('%s [while unpausing]' % to_native(exc2)) + # Now try again + continue + raise + # We only loop when explicitly requested by 'continue' + break + + def remove_container(self, client, container_id, remove_volumes=False, link=False, force=False): + params = {'v': remove_volumes, 'link': link, 'force': force} + count = 0 + while True: + try: + client.delete_call('/containers/{0}', container_id, params=params) + except NotFound as dummy: + pass + except APIError as exc: + if 'Unpause the container before stopping or killing' in exc.explanation: + # New docker daemon versions do not allow containers to be removed + # if they are paused. Make sure we don't end up in an infinite loop. 
+ if count == 3: + raise Exception('%s [tried to unpause three times]' % to_native(exc)) + count += 1 + # Unpause + try: + self.unpause_container(client, container_id) + except Exception as exc2: + raise Exception('%s [while unpausing]' % to_native(exc2)) + # Now try again + continue + if 'removal of container ' in exc.explanation and ' is already in progress' in exc.explanation: + pass + raise + # We only loop when explicitly requested by 'continue' + break + class DockerAPIEngine(Engine): def __init__( @@ -1599,6 +1868,11 @@ def _preprocess_container_names(module, client, api_version, value): .add_option('hostname', type='str') .add_docker_api(DockerAPIEngine.config_value('Hostname')), + OptionGroup(preprocess=_preprocess_networks) + .add_option('image', type='str') + .add_docker_api(DockerAPIEngine.config_value( + 'Image', ignore_mismatching_result=lambda module, client, api_version, option, image, container_value, expected_value: True)), + OptionGroup() .add_option('init', type='bool') .add_docker_api(DockerAPIEngine.host_config_value('Init')), diff --git a/plugins/modules/docker_container.py b/plugins/modules/docker_container.py index bcba28225..36314c298 100644 --- a/plugins/modules/docker_container.py +++ b/plugins/modules/docker_container.py @@ -1199,24 +1199,17 @@ sample: 0 ''' -import os import re -import shlex import traceback from time import sleep -from ansible.module_utils.common.text.formatters import human_to_bytes from ansible.module_utils.common.text.converters import to_native, to_text -from ansible.module_utils.six import string_types - -from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion from ansible_collections.community.docker.plugins.module_utils.common_api import ( RequestException, ) from ansible_collections.community.docker.plugins.module_utils.module_container import ( DockerAPIEngineDriver, - Option, ) from ansible_collections.community.docker.plugins.module_utils.util import ( DifferenceTracker, 
@@ -1224,27 +1217,24 @@ compare_generic, is_image_name_id, sanitize_result, - clean_dict_booleans_for_docker_api, - omit_none_from_dict, - parse_healthcheck, - DOCKER_COMMON_ARGS, ) -from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, DockerException, NotFound +from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException -from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import parse_repository_tag, normalize_links +from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import parse_repository_tag class Container(DockerBaseClass): - def __init__(self, container): + def __init__(self, container, engine_driver): super(Container, self).__init__() self.raw = container - self.Id = None - self.Image = None + self.id = None + self.image = None self.container = container + self.engine_driver = engine_driver if container: - self.Id = container['Id'] - self.Image = container['Image'] + self.id = engine_driver.get_container_id(container) + self.image = engine_driver.get_image_from_container(container) self.log(self.container, pretty_print=True) @property @@ -1253,22 +1243,15 @@ def exists(self): @property def removing(self): - if self.container and self.container.get('State'): - return self.container['State'].get('Status') == 'removing' - return False + return self.engine_driver.is_container_removing(self.container) if self.container else False @property def running(self): - if self.container and self.container.get('State'): - if self.container['State'].get('Running') and not self.container['State'].get('Ghost', False): - return True - return False + return self.engine_driver.is_container_running(self.container) if self.container else False @property def paused(self): - if self.container and self.container.get('State'): - return self.container['State'].get('Paused', False) - return False + return 
self.engine_driver.is_container_paused(self.container) if self.container else False class ContainerManager(DockerBaseClass): @@ -1322,10 +1305,6 @@ def _collect_all_options(self, active_options): for options in active_options: for option in options.options: all_options[option.name] = option - for option in [ - Option('image', 'str', None), - ]: - all_options[option.name] = option return all_options def _collect_all_module_params(self): @@ -1447,7 +1426,7 @@ def wait_for_state(self, container_id, complete_states=None, wait_states=None, a total_wait = 0 while True: # Inspect container - result = self.client.get_container_by_id(container_id) + result = self.engine_driver.inspect_container_by_id(self.client, container_id) if result is None: if accept_removal: return @@ -1514,7 +1493,7 @@ def present(self, state): if container.removing and not self.check_mode: # Wait for container to be removed before trying to create it self.wait_for_state( - container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout) + container.id, wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout) new_container = self.container_create(self.param_image) if new_container: container = new_container @@ -1533,16 +1512,16 @@ def present(self, state): self.log("differences") self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True) image_to_use = self.param_image - if not image_to_use and container and container.Image: - image_to_use = container.Image + if not image_to_use and container and container.image: + image_to_use = container.image if not image_to_use: self.fail('Cannot recreate container when image is not specified or cannot be extracted from current container!') if container.running: - self.container_stop(container.Id) - self.container_remove(container.Id) + self.container_stop(container.id) + self.container_remove(container.id) if not self.check_mode: self.wait_for_state( - container.Id, 
wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout) + container.id, wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout) new_container = self.container_create(image_to_use) if new_container: container = new_container @@ -1554,29 +1533,29 @@ def present(self, state): if state == 'started' and not container.running: self.diff_tracker.add('running', parameter=True, active=was_running) - container = self.container_start(container.Id) + container = self.container_start(container.id) elif state == 'started' and self.param_restart: self.diff_tracker.add('running', parameter=True, active=was_running) self.diff_tracker.add('restarted', parameter=True, active=False) - container = self.container_restart(container.Id) + container = self.container_restart(container.id) elif state == 'stopped' and container.running: self.diff_tracker.add('running', parameter=False, active=was_running) - self.container_stop(container.Id) - container = self._get_container(container.Id) + self.container_stop(container.id) + container = self._get_container(container.id) if state == 'started' and self.param_paused is not None and container.paused != self.param_paused: self.diff_tracker.add('paused', parameter=self.param_paused, active=was_paused) if not self.check_mode: try: if self.param_paused: - self.client.post_call('/containers/{0}/pause', container.Id) + self.engine_driver.pause_container(self.client, container.id) else: - self.client.post_call('/containers/{0}/unpause', container.Id) + self.engine_driver.unpause_container(self.client, container.id) except Exception as exc: self.fail("Error %s container %s: %s" % ( - "pausing" if self.param_paused else "unpausing", container.Id, to_native(exc) + "pausing" if self.param_paused else "unpausing", container.id, to_native(exc) )) - container = self._get_container(container.Id) + container = self._get_container(container.id) self.results['changed'] = True 
self.results['actions'].append(dict(set_paused=self.param_paused)) @@ -1587,9 +1566,9 @@ def absent(self): if container.exists: if container.running: self.diff_tracker.add('running', parameter=False, active=True) - self.container_stop(container.Id) + self.container_stop(container.id) self.diff_tracker.add('exists', parameter=False, active=True) - self.container_remove(container.Id) + self.container_remove(container.id) def _output_logs(self, msg): self.module.log(msg=msg) @@ -1598,7 +1577,8 @@ def _get_container(self, container): ''' Expects container ID or Name. Returns a container object ''' - return Container(self.client.get_container(container)) + container = self.engine_driver.inspect_container_by_name(self.client, container) + return Container(container, self.engine_driver) def _get_image(self): image_parameter = self.param_image @@ -1606,16 +1586,16 @@ def _get_image(self): self.log('No image specified') return None if is_image_name_id(image_parameter): - image = self.client.find_image_by_id(image_parameter) + image = self.engine_driver.inspect_image_by_id(self.client, image_parameter) else: repository, tag = parse_repository_tag(image_parameter) if not tag: tag = "latest" - image = self.client.find_image(repository, tag) + image = self.engine_driver.inspect_image_by_name(self.client, repository, tag) if not image or self.param_pull: if not self.check_mode: self.log("Pull the image.") - image, alreadyToLatest = self.client.pull_image(repository, tag) + image, alreadyToLatest = self.engine_driver.pull_image(self.client, repository, tag) if alreadyToLatest: self.results['changed'] = False else: @@ -1633,95 +1613,79 @@ def _get_image(self): def _image_is_different(self, image, container): if image and image.get('Id'): - if container and container.Image: - if image.get('Id') != container.Image: - self.diff_tracker.add('image', parameter=image.get('Id'), active=container.Image) + if container and container.image: + if image.get('Id') != container.image: + 
self.diff_tracker.add('image', parameter=image.get('Id'), active=container.image) return True return False def _compose_create_parameters(self, image): - params = { - 'Image': image, - } + params = {} for options, values in self.parameters: engine = options.get_engine(self.engine_driver.name) if engine.can_set_value(self.client.docker_api_version): engine.set_value(self.module, params, self.client.docker_api_version, options.options, values) + params['Image'] = image return params + def _record_differences(self, differences, options, param_values, engine, container, image): + container_values = engine.get_value(self.module, container.raw, self.client.docker_api_version, options.options) + expected_values = engine.get_expected_values(self.module, self.client, self.client.docker_api_version, options.options, image, param_values.copy()) + for option in options.options: + if option.name in expected_values: + param_value = expected_values[option.name] + container_value = container_values.get(option.name) + match = compare_generic(param_value, container_value, option.comparison, option.comparison_type) + + if not match: + # No match. + if engine.ignore_mismatching_result(self.module, self.client, self.client.docker_api_version, + option, image, container_value, param_value): + # Ignore the result + continue + + # Record the differences + p = param_value + c = container_value + if option.comparison_type == 'set': + # Since the order does not matter, sort so that the diff output is better. + if p is not None: + p = sorted(p) + if c is not None: + c = sorted(c) + elif option.comparison_type == 'set(dict)': + # Since the order does not matter, sort so that the diff output is better. + if option.name == 'expected_mounts': + # For selected values, use one entry as key + def sort_key_fn(x): + return x['target'] + else: + # We sort the list of dictionaries by using the sorted items of a dict as its key. 
+ def sort_key_fn(x): + return sorted((a, to_text(b, errors='surrogate_or_strict')) for a, b in x.items()) + if p is not None: + p = sorted(p, key=sort_key_fn) + if c is not None: + c = sorted(c, key=sort_key_fn) + differences.add(option.name, parameter=p, active=c) + def has_different_configuration(self, container, image): differences = DifferenceTracker() for options, param_values in self.parameters: engine = options.get_engine(self.engine_driver.name) - container_values = engine.get_value(self.module, container.raw, self.client.docker_api_version, options.options) - expected_values = engine.get_expected_values(self.module, self.client, self.client.docker_api_version, options.options, image, param_values.copy()) - for option in options.options: - if option.name in expected_values: - param_value = expected_values[option.name] - container_value = container_values.get(option.name) - match = compare_generic(param_value, container_value, option.comparison, option.comparison_type) - - if not match: - if engine.ignore_mismatching_result(self.module, self.client, self.client.docker_api_version, - option, image, container_value, param_value): - continue - # TODO - # if option.name == 'healthcheck' and config_mapping['disable_healthcheck'] and self.parameters.disable_healthcheck: - # # If the healthcheck is disabled (both in parameters and for the current container), and the user - # # requested strict comparison for healthcheck, the comparison will fail. That's why we ignore the - # # expected_healthcheck comparison in this case. - # continue - - # no match. record the differences - p = param_value - c = container_value - if option.comparison_type == 'set': - # Since the order does not matter, sort so that the diff output is better. - if p is not None: - p = sorted(p) - if c is not None: - c = sorted(c) - elif option.comparison_type == 'set(dict)': - # Since the order does not matter, sort so that the diff output is better. 
- if option.name == 'expected_mounts': - # For selected values, use one entry as key - def sort_key_fn(x): - return x['target'] - else: - # We sort the list of dictionaries by using the sorted items of a dict as its key. - def sort_key_fn(x): - return sorted((a, to_text(b, errors='surrogate_or_strict')) for a, b in x.items()) - if p is not None: - p = sorted(p, key=sort_key_fn) - if c is not None: - c = sorted(c, key=sort_key_fn) - differences.add(option.name, parameter=p, active=c) - + self._record_differences(differences, options, param_values, engine, container, image) has_differences = not differences.empty return has_differences, differences def has_different_resource_limits(self, container, image): - ''' - Diff parameters and container resource limits - ''' differences = DifferenceTracker() for options, param_values in self.parameters: engine = options.get_engine(self.engine_driver.name) if not engine.can_update_value(self.client.docker_api_version): continue - container_values = engine.get_value(self.module, container.raw, self.client.docker_api_version, options.options) - expected_values = engine.get_expected_values(self.module, self.client, self.client.docker_api_version, options.options, image, param_values.copy()) - for option in options.options: - if option.name in expected_values: - param_value = expected_values[option.name] - container_value = container_values.get(option.name) - match = compare_generic(param_value, container_value, option.comparison, option.comparison_type) - - if not match: - # no match. 
record the differences - differences.add(option.name, parameter=param_value, active=container_value) - different = not differences.empty - return different, differences + self._record_differences(differences, options, param_values, engine, container, image) + has_differences = not differences.empty + return has_differences, differences def _compose_update_parameters(self): result = {} @@ -1739,8 +1703,8 @@ def update_limits(self, container, image): self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True) self.diff_tracker.merge(different_limits) if limits_differ and not self.check_mode: - self.container_update(container.Id, self._compose_update_parameters()) - return self._get_container(container.Id) + self.container_update(container.id, self._compose_update_parameters()) + return self._get_container(container.id) return container def has_network_differences(self, container): @@ -1859,48 +1823,32 @@ def _add_networks(self, container, differences): self.results['actions'].append(dict(removed_from_network=diff['parameter']['name'])) if not self.check_mode: try: - self.client.post_json('/networks/{0}/disconnect', diff['parameter']['id'], data={'Container': container.Id}) + self.engine_driver.disconnect_container_from_network(self.client, container.id, diff['parameter']['id']) except Exception as exc: self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'], to_native(exc))) # connect to the network self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=diff['parameter'])) if not self.check_mode: + params = {key: value for key, value in diff['parameter'].items() if key not in ('id', 'name')} try: self.log("Connecting container to network %s" % diff['parameter']['id']) - self.log(diff['parameter'], pretty_print=True) - params = {} - for para, dest_para in {'ipv4_address': 'IPv4Address', 'ipv6_address': 'IPv6Address', 'links': 'Links', 'aliases': 'Aliases'}.items(): - 
if diff['parameter'].get(para): - value = diff['parameter'][para] - if para == 'links': - value = normalize_links(value) - params[dest_para] = value - ipam_config = {} - for param in ('IPv4Address', 'IPv6Address'): - if param in params: - ipam_config[param] = params.pop(param) - if ipam_config: - params['IPAMConfig'] = ipam_config - data = { - 'Container': container.Id, - 'EndpointConfig': params, - } - self.client.post_json('/networks/{0}/connect', diff['parameter']['id'], data=data) + self.log(params, pretty_print=True) + self.engine_driver.connect_container_to_network(self.client, container.id, diff['parameter']['id'], params) except Exception as exc: self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], to_native(exc))) - return self._get_container(container.Id) + return self._get_container(container.id) def _purge_networks(self, container, networks): for network in networks: self.results['actions'].append(dict(removed_from_network=network['name'])) if not self.check_mode: try: - self.client.post_json('/networks/{0}/disconnect', network['name'], data={'Container': container.Id}) + self.engine_driver.disconnect_container_from_network(self.client, container.id, network['name']) except Exception as exc: self.fail("Error disconnecting container from network %s - %s" % (network['name'], to_native(exc))) - return self._get_container(container.Id) + return self._get_container(container.id) def container_create(self, image): create_parameters = self._compose_create_parameters(image) @@ -1912,12 +1860,10 @@ def container_create(self, image): new_container = None if not self.check_mode: try: - params = {'name': self.param_name} - new_container = self.client.post_json_to_json('/containers/create', data=create_parameters, params=params) - self.client.report_warnings(new_container) + container_id = self.engine_driver.create_container(self.client, self.param_name, create_parameters) except Exception as exc: self.fail("Error creating 
container: %s" % to_native(exc)) - return self._get_container(new_container['Id']) + return self._get_container(container_id) return new_container def container_start(self, container_id): @@ -1926,12 +1872,12 @@ def container_start(self, container_id): self.results['changed'] = True if not self.check_mode: try: - self.client.post_json('/containers/{0}/start', container_id) + self.engine_driver.start_container(self.client, container_id) except Exception as exc: self.fail("Error starting container %s: %s" % (container_id, to_native(exc))) if self.module.params['detach'] is False: - status = self.client.post_json_to_json('/containers/{0}/wait', container_id)['StatusCode'] + status = self.engine_driver.wait_for_container(self.client, container_id) self.client.fail_results['status'] = status self.results['status'] = status @@ -1940,23 +1886,9 @@ def container_start(self, container_id): if self.param_output_logs: self.module.warn('Cannot output_logs if auto_remove is enabled!') else: - config = self.client.get_json('/containers/{0}/json', container_id) - logging_driver = config['HostConfig']['LogConfig']['Type'] - - if logging_driver in ('json-file', 'journald', 'local'): - params = { - 'stderr': 1, - 'stdout': 1, - 'timestamps': 0, - 'follow': 0, - 'tail': 'all', - } - res = self.client._get(self.client._url('/containers/{0}/logs', container_id), params=params) - output = self.client._get_result_tty(False, res, config['Config']['Tty']) - if self.param_output_logs: - self._output_logs(msg=output) - else: - output = "Result logged using `%s` driver" % logging_driver + output, real_output = self.engine_driver.get_container_output(self.client, container_id) + if real_output and self.param_output_logs: + self._output_logs(msg=output) if self.param_cleanup: self.container_remove(container_id, force=True) @@ -1978,35 +1910,10 @@ def container_remove(self, container_id, link=False, force=False): self.results['actions'].append(dict(removed=container_id, 
volume_state=volume_state, link=link, force=force)) self.results['changed'] = True if not self.check_mode: - count = 0 - while True: - try: - params = {'v': volume_state, 'link': link, 'force': force} - self.client.delete_call('/containers/{0}', container_id, params=params) - except NotFound as dummy: - pass - except APIError as exc: - if 'Unpause the container before stopping or killing' in exc.explanation: - # New docker daemon versions do not allow containers to be removed - # if they are paused. Make sure we don't end up in an infinite loop. - if count == 3: - self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, to_native(exc))) - count += 1 - # Unpause - try: - self.client.post_call('/containers/{0}/unpause', container_id) - except Exception as exc2: - self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2))) - # Now try again - continue - if 'removal of container ' in exc.explanation and ' is already in progress' in exc.explanation: - pass - else: - self.fail("Error removing container %s: %s" % (container_id, to_native(exc))) - except Exception as exc: - self.fail("Error removing container %s: %s" % (container_id, to_native(exc))) - # We only loop when explicitly requested by 'continue' - break + try: + self.engine_driver.remove_container(self.client, container_id, remove_volumes=volume_state, link=link, force=force) + except Exception as exc: + self.client.fail("Error removing container %s: %s" % (container_id, to_native(exc))) def container_update(self, container_id, update_parameters): if update_parameters: @@ -2016,8 +1923,7 @@ def container_update(self, container_id, update_parameters): self.results['changed'] = True if not self.check_mode: try: - result = self.client.post_json_to_json('/containers/{0}/update', container_id, data=update_parameters) - self.client.report_warnings(result) + self.engine_driver.update_container(self.client, container_id, update_parameters) except 
Exception as exc: self.fail("Error updating container %s: %s" % (container_id, to_native(exc))) return self._get_container(container_id) @@ -2027,10 +1933,7 @@ def container_kill(self, container_id): self.results['changed'] = True if not self.check_mode: try: - params = {} - if self.param_kill_signal is not None: - params['signal'] = int(self.param_kill_signal) - self.client.post_call('/containers/{0}/kill', container_id, params=params) + self.engine_driver.kill_container(self.client, container_id, kill_signal=self.param_kill_signal) except Exception as exc: self.fail("Error killing container %s: %s" % (container_id, to_native(exc))) @@ -2039,11 +1942,7 @@ def container_restart(self, container_id): self.results['changed'] = True if not self.check_mode: try: - timeout = self.module.params['stop_timeout'] or 10 - client_timeout = self.client.timeout - if client_timeout is not None: - client_timeout += timeout - self.client.post_call('/containers/{0}/restart', container_id, params={'t': timeout}, timeout=client_timeout) + self.engine_driver.restart_container(self.client, container_id, self.module.params['stop_timeout'] or 10) except Exception as exc: self.fail("Error restarting container %s: %s" % (container_id, to_native(exc))) return self._get_container(container_id) @@ -2055,45 +1954,17 @@ def container_stop(self, container_id): self.results['actions'].append(dict(stopped=container_id, timeout=self.module.params['stop_timeout'])) self.results['changed'] = True if not self.check_mode: - count = 0 - while True: - try: - timeout = self.module.params['stop_timeout'] - if timeout: - params = {'t': timeout} - else: - params = {} - timeout = 10 - client_timeout = self.client.timeout - if client_timeout is not None: - client_timeout += timeout - self.client.post_call('/containers/{0}/stop', container_id, params=params, timeout=client_timeout) - except APIError as exc: - if 'Unpause the container before stopping or killing' in exc.explanation: - # New docker daemon versions 
do not allow containers to be removed - # if they are paused. Make sure we don't end up in an infinite loop. - if count == 3: - self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, to_native(exc))) - count += 1 - # Unpause - try: - self.client.post_call('/containers/{0}/unpause', container_id) - except Exception as exc2: - self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2))) - # Now try again - continue - self.fail("Error stopping container %s: %s" % (container_id, to_native(exc))) - except Exception as exc: - self.fail("Error stopping container %s: %s" % (container_id, to_native(exc))) - # We only loop when explicitly requested by 'continue' - break + try: + self.engine_driver.stop_container(self.client, container_id, self.module.params['stop_timeout']) + except Exception as exc: + self.fail("Error stopping container %s: %s" % (container_id, to_native(exc))) def main(): engine_driver = DockerAPIEngineDriver() module, active_options, client = engine_driver.setup( - argument_spec = dict( + argument_spec=dict( cleanup=dict(type='bool', default=False), comparisons=dict(type='dict'), container_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']), @@ -2116,7 +1987,7 @@ def main(): restart=dict(type='bool', default=False), state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']), ), - required_if = [ + required_if=[ ('state', 'present', ['image']) ], ) From 999b7029c282d3ab471fb7d3f310f91c8e4740a6 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 11 Jul 2022 22:24:40 +0200 Subject: [PATCH 27/38] Avoid using anything Docker specific from self.client. 
--- plugins/module_utils/module_container.py | 38 ++++++++++++++++++++---- plugins/modules/docker_container.py | 37 +++++++++-------------- 2 files changed, 46 insertions(+), 29 deletions(-) diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container.py index e01829ea0..acc0ff23c 100644 --- a/plugins/module_utils/module_container.py +++ b/plugins/module_utils/module_container.py @@ -10,6 +10,7 @@ import os import re import shlex +import traceback from functools import partial @@ -30,7 +31,11 @@ from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion -from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, NotFound +from ansible_collections.community.docker.plugins.module_utils._api.errors import ( + APIError, + DockerException, + NotFound, +) from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import ( convert_port_bindings, @@ -221,6 +226,10 @@ def setup(self, argument_spec, mutually_exclusive=None, required_together=None, # Return (module, active_options, client) pass + @abc.abstractmethod + def get_api_version(self, client): + pass + @abc.abstractmethod def get_container_id(self, container): pass @@ -313,6 +322,10 @@ def stop_container(self, client, container_id, timeout=None): def remove_container(self, client, container_id, remove_volumes=False, link=False, force=False): pass + @abc.abstractmethod + def run(self, runner, client): + pass + class DockerAPIEngineDriver(EngineDriver): name = 'docker_api' @@ -359,6 +372,9 @@ def setup(self, argument_spec, mutually_exclusive=None, required_together=None, return client.module, active_options, client + def get_api_version(self, client): + return client.docker_api_version + def get_container_id(self, container): return container['Id'] @@ -533,6 +549,16 @@ def remove_container(self, client, container_id, remove_volumes=False, link=Fals # We only loop when explicitly requested by 'continue' 
break + def run(self, runner, client): + try: + runner() + except DockerException as e: + client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + except RequestException as e: + client.fail( + 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + exception=traceback.format_exc()) + class DockerAPIEngine(Engine): def __init__( @@ -786,8 +812,8 @@ def _get_default_host_ip(module, client): if network_data.get('name'): network = client.get_network(network_data['name']) if network is None: - module.fail_json( - msg="Cannot inspect the network '{0}' to determine the default IP".format(network_data['name']), + client.fail( + "Cannot inspect the network '{0}' to determine the default IP".format(network_data['name']), ) if network.get('Driver') == 'bridge' and network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'): ip = network['Options']['com.docker.network.bridge.host_binding_ipv4'] @@ -1075,7 +1101,7 @@ def _ignore_mismatching_label_result(module, client, api_version, option, image, if would_remove_labels: msg = ("Some labels should be removed but are present in the base image. You can set image_label_mismatch to 'ignore' to ignore" " this error. Labels: {0}") - module.fail_json(msg=msg.format(', '.join(would_remove_labels))) + client.fail(msg.format(', '.join(would_remove_labels))) return False @@ -1119,7 +1145,7 @@ def _preprocess_network_values(module, client, api_version, options, values): for network in values['networks']: network['id'] = _get_network_id(module, client, network['name']) if not network['id']: - module.fail_json(msg="Parameter error: network named %s could not be found. Does it exist?" % (network['name'], )) + client.fail("Parameter error: network named %s could not be found. Does it exist?" 
% (network['name'], )) if 'network_mode' in values: values['network_mode'] = _preprocess_container_names(module, client, api_version, values['network_mode']) @@ -1137,7 +1163,7 @@ def _get_network_id(module, client, network_name): break return network_id except Exception as exc: - module.fail_json(msg="Error getting network id for %s - %s" % (network_name, to_native(exc))) + client.fail("Error getting network id for %s - %s" % (network_name, to_native(exc))) def _get_values_network(module, container, api_version, options): diff --git a/plugins/modules/docker_container.py b/plugins/modules/docker_container.py index 36314c298..3e7d112a8 100644 --- a/plugins/modules/docker_container.py +++ b/plugins/modules/docker_container.py @@ -1200,14 +1200,10 @@ ''' import re -import traceback from time import sleep from ansible.module_utils.common.text.converters import to_native, to_text -from ansible_collections.community.docker.plugins.module_utils.common_api import ( - RequestException, -) from ansible_collections.community.docker.plugins.module_utils.module_container import ( DockerAPIEngineDriver, ) @@ -1219,8 +1215,6 @@ sanitize_result, ) -from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException - from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import parse_repository_tag @@ -1463,7 +1457,7 @@ def _collect_params(self, active_options): if not option.not_an_ansible_option and self.module.params[option.name] is not None: values[option.name] = self.module.params[option.name] values = options.preprocess(self.module, values) - engine.preprocess_value(self.module, self.client, self.client.docker_api_version, options.options, values) + engine.preprocess_value(self.module, self.client, self.engine_driver.get_api_version(self.client), options.options, values) parameters.append((options, values)) return parameters @@ -1623,14 +1617,15 @@ def _compose_create_parameters(self, image): params = {} for options, 
values in self.parameters: engine = options.get_engine(self.engine_driver.name) - if engine.can_set_value(self.client.docker_api_version): - engine.set_value(self.module, params, self.client.docker_api_version, options.options, values) + if engine.can_set_value(self.engine_driver.get_api_version(self.client)): + engine.set_value(self.module, params, self.engine_driver.get_api_version(self.client), options.options, values) params['Image'] = image return params def _record_differences(self, differences, options, param_values, engine, container, image): - container_values = engine.get_value(self.module, container.raw, self.client.docker_api_version, options.options) - expected_values = engine.get_expected_values(self.module, self.client, self.client.docker_api_version, options.options, image, param_values.copy()) + container_values = engine.get_value(self.module, container.raw, self.engine_driver.get_api_version(self.client), options.options) + expected_values = engine.get_expected_values( + self.module, self.client, self.engine_driver.get_api_version(self.client), options.options, image, param_values.copy()) for option in options.options: if option.name in expected_values: param_value = expected_values[option.name] @@ -1639,7 +1634,7 @@ def _record_differences(self, differences, options, param_values, engine, contai if not match: # No match. 
- if engine.ignore_mismatching_result(self.module, self.client, self.client.docker_api_version, + if engine.ignore_mismatching_result(self.module, self.client, self.engine_driver.get_api_version(self.client), option, image, container_value, param_value): # Ignore the result continue @@ -1681,7 +1676,7 @@ def has_different_resource_limits(self, container, image): differences = DifferenceTracker() for options, param_values in self.parameters: engine = options.get_engine(self.engine_driver.name) - if not engine.can_update_value(self.client.docker_api_version): + if not engine.can_update_value(self.engine_driver.get_api_version(self.client)): continue self._record_differences(differences, options, param_values, engine, container, image) has_differences = not differences.empty @@ -1691,9 +1686,9 @@ def _compose_update_parameters(self): result = {} for options, values in self.parameters: engine = options.get_engine(self.engine_driver.name) - if not engine.can_update_value(self.client.docker_api_version): + if not engine.can_update_value(self.engine_driver.get_api_version(self.client)): continue - engine.update_value(self.module, result, self.client.docker_api_version, options.options, values) + engine.update_value(self.module, result, self.engine_driver.get_api_version(self.client), options.options, values) return result def update_limits(self, container, image): @@ -1992,16 +1987,12 @@ def main(): ], ) - try: + def execute(): cm = ContainerManager(module, engine_driver, client, active_options) cm.run() - client.module.exit_json(**sanitize_result(cm.results)) - except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) - except RequestException as e: - client.fail( - 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), - exception=traceback.format_exc()) + module.exit_json(**sanitize_result(cm.results)) + + engine_driver.run(execute, 
client) if __name__ == '__main__': From 5e31484aed609149af9ff7b085a7221d68dd3a24 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 12 Jul 2022 11:00:34 +0200 Subject: [PATCH 28/38] Refactor. --- plugins/module_utils/module_container/base.py | 1176 ++++++++++++++++ .../docker_api.py} | 1233 +++-------------- .../module_utils/module_container/module.py | 796 +++++++++++ plugins/modules/docker_container.py | 792 +---------- 4 files changed, 2208 insertions(+), 1789 deletions(-) create mode 100644 plugins/module_utils/module_container/base.py rename plugins/module_utils/{module_container.py => module_container/docker_api.py} (51%) create mode 100644 plugins/module_utils/module_container/module.py diff --git a/plugins/module_utils/module_container/base.py b/plugins/module_utils/module_container/base.py new file mode 100644 index 000000000..dba79df0c --- /dev/null +++ b/plugins/module_utils/module_container/base.py @@ -0,0 +1,1176 @@ +# Copyright (c) 2022 Felix Fontein +# Copyright 2016 Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import abc +import os +import re +import shlex + +from functools import partial + +from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.module_utils.common.text.formatters import human_to_bytes +from ansible.module_utils.six import string_types + +from ansible_collections.community.docker.plugins.module_utils.util import ( + clean_dict_booleans_for_docker_api, + omit_none_from_dict, +) + +from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import ( + parse_env_file, +) + + +_DEFAULT_IP_REPLACEMENT_STRING = '[[DEFAULT_IP:iewahhaeB4Sae6Aen8IeShairoh4zeph7xaekoh8Geingunaesaeweiy3ooleiwi]]' + + +_MOUNT_OPTION_TYPES = dict( + volume_driver='volume', + volume_options='volume', + propagation='bind', + no_copy='volume', + 
labels='volume', + tmpfs_size='tmpfs', + tmpfs_mode='tmpfs', +) + + +def _get_ansible_type(type): + if type == 'set': + return 'list' + if type not in ('list', 'dict', 'bool', 'int', 'float', 'str'): + raise Exception('Invalid type "%s"' % (type, )) + return type + + +class Option(object): + def __init__( + self, + name, + type, + owner, + ansible_type=None, + elements=None, + ansible_elements=None, + ansible_suboptions=None, + ansible_aliases=None, + ansible_choices=None, + needs_no_suboptions=False, + default_comparison=None, + not_a_container_option=False, + not_an_ansible_option=False, + copy_comparison_from=None, + ): + self.name = name + self.type = type + self.ansible_type = ansible_type or _get_ansible_type(type) + needs_elements = self.type in ('list', 'set') + needs_ansible_elements = self.ansible_type in ('list', ) + if elements is not None and not needs_elements: + raise Exception('elements only allowed for lists/sets') + if elements is None and needs_elements: + raise Exception('elements required for lists/sets') + if ansible_elements is not None and not needs_ansible_elements: + raise Exception('Ansible elements only allowed for Ansible lists') + if (elements is None and ansible_elements is None) and needs_ansible_elements: + raise Exception('Ansible elements required for Ansible lists') + self.elements = elements if needs_elements else None + self.ansible_elements = (ansible_elements or _get_ansible_type(elements)) if needs_ansible_elements else None + needs_suboptions = (self.ansible_type == 'list' and self.ansible_elements == 'dict') or (self.ansible_type == 'dict') + if ansible_suboptions is not None and not needs_suboptions: + raise Exception('suboptions only allowed for Ansible lists with dicts, or Ansible dicts') + if ansible_suboptions is None and needs_suboptions and not needs_no_suboptions and not not_an_ansible_option: + raise Exception('suboptions required for Ansible lists with dicts, or Ansible dicts') + self.ansible_suboptions = 
ansible_suboptions if needs_suboptions else None + self.ansible_aliases = ansible_aliases or [] + self.ansible_choices = ansible_choices + comparison_type = self.type + if comparison_type == 'set' and self.elements == 'dict': + comparison_type = 'set(dict)' + elif comparison_type not in ('set', 'list', 'dict'): + comparison_type = 'value' + self.comparison_type = comparison_type + if default_comparison is not None: + self.comparison = default_comparison + elif comparison_type in ('list', 'value'): + self.comparison = 'strict' + else: + self.comparison = 'allow_more_present' + self.not_a_container_option = not_a_container_option + self.not_an_ansible_option = not_an_ansible_option + self.copy_comparison_from = copy_comparison_from + + +class OptionGroup(object): + def __init__( + self, + preprocess=None, + ansible_mutually_exclusive=None, + ansible_required_together=None, + ansible_required_one_of=None, + ansible_required_if=None, + ansible_required_by=None, + ): + if preprocess is None: + def preprocess(module, values): + return values + self.preprocess = preprocess + self.options = [] + self.engines = {} + self.ansible_mutually_exclusive = ansible_mutually_exclusive or [] + self.ansible_required_together = ansible_required_together or [] + self.ansible_required_one_of = ansible_required_one_of or [] + self.ansible_required_if = ansible_required_if or [] + self.ansible_required_by = ansible_required_by or {} + self.argument_spec = {} + + def add_option(self, *args, **kwargs): + option = Option(*args, owner=self, **kwargs) + if not option.not_a_container_option: + self.options.append(option) + if not option.not_an_ansible_option: + ansible_option = { + 'type': option.ansible_type, + } + if option.ansible_elements is not None: + ansible_option['elements'] = option.ansible_elements + if option.ansible_suboptions is not None: + ansible_option['options'] = option.ansible_suboptions + if option.ansible_aliases: + ansible_option['aliases'] = option.ansible_aliases + if 
option.ansible_choices is not None: + ansible_option['choices'] = option.ansible_choices + self.argument_spec[option.name] = ansible_option + return self + + def supports_engine(self, engine_name): + return engine_name in self.engines + + def get_engine(self, engine_name): + return self.engines[engine_name] + + def add_engine(self, engine_name, engine): + self.engines[engine_name] = engine + return self + + +class Engine(object): + min_api_version = None # string or None + min_api_version_obj = None # LooseVersion object or None + + @abc.abstractmethod + def get_value(self, module, container, api_version, options): + pass + + @abc.abstractmethod + def set_value(self, module, data, api_version, options, values): + pass + + @abc.abstractmethod + def get_expected_values(self, module, client, api_version, options, image, values): + pass + + @abc.abstractmethod + def ignore_mismatching_result(self, module, client, api_version, option, image, container_value, expected_value): + pass + + @abc.abstractmethod + def preprocess_value(self, module, client, api_version, options, values): + pass + + @abc.abstractmethod + def update_value(self, module, data, api_version, options, values): + pass + + @abc.abstractmethod + def can_set_value(self, api_version): + pass + + @abc.abstractmethod + def can_update_value(self, api_version): + pass + + +class EngineDriver(object): + name = None # string + + @abc.abstractmethod + def setup(self, argument_spec, mutually_exclusive=None, required_together=None, required_one_of=None, required_if=None, required_by=None): + # Return (module, active_options, client) + pass + + @abc.abstractmethod + def get_api_version(self, client): + pass + + @abc.abstractmethod + def get_container_id(self, container): + pass + + @abc.abstractmethod + def get_image_from_container(self, container): + pass + + @abc.abstractmethod + def is_container_removing(self, container): + pass + + @abc.abstractmethod + def is_container_running(self, container): + pass + + 
@abc.abstractmethod + def is_container_paused(self, container): + pass + + @abc.abstractmethod + def inspect_container_by_name(self, client, container_name): + pass + + @abc.abstractmethod + def inspect_container_by_id(self, client, container_id): + pass + + @abc.abstractmethod + def inspect_image_by_id(self, client, image_id): + pass + + @abc.abstractmethod + def inspect_image_by_name(self, client, repository, tag): + pass + + @abc.abstractmethod + def pull_image(self, client, repository, tag): + pass + + @abc.abstractmethod + def pause_container(self, client, container_id): + pass + + @abc.abstractmethod + def unpause_container(self, client, container_id): + pass + + @abc.abstractmethod + def disconnect_container_from_network(self, client, container_id, network_id): + pass + + @abc.abstractmethod + def connect_container_to_network(self, client, container_id, network_id, parameters=None): + pass + + @abc.abstractmethod + def create_container(self, client, container_name, create_parameters): + pass + + @abc.abstractmethod + def start_container(self, client, container_id): + pass + + @abc.abstractmethod + def wait_for_container(self, client, container_id): + pass + + @abc.abstractmethod + def get_container_output(self, client, container_id): + pass + + @abc.abstractmethod + def update_container(self, client, container_id, update_parameters): + pass + + @abc.abstractmethod + def restart_container(self, client, container_id, timeout=None): + pass + + @abc.abstractmethod + def kill_container(self, client, container_id, kill_signal=None): + pass + + @abc.abstractmethod + def stop_container(self, client, container_id, timeout=None): + pass + + @abc.abstractmethod + def remove_container(self, client, container_id, remove_volumes=False, link=False, force=False): + pass + + @abc.abstractmethod + def run(self, runner, client): + pass + + +def _is_volume_permissions(mode): + for part in mode.split(','): + if part not in ('rw', 'ro', 'z', 'Z', 'consistent', 'delegated', 
'cached', 'rprivate', 'private', 'rshared', 'shared', 'rslave', 'slave', 'nocopy'): + return False + return True + + +def _parse_port_range(range_or_port, module): + ''' + Parses a string containing either a single port or a range of ports. + + Returns a list of integers for each port in the list. + ''' + if '-' in range_or_port: + try: + start, end = [int(port) for port in range_or_port.split('-')] + except Exception: + module.fail_json(msg='Invalid port range: "{0}"'.format(range_or_port)) + if end < start: + module.fail_json(msg='Invalid port range: "{0}"'.format(range_or_port)) + return list(range(start, end + 1)) + else: + try: + return [int(range_or_port)] + except Exception: + module.fail_json(msg='Invalid port: "{0}"'.format(range_or_port)) + + +def _split_colon_ipv6(text, module): + ''' + Split string by ':', while keeping IPv6 addresses in square brackets in one component. + ''' + if '[' not in text: + return text.split(':') + start = 0 + result = [] + while start < len(text): + i = text.find('[', start) + if i < 0: + result.extend(text[start:].split(':')) + break + j = text.find(']', i) + if j < 0: + module.fail_json(msg='Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(text, i + 1)) + result.extend(text[start:i].split(':')) + k = text.find(':', j) + if k < 0: + result[-1] += text[i:] + start = len(text) + else: + result[-1] += text[i:k] + if k == len(text): + result.append('') + break + start = k + 1 + return result + + +def _preprocess_command(module, values): + if 'command' not in values: + return values + value = values['command'] + if module.params['command_handling'] == 'correct': + if value is not None: + if not isinstance(value, list): + # convert from str to list + value = shlex.split(to_text(value, errors='surrogate_or_strict')) + value = [to_text(x, errors='surrogate_or_strict') for x in value] + elif value: + # convert from list to str + if isinstance(value, list): + value = shlex.split(' '.join([to_text(x, 
errors='surrogate_or_strict') for x in value])) + value = [to_text(x, errors='surrogate_or_strict') for x in value] + else: + value = shlex.split(to_text(value, errors='surrogate_or_strict')) + value = [to_text(x, errors='surrogate_or_strict') for x in value] + else: + return {} + return { + 'command': value, + } + + +def _preprocess_entrypoint(module, values): + if 'entrypoint' not in values: + return values + value = values['entrypoint'] + if module.params['command_handling'] == 'correct': + if value is not None: + value = [to_text(x, errors='surrogate_or_strict') for x in value] + elif value: + # convert from list to str. + value = shlex.split(' '.join([to_text(x, errors='surrogate_or_strict') for x in value])) + value = [to_text(x, errors='surrogate_or_strict') for x in value] + else: + return {} + return { + 'entrypoint': value, + } + + +def _preprocess_env(module, values): + if not values: + return {} + final_env = {} + if 'env_file' in values: + parsed_env_file = parse_env_file(values['env_file']) + for name, value in parsed_env_file.items(): + final_env[name] = to_text(value, errors='surrogate_or_strict') + if 'env' in values: + for name, value in values['env'].items(): + if not isinstance(value, string_types): + module.fail_json(msg='Non-string value found for env option. Ambiguous env options must be ' + 'wrapped in quotes to avoid them being interpreted. 
Key: %s' % (name, )) + final_env[name] = to_text(value, errors='surrogate_or_strict') + formatted_env = [] + for key, value in final_env.items(): + formatted_env.append('%s=%s' % (key, value)) + return { + 'env': formatted_env, + } + + +def _preprocess_convert_to_bytes(module, values, name, unlimited_value=None): + if name not in values: + return values + try: + value = values[name] + if unlimited_value is not None and value in ('unlimited', str(unlimited_value)): + value = unlimited_value + else: + value = human_to_bytes(value) + values[name] = value + return values + except ValueError as exc: + module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc))) + + +def _preprocess_mac_address(module, values): + if 'mac_address' not in values: + return values + return { + 'mac_address': values['mac_address'].replace('-', ':'), + } + + +def _preprocess_networks(module, values): + if module.params['networks_cli_compatible'] is True and values.get('networks') and 'network_mode' not in values: + # Same behavior as Docker CLI: if networks are specified, use the name of the first network as the value for network_mode + # (assuming no explicit value is specified for network_mode) + values['network_mode'] = values['networks'][0]['name'] + + if 'networks' in values: + for network in values['networks']: + if network['links']: + parsed_links = [] + for link in network['links']: + parsed_link = link.split(':', 1) + if len(parsed_link) == 1: + parsed_link = (link, link) + parsed_links.append(tuple(parsed_link)) + network['links'] = parsed_links + + return values + + +def _preprocess_sysctls(module, values): + if 'sysctls' in values: + for key, value in values['sysctls'].items(): + values['sysctls'][key] = to_text(value, errors='surrogate_or_strict') + return values + + +def _preprocess_tmpfs(module, values): + if 'tmpfs' not in values: + return values + result = {} + for tmpfs_spec in values['tmpfs']: + split_spec = tmpfs_spec.split(":", 1) + if len(split_spec) 
> 1: + result[split_spec[0]] = split_spec[1] + else: + result[split_spec[0]] = "" + return { + 'tmpfs': result + } + + +def _preprocess_ulimits(module, values): + if 'ulimits' not in values: + return values + result = [] + for limit in values['ulimits']: + limits = dict() + pieces = limit.split(':') + if len(pieces) >= 2: + limits['Name'] = pieces[0] + limits['Soft'] = int(pieces[1]) + limits['Hard'] = int(pieces[1]) + if len(pieces) == 3: + limits['Hard'] = int(pieces[2]) + result.append(limits) + return { + 'ulimits': result, + } + + +def _preprocess_mounts(module, values): + last = dict() + + def check_collision(t, name): + if t in last: + if name == last[t]: + module.fail_json(msg='The mount point "{0}" appears twice in the {1} option'.format(t, name)) + else: + module.fail_json(msg='The mount point "{0}" appears both in the {1} and {2} option'.format(t, name, last[t])) + last[t] = name + + if 'mounts' in values: + mounts = [] + for mount in values['mounts']: + target = mount['target'] + mount_type = mount['type'] + + check_collision(target, 'mounts') + + mount_dict = dict(mount) + + # Sanity checks + if mount['source'] is None and mount_type not in ('tmpfs', 'volume'): + module.fail_json(msg='source must be specified for mount "{0}" of type "{1}"'.format(target, mount_type)) + for option, req_mount_type in _MOUNT_OPTION_TYPES.items(): + if mount[option] is not None and mount_type != req_mount_type: + module.fail_json( + msg='{0} cannot be specified for mount "{1}" of type "{2}" (needs type "{3}")'.format(option, target, mount_type, req_mount_type) + ) + + # Streamline options + volume_options = mount_dict.pop('volume_options') + if mount_dict['volume_driver'] and volume_options: + mount_dict['volume_options'] = clean_dict_booleans_for_docker_api(volume_options) + if mount_dict['labels']: + mount_dict['labels'] = clean_dict_booleans_for_docker_api(mount_dict['labels']) + if mount_dict['tmpfs_size'] is not None: + try: + mount_dict['tmpfs_size'] = 
human_to_bytes(mount_dict['tmpfs_size']) + except ValueError as exc: + module.fail_json(msg='Failed to convert tmpfs_size of mount "{0}" to bytes: {1}'.format(target, to_native(exc))) + if mount_dict['tmpfs_mode'] is not None: + try: + mount_dict['tmpfs_mode'] = int(mount_dict['tmpfs_mode'], 8) + except Exception as dummy: + module.fail_json(msg='tmp_fs mode of mount "{0}" is not an octal string!'.format(target)) + + # Add result to list + mounts.append(omit_none_from_dict(mount_dict)) + values['mounts'] = mounts + if 'volumes' in values: + new_vols = [] + for vol in values['volumes']: + parts = vol.split(':') + if ':' in vol: + if len(parts) == 3: + host, container, mode = parts + if not _is_volume_permissions(mode): + module.fail_json(msg='Found invalid volumes mode: {0}'.format(mode)) + if re.match(r'[.~]', host): + host = os.path.abspath(os.path.expanduser(host)) + check_collision(container, 'volumes') + new_vols.append("%s:%s:%s" % (host, container, mode)) + continue + elif len(parts) == 2: + if not _is_volume_permissions(parts[1]) and re.match(r'[.~]', parts[0]): + host = os.path.abspath(os.path.expanduser(parts[0])) + check_collision(parts[1], 'volumes') + new_vols.append("%s:%s:rw" % (host, parts[1])) + continue + check_collision(parts[min(1, len(parts) - 1)], 'volumes') + new_vols.append(vol) + values['volumes'] = new_vols + new_binds = [] + for vol in new_vols: + host = None + if ':' in vol: + parts = vol.split(':') + if len(parts) == 3: + host, container, mode = parts + if not _is_volume_permissions(mode): + module.fail_json(msg='Found invalid volumes mode: {0}'.format(mode)) + elif len(parts) == 2: + if not _is_volume_permissions(parts[1]): + host, container, mode = (parts + ['rw']) + if host is not None: + new_binds.append('%s:%s:%s' % (host, container, mode)) + values['volume_binds'] = new_binds + return values + + +def _preprocess_log(module, values): + result = {} + if 'log_driver' not in values: + return result + result['log_driver'] = 
values['log_driver'] + if 'log_options' in values: + options = {} + for k, v in values['log_options'].items(): + if not isinstance(v, string_types): + module.warn( + "Non-string value found for log_options option '%s'. The value is automatically converted to '%s'. " + "If this is not correct, or you want to avoid such warnings, please quote the value." % ( + k, to_text(v, errors='surrogate_or_strict')) + ) + v = to_text(v, errors='surrogate_or_strict') + options[k] = v + result['log_options'] = options + return result + + +def _preprocess_ports(module, values): + if 'published_ports' in values: + if 'all' in values['published_ports']: + module.fail_json( + msg='Specifying "all" in published_ports is no longer allowed. Set publish_all_ports to "true" instead ' + 'to randomly assign port mappings for those not specified by published_ports.') + + binds = {} + for port in values['published_ports']: + parts = _split_colon_ipv6(to_text(port, errors='surrogate_or_strict'), module) + container_port = parts[-1] + protocol = '' + if '/' in container_port: + container_port, protocol = parts[-1].split('/') + container_ports = _parse_port_range(container_port, module) + + p_len = len(parts) + if p_len == 1: + port_binds = len(container_ports) * [(_DEFAULT_IP_REPLACEMENT_STRING, )] + elif p_len == 2: + if len(container_ports) == 1: + port_binds = [(_DEFAULT_IP_REPLACEMENT_STRING, parts[0])] + else: + port_binds = [(_DEFAULT_IP_REPLACEMENT_STRING, port) for port in _parse_port_range(parts[0], module)] + elif p_len == 3: + # We only allow IPv4 and IPv6 addresses for the bind address + ipaddr = parts[0] + if not re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', parts[0]) and not re.match(r'^\[[0-9a-fA-F:]+(?:|%[^\]/]+)\]$', ipaddr): + module.fail_json( + msg='Bind addresses for published ports must be IPv4 or IPv6 addresses, not hostnames. ' + 'Use the dig lookup to resolve hostnames. 
(Found hostname: {0})'.format(ipaddr) + ) + if re.match(r'^\[[0-9a-fA-F:]+\]$', ipaddr): + ipaddr = ipaddr[1:-1] + if parts[1]: + if len(container_ports) == 1: + port_binds = [(ipaddr, parts[1])] + else: + port_binds = [(ipaddr, port) for port in _parse_port_range(parts[1], module)] + else: + port_binds = len(container_ports) * [(ipaddr,)] + else: + module.fail_json( + msg='Invalid port description "%s" - expected 1 to 3 colon-separated parts, but got %d. ' + 'Maybe you forgot to use square brackets ([...]) around an IPv6 address?' % (port, p_len) + ) + + for bind, container_port in zip(port_binds, container_ports): + idx = '{0}/{1}'.format(container_port, protocol) if protocol else container_port + if idx in binds: + old_bind = binds[idx] + if isinstance(old_bind, list): + old_bind.append(bind) + else: + binds[idx] = [old_bind, bind] + else: + binds[idx] = bind + values['published_ports'] = binds + + exposed = [] + if 'exposed_ports' in values: + for port in values['exposed_ports']: + port = to_text(port, errors='surrogate_or_strict').strip() + protocol = 'tcp' + match = re.search(r'(/.+$)', port) + if match: + protocol = match.group(1).replace('/', '') + port = re.sub(r'/.+$', '', port) + exposed.append((port, protocol)) + if 'published_ports' in values: + # Any published port should also be exposed + for publish_port in values['published_ports']: + match = False + if isinstance(publish_port, string_types) and '/' in publish_port: + port, protocol = publish_port.split('/') + port = int(port) + else: + protocol = 'tcp' + port = int(publish_port) + for exposed_port in exposed: + if exposed_port[1] != protocol: + continue + if isinstance(exposed_port[0], string_types) and '-' in exposed_port[0]: + start_port, end_port = exposed_port[0].split('-') + if int(start_port) <= port <= int(end_port): + match = True + elif exposed_port[0] == port: + match = True + if not match: + exposed.append((port, protocol)) + values['ports'] = exposed + return values + + 
+OPTION_AUTO_REMOVE = ( + OptionGroup() + .add_option('auto_remove', type='bool') +) + +OPTION_BLKIO_WEIGHT = ( + OptionGroup() + .add_option('blkio_weight', type='int') +) + +OPTION_CAPABILITIES = ( + OptionGroup() + .add_option('capabilities', type='set', elements='str') +) + +OPTION_CAP_DROP = ( + OptionGroup() + .add_option('cap_drop', type='set', elements='str') +) + +OPTION_CGROUP_PARENT = ( + OptionGroup() + .add_option('cgroup_parent', type='str') +) + +OPTION_COMMAND = ( + OptionGroup(preprocess=_preprocess_command) + .add_option('command', type='list', elements='str', ansible_type='raw') +) + +OPTION_CPU_PERIOD = ( + OptionGroup() + .add_option('cpu_period', type='int') +) + +OPTION_CPU_QUOTA = ( + OptionGroup() + .add_option('cpu_quota', type='int') +) + +OPTION_CPUSET_CPUS = ( + OptionGroup() + .add_option('cpuset_cpus', type='str') +) + +OPTION_CPUSET_MEMS = ( + OptionGroup() + .add_option('cpuset_mems', type='str') +) + +OPTION_CPU_SHARES = ( + OptionGroup() + .add_option('cpu_shares', type='int') +) + +OPTION_ENTRYPOINT = ( + OptionGroup(preprocess=_preprocess_entrypoint) + .add_option('entrypoint', type='list', elements='str') +) + +OPTION_CPUS = ( + OptionGroup() + .add_option('cpus', type='int', ansible_type='float') +) + +OPTION_DETACH_INTERACTIVE = ( + OptionGroup() + .add_option('detach', type='bool') + .add_option('interactive', type='bool') +) + +OPTION_DEVICES = ( + OptionGroup() + .add_option('devices', type='set', elements='dict', ansible_elements='str') +) + +OPTION_DEVICE_READ_BPS = ( + OptionGroup() + .add_option('device_read_bps', type='set', elements='dict', ansible_suboptions=dict( + path=dict(required=True, type='str'), + rate=dict(required=True, type='str'), + )) +) + +OPTION_DEVICE_WRITE_BPS = ( + OptionGroup() + .add_option('device_write_bps', type='set', elements='dict', ansible_suboptions=dict( + path=dict(required=True, type='str'), + rate=dict(required=True, type='str'), + )) +) + +OPTION_DEVICE_READ_IOPS = ( + OptionGroup() 
+ .add_option('device_read_iops', type='set', elements='dict', ansible_suboptions=dict( + path=dict(required=True, type='str'), + rate=dict(required=True, type='int'), + )) +) + +OPTION_DEVICE_WRITE_IOPS = ( + OptionGroup() + .add_option('device_write_iops', type='set', elements='dict', ansible_suboptions=dict( + path=dict(required=True, type='str'), + rate=dict(required=True, type='int'), + )) +) + +OPTION_DEVICE_REQUESTS = ( + OptionGroup() + .add_option('device_requests', type='set', elements='dict', ansible_suboptions=dict( + capabilities=dict(type='list', elements='list'), + count=dict(type='int'), + device_ids=dict(type='list', elements='str'), + driver=dict(type='str'), + options=dict(type='dict'), + )) +) + +OPTION_DNS_SERVERS = ( + OptionGroup() + .add_option('dns_servers', type='list', elements='str') +) + +OPTION_DNS_OPTS = ( + OptionGroup() + .add_option('dns_opts', type='set', elements='str') +) + +OPTION_DNS_SEARCH_DOMAINS = ( + OptionGroup() + .add_option('dns_search_domains', type='list', elements='str') +) + +OPTION_DOMAINNAME = ( + OptionGroup() + .add_option('domainname', type='str') +) + +OPTION_ENVIRONMENT = ( + OptionGroup(preprocess=_preprocess_env) + .add_option('env', type='set', ansible_type='dict', elements='str', needs_no_suboptions=True) + .add_option('env_file', type='set', ansible_type='path', elements='str', not_a_container_option=True) +) + +OPTION_ETC_HOSTS = ( + OptionGroup() + .add_option('etc_hosts', type='set', ansible_type='dict', elements='str', needs_no_suboptions=True) +) + +OPTION_GROUPS = ( + OptionGroup() + .add_option('groups', type='set', elements='str') +) + +OPTION_HEALTHCHECK = ( + OptionGroup() + .add_option('healthcheck', type='dict', ansible_suboptions=dict( + test=dict(type='raw'), + interval=dict(type='str'), + timeout=dict(type='str'), + start_period=dict(type='str'), + retries=dict(type='int'), + )) +) + +OPTION_HOSTNAME = ( + OptionGroup() + .add_option('hostname', type='str') +) + +OPTION_IMAGE = ( + 
OptionGroup(preprocess=_preprocess_networks) + .add_option('image', type='str') +) + +OPTION_INIT = ( + OptionGroup() + .add_option('init', type='bool') +) + +OPTION_IPC_MODE = ( + OptionGroup() + .add_option('ipc_mode', type='str') +) + +OPTION_KERNEL_MEMORY = ( + OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='kernel_memory')) + .add_option('kernel_memory', type='int', ansible_type='str') +) + +OPTION_LABELS = ( + OptionGroup() + .add_option('labels', type='dict', needs_no_suboptions=True) +) + +OPTION_LINKS = ( + OptionGroup() + .add_option('links', type='set', elements='list', ansible_elements='str') +) + +OPTION_LOG_DRIVER_OPTIONS = ( + OptionGroup(preprocess=_preprocess_log, ansible_required_by={'log_options': ['log_driver']}) + .add_option('log_driver', type='str') + .add_option('log_options', type='dict', ansible_aliases=['log_opt'], needs_no_suboptions=True) +) + +OPTION_MAC_ADDRESS = ( + OptionGroup(preprocess=_preprocess_mac_address) + .add_option('mac_address', type='str') +) + +OPTION_MEMORY = ( + OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory')) + .add_option('memory', type='int', ansible_type='str') +) + +OPTION_MEMORY_RESERVATION = ( + OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory_reservation')) + .add_option('memory_reservation', type='int', ansible_type='str') +) + +OPTION_MEMORY_SWAP = ( + OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory_swap', unlimited_value=-1)) + .add_option('memory_swap', type='int', ansible_type='str') +) + +OPTION_MEMORY_SWAPPINESS = ( + OptionGroup() + .add_option('memory_swappiness', type='int') +) + +OPTION_STOP_TIMEOUT = ( + OptionGroup() + .add_option('stop_timeout', type='int', default_comparison='ignore') +) + +OPTION_NETWORK = ( + OptionGroup(preprocess=_preprocess_networks) + .add_option('network_mode', type='str') + .add_option('networks', type='set', elements='dict', ansible_suboptions=dict( + name=dict(type='str', 
required=True), + ipv4_address=dict(type='str'), + ipv6_address=dict(type='str'), + aliases=dict(type='list', elements='str'), + links=dict(type='list', elements='str'), + )) +) + +OPTION_OOM_KILLER = ( + OptionGroup() + .add_option('oom_killer', type='bool') +) + +OPTION_OOM_SCORE_ADJ = ( + OptionGroup() + .add_option('oom_score_adj', type='int') +) + +OPTION_PID_MODE = ( + OptionGroup() + .add_option('pid_mode', type='str') +) + +OPTION_PIDS_LIMIT = ( + OptionGroup() + .add_option('pids_limit', type='int') +) + +OPTION_PRIVILEGED = ( + OptionGroup() + .add_option('privileged', type='bool') +) + +OPTION_READ_ONLY = ( + OptionGroup() + .add_option('read_only', type='bool') +) + +OPTION_RESTART_POLICY = ( + OptionGroup(ansible_required_by={'restart_retries': ['restart_policy']}) + .add_option('restart_policy', type='str', ansible_choices=['no', 'on-failure', 'always', 'unless-stopped']) + .add_option('restart_retries', type='int') +) + +OPTION_RUNTIME = ( + OptionGroup() + .add_option('runtime', type='str') +) + +OPTION_SECURITY_OPTS = ( + OptionGroup() + .add_option('security_opts', type='set', elements='str') +) + +OPTION_SHM_SIZE = ( + OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='shm_size')) + .add_option('shm_size', type='int', ansible_type='str') +) + +OPTION_STOP_SIGNAL = ( + OptionGroup() + .add_option('stop_signal', type='str') +) + +OPTION_STORAGE_OPTS = ( + OptionGroup() + .add_option('storage_opts', type='dict', needs_no_suboptions=True) +) + +OPTION_SYSCTLS = ( + OptionGroup(preprocess=_preprocess_sysctls) + .add_option('sysctls', type='dict', needs_no_suboptions=True) +) + +OPTION_TMPFS = ( + OptionGroup(preprocess=_preprocess_tmpfs) + .add_option('tmpfs', type='dict', ansible_type='list', ansible_elements='str') +) + +OPTION_TTY = ( + OptionGroup() + .add_option('tty', type='bool') +) + +OPTION_ULIMITS = ( + OptionGroup(preprocess=_preprocess_ulimits) + .add_option('ulimits', type='set', elements='dict', ansible_elements='str') +) 
+ +OPTION_USER = ( + OptionGroup() + .add_option('user', type='str') +) + +OPTION_USERNS_MODE = ( + OptionGroup() + .add_option('userns_mode', type='str') +) + +OPTION_UTS = ( + OptionGroup() + .add_option('uts', type='str') +) + +OPTION_VOLUME_DRIVER = ( + OptionGroup() + .add_option('volume_driver', type='str') +) + +OPTION_VOLUMES_FROM = ( + OptionGroup() + .add_option('volumes_from', type='set', elements='str') +) + +OPTION_WORKING_DIR = ( + OptionGroup() + .add_option('working_dir', type='str') +) + +OPTION_MOUNTS_VOLUMES = ( + OptionGroup(preprocess=_preprocess_mounts) + .add_option('mounts', type='set', elements='dict', ansible_suboptions=dict( + target=dict(type='str', required=True), + source=dict(type='str'), + type=dict(type='str', choices=['bind', 'volume', 'tmpfs', 'npipe'], default='volume'), + read_only=dict(type='bool'), + consistency=dict(type='str', choices=['default', 'consistent', 'cached', 'delegated']), + propagation=dict(type='str', choices=['private', 'rprivate', 'shared', 'rshared', 'slave', 'rslave']), + no_copy=dict(type='bool'), + labels=dict(type='dict'), + volume_driver=dict(type='str'), + volume_options=dict(type='dict'), + tmpfs_size=dict(type='str'), + tmpfs_mode=dict(type='str'), + )) + .add_option('volumes', type='set', elements='str') + .add_option('volume_binds', type='set', elements='str', not_an_ansible_option=True, copy_comparison_from='volumes') +) + +OPTION_PORTS = ( + OptionGroup(preprocess=_preprocess_ports) + .add_option('exposed_ports', type='set', elements='str', ansible_aliases=['exposed', 'expose']) + .add_option('publish_all_ports', type='bool') + .add_option('published_ports', type='dict', ansible_type='list', ansible_elements='str', ansible_aliases=['ports']) + .add_option('ports', type='set', elements='str', not_an_ansible_option=True, default_comparison='ignore') +) + +OPTIONS = [ + OPTION_AUTO_REMOVE, + OPTION_BLKIO_WEIGHT, + OPTION_CAPABILITIES, + OPTION_CAP_DROP, + OPTION_CGROUP_PARENT, + OPTION_COMMAND, + 
OPTION_CPU_PERIOD, + OPTION_CPU_QUOTA, + OPTION_CPUSET_CPUS, + OPTION_CPUSET_MEMS, + OPTION_CPU_SHARES, + OPTION_ENTRYPOINT, + OPTION_CPUS, + OPTION_DETACH_INTERACTIVE, + OPTION_DEVICES, + OPTION_DEVICE_READ_BPS, + OPTION_DEVICE_WRITE_BPS, + OPTION_DEVICE_READ_IOPS, + OPTION_DEVICE_WRITE_IOPS, + OPTION_DEVICE_REQUESTS, + OPTION_DNS_SERVERS, + OPTION_DNS_OPTS, + OPTION_DNS_SEARCH_DOMAINS, + OPTION_DOMAINNAME, + OPTION_ENVIRONMENT, + OPTION_ETC_HOSTS, + OPTION_GROUPS, + OPTION_HEALTHCHECK, + OPTION_HOSTNAME, + OPTION_IMAGE, + OPTION_INIT, + OPTION_IPC_MODE, + OPTION_KERNEL_MEMORY, + OPTION_LABELS, + OPTION_LINKS, + OPTION_LOG_DRIVER_OPTIONS, + OPTION_MAC_ADDRESS, + OPTION_MEMORY, + OPTION_MEMORY_RESERVATION, + OPTION_MEMORY_SWAP, + OPTION_MEMORY_SWAPPINESS, + OPTION_STOP_TIMEOUT, + OPTION_NETWORK, + OPTION_OOM_KILLER, + OPTION_OOM_SCORE_ADJ, + OPTION_PID_MODE, + OPTION_PIDS_LIMIT, + OPTION_PRIVILEGED, + OPTION_READ_ONLY, + OPTION_RESTART_POLICY, + OPTION_RUNTIME, + OPTION_SECURITY_OPTS, + OPTION_SHM_SIZE, + OPTION_STOP_SIGNAL, + OPTION_STORAGE_OPTS, + OPTION_SYSCTLS, + OPTION_TMPFS, + OPTION_TTY, + OPTION_ULIMITS, + OPTION_USER, + OPTION_USERNS_MODE, + OPTION_UTS, + OPTION_VOLUME_DRIVER, + OPTION_VOLUMES_FROM, + OPTION_WORKING_DIR, + OPTION_MOUNTS_VOLUMES, + OPTION_PORTS, +] diff --git a/plugins/module_utils/module_container.py b/plugins/module_utils/module_container/docker_api.py similarity index 51% rename from plugins/module_utils/module_container.py rename to plugins/module_utils/module_container/docker_api.py index acc0ff23c..264c60d05 100644 --- a/plugins/module_utils/module_container.py +++ b/plugins/module_utils/module_container/docker_api.py @@ -5,26 +5,91 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -import abc import json -import os -import re -import shlex import traceback -from functools import partial - from ansible.module_utils.common.text.converters import to_native, to_text from 
ansible.module_utils.common.text.formatters import human_to_bytes -from ansible.module_utils.six import string_types from ansible_collections.community.docker.plugins.module_utils.common_api import ( AnsibleDockerClient, RequestException, ) +from ansible_collections.community.docker.plugins.module_utils.module_container.base import ( + OPTION_AUTO_REMOVE, + OPTION_BLKIO_WEIGHT, + OPTION_CAPABILITIES, + OPTION_CAP_DROP, + OPTION_CGROUP_PARENT, + OPTION_COMMAND, + OPTION_CPU_PERIOD, + OPTION_CPU_QUOTA, + OPTION_CPUSET_CPUS, + OPTION_CPUSET_MEMS, + OPTION_CPU_SHARES, + OPTION_ENTRYPOINT, + OPTION_CPUS, + OPTION_DETACH_INTERACTIVE, + OPTION_DEVICES, + OPTION_DEVICE_READ_BPS, + OPTION_DEVICE_WRITE_BPS, + OPTION_DEVICE_READ_IOPS, + OPTION_DEVICE_WRITE_IOPS, + OPTION_DEVICE_REQUESTS, + OPTION_DNS_SERVERS, + OPTION_DNS_OPTS, + OPTION_DNS_SEARCH_DOMAINS, + OPTION_DOMAINNAME, + OPTION_ENVIRONMENT, + OPTION_ETC_HOSTS, + OPTION_GROUPS, + OPTION_HEALTHCHECK, + OPTION_HOSTNAME, + OPTION_IMAGE, + OPTION_INIT, + OPTION_IPC_MODE, + OPTION_KERNEL_MEMORY, + OPTION_LABELS, + OPTION_LINKS, + OPTION_LOG_DRIVER_OPTIONS, + OPTION_MAC_ADDRESS, + OPTION_MEMORY, + OPTION_MEMORY_RESERVATION, + OPTION_MEMORY_SWAP, + OPTION_MEMORY_SWAPPINESS, + OPTION_STOP_TIMEOUT, + OPTION_NETWORK, + OPTION_OOM_KILLER, + OPTION_OOM_SCORE_ADJ, + OPTION_PID_MODE, + OPTION_PIDS_LIMIT, + OPTION_PRIVILEGED, + OPTION_READ_ONLY, + OPTION_RESTART_POLICY, + OPTION_RUNTIME, + OPTION_SECURITY_OPTS, + OPTION_SHM_SIZE, + OPTION_STOP_SIGNAL, + OPTION_STORAGE_OPTS, + OPTION_SYSCTLS, + OPTION_TMPFS, + OPTION_TTY, + OPTION_ULIMITS, + OPTION_USER, + OPTION_USERNS_MODE, + OPTION_UTS, + OPTION_VOLUME_DRIVER, + OPTION_VOLUMES_FROM, + OPTION_WORKING_DIR, + OPTION_MOUNTS_VOLUMES, + OPTION_PORTS, + OPTIONS, + Engine, + EngineDriver, +) + from ansible_collections.community.docker.plugins.module_utils.util import ( - clean_dict_booleans_for_docker_api, omit_none_from_dict, parse_healthcheck, ) @@ -40,7 +105,6 @@ from 
ansible_collections.community.docker.plugins.module_utils._api.utils.utils import ( convert_port_bindings, normalize_links, - parse_env_file, parse_repository_tag, ) @@ -67,266 +131,9 @@ def _get_ansible_type(type): return type -class Option(object): - def __init__( - self, - name, - type, - owner, - ansible_type=None, - elements=None, - ansible_elements=None, - ansible_suboptions=None, - ansible_aliases=None, - ansible_choices=None, - needs_no_suboptions=False, - default_comparison=None, - not_a_container_option=False, - not_an_ansible_option=False, - copy_comparison_from=None, - ): - self.name = name - self.type = type - self.ansible_type = ansible_type or _get_ansible_type(type) - needs_elements = self.type in ('list', 'set') - needs_ansible_elements = self.ansible_type in ('list', ) - if elements is not None and not needs_elements: - raise Exception('elements only allowed for lists/sets') - if elements is None and needs_elements: - raise Exception('elements required for lists/sets') - if ansible_elements is not None and not needs_ansible_elements: - raise Exception('Ansible elements only allowed for Ansible lists') - if (elements is None and ansible_elements is None) and needs_ansible_elements: - raise Exception('Ansible elements required for Ansible lists') - self.elements = elements if needs_elements else None - self.ansible_elements = (ansible_elements or _get_ansible_type(elements)) if needs_ansible_elements else None - needs_suboptions = (self.ansible_type == 'list' and self.ansible_elements == 'dict') or (self.ansible_type == 'dict') - if ansible_suboptions is not None and not needs_suboptions: - raise Exception('suboptions only allowed for Ansible lists with dicts, or Ansible dicts') - if ansible_suboptions is None and needs_suboptions and not needs_no_suboptions and not not_an_ansible_option: - raise Exception('suboptions required for Ansible lists with dicts, or Ansible dicts') - self.ansible_suboptions = ansible_suboptions if needs_suboptions else 
None - self.ansible_aliases = ansible_aliases or [] - self.ansible_choices = ansible_choices - comparison_type = self.type - if comparison_type == 'set' and self.elements == 'dict': - comparison_type = 'set(dict)' - elif comparison_type not in ('set', 'list', 'dict'): - comparison_type = 'value' - self.comparison_type = comparison_type - if default_comparison is not None: - self.comparison = default_comparison - elif comparison_type in ('list', 'value'): - self.comparison = 'strict' - else: - self.comparison = 'allow_more_present' - self.not_a_container_option = not_a_container_option - self.not_an_ansible_option = not_an_ansible_option - self.copy_comparison_from = copy_comparison_from - - -class OptionGroup(object): - def __init__( - self, - preprocess=None, - ansible_mutually_exclusive=None, - ansible_required_together=None, - ansible_required_one_of=None, - ansible_required_if=None, - ansible_required_by=None, - ): - if preprocess is None: - def preprocess(module, values): - return values - self.preprocess = preprocess - self.options = [] - self.engines = {} - self.ansible_mutually_exclusive = ansible_mutually_exclusive or [] - self.ansible_required_together = ansible_required_together or [] - self.ansible_required_one_of = ansible_required_one_of or [] - self.ansible_required_if = ansible_required_if or [] - self.ansible_required_by = ansible_required_by or {} - self.argument_spec = {} - - def add_option(self, *args, **kwargs): - option = Option(*args, owner=self, **kwargs) - if not option.not_a_container_option: - self.options.append(option) - if not option.not_an_ansible_option: - ansible_option = { - 'type': option.ansible_type, - } - if option.ansible_elements is not None: - ansible_option['elements'] = option.ansible_elements - if option.ansible_suboptions is not None: - ansible_option['options'] = option.ansible_suboptions - if option.ansible_aliases: - ansible_option['aliases'] = option.ansible_aliases - if option.ansible_choices is not None: - 
ansible_option['choices'] = option.ansible_choices - self.argument_spec[option.name] = ansible_option - return self - - def supports_engine(self, engine_name): - return engine_name in self.engines - - def get_engine(self, engine_name): - return self.engines[engine_name] - - def add_docker_api(self, docker_api): - self.engines['docker_api'] = docker_api - return self - - _SENTRY = object() -class Engine(object): - min_api_version = None # string or None - min_api_version_obj = None # LooseVersion object or None - - @abc.abstractmethod - def get_value(self, module, container, api_version, options): - pass - - @abc.abstractmethod - def set_value(self, module, data, api_version, options, values): - pass - - @abc.abstractmethod - def get_expected_values(self, module, client, api_version, options, image, values): - pass - - @abc.abstractmethod - def ignore_mismatching_result(self, module, client, api_version, option, image, container_value, expected_value): - pass - - @abc.abstractmethod - def preprocess_value(self, module, client, api_version, options, values): - pass - - @abc.abstractmethod - def update_value(self, module, data, api_version, options, values): - pass - - @abc.abstractmethod - def can_set_value(self, api_version): - pass - - @abc.abstractmethod - def can_update_value(self, api_version): - pass - - -class EngineDriver(object): - name = None # string - - @abc.abstractmethod - def setup(self, argument_spec, mutually_exclusive=None, required_together=None, required_one_of=None, required_if=None, required_by=None): - # Return (module, active_options, client) - pass - - @abc.abstractmethod - def get_api_version(self, client): - pass - - @abc.abstractmethod - def get_container_id(self, container): - pass - - @abc.abstractmethod - def get_image_from_container(self, container): - pass - - @abc.abstractmethod - def is_container_removing(self, container): - pass - - @abc.abstractmethod - def is_container_running(self, container): - pass - - @abc.abstractmethod - 
def is_container_paused(self, container): - pass - - @abc.abstractmethod - def inspect_container_by_name(self, client, container_name): - pass - - @abc.abstractmethod - def inspect_container_by_id(self, client, container_id): - pass - - @abc.abstractmethod - def inspect_image_by_id(self, client, image_id): - pass - - @abc.abstractmethod - def inspect_image_by_name(self, client, repository, tag): - pass - - @abc.abstractmethod - def pull_image(self, client, repository, tag): - pass - - @abc.abstractmethod - def pause_container(self, client, container_id): - pass - - @abc.abstractmethod - def unpause_container(self, client, container_id): - pass - - @abc.abstractmethod - def disconnect_container_from_network(self, client, container_id, network_id): - pass - - @abc.abstractmethod - def connect_container_to_network(self, client, container_id, network_id, parameters=None): - pass - - @abc.abstractmethod - def create_container(self, client, container_name, create_parameters): - pass - - @abc.abstractmethod - def start_container(self, client, container_id): - pass - - @abc.abstractmethod - def wait_for_container(self, client, container_id): - pass - - @abc.abstractmethod - def get_container_output(self, client, container_id): - pass - - @abc.abstractmethod - def update_container(self, client, container_id, update_parameters): - pass - - @abc.abstractmethod - def restart_container(self, client, container_id, timeout=None): - pass - - @abc.abstractmethod - def kill_container(self, client, container_id, kill_signal=None): - pass - - @abc.abstractmethod - def stop_container(self, client, container_id, timeout=None): - pass - - @abc.abstractmethod - def remove_container(self, client, container_id, remove_volumes=False, link=False, force=False): - pass - - @abc.abstractmethod - def run(self, runner, client): - pass - - class DockerAPIEngineDriver(EngineDriver): name = 'docker_api' @@ -747,57 +554,6 @@ def _is_volume_permissions(mode): return True -def 
_parse_port_range(range_or_port, module): - ''' - Parses a string containing either a single port or a range of ports. - - Returns a list of integers for each port in the list. - ''' - if '-' in range_or_port: - try: - start, end = [int(port) for port in range_or_port.split('-')] - except Exception: - module.fail_json(msg='Invalid port range: "{0}"'.format(range_or_port)) - if end < start: - module.fail_json(msg='Invalid port range: "{0}"'.format(range_or_port)) - return list(range(start, end + 1)) - else: - try: - return [int(range_or_port)] - except Exception: - module.fail_json(msg='Invalid port: "{0}"'.format(range_or_port)) - - -def _split_colon_ipv6(text, module): - ''' - Split string by ':', while keeping IPv6 addresses in square brackets in one component. - ''' - if '[' not in text: - return text.split(':') - start = 0 - result = [] - while start < len(text): - i = text.find('[', start) - if i < 0: - result.extend(text[start:].split(':')) - break - j = text.find(']', i) - if j < 0: - module.fail_json(msg='Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(text, i + 1)) - result.extend(text[start:i].split(':')) - k = text.find(':', j) - if k < 0: - result[-1] += text[i:] - start = len(text) - else: - result[-1] += text[i:k] - if k == len(text): - result.append('') - break - start = k + 1 - return result - - def _normalize_port(port): if '/' not in port: return port + '/tcp' @@ -848,71 +604,6 @@ def _set_value_detach_interactive(module, data, api_version, options, values): data['StdinOnce'] = True -def _preprocess_command(module, values): - if 'command' not in values: - return values - value = values['command'] - if module.params['command_handling'] == 'correct': - if value is not None: - if not isinstance(value, list): - # convert from str to list - value = shlex.split(to_text(value, errors='surrogate_or_strict')) - value = [to_text(x, errors='surrogate_or_strict') for x in value] - elif value: - # convert from list to str - if 
isinstance(value, list): - value = shlex.split(' '.join([to_text(x, errors='surrogate_or_strict') for x in value])) - value = [to_text(x, errors='surrogate_or_strict') for x in value] - else: - value = shlex.split(to_text(value, errors='surrogate_or_strict')) - value = [to_text(x, errors='surrogate_or_strict') for x in value] - else: - return {} - return { - 'command': value, - } - - -def _preprocess_entrypoint(module, values): - if 'entrypoint' not in values: - return values - value = values['entrypoint'] - if module.params['command_handling'] == 'correct': - if value is not None: - value = [to_text(x, errors='surrogate_or_strict') for x in value] - elif value: - # convert from list to str. - value = shlex.split(' '.join([to_text(x, errors='surrogate_or_strict') for x in value])) - value = [to_text(x, errors='surrogate_or_strict') for x in value] - else: - return {} - return { - 'entrypoint': value, - } - - -def _preprocess_env(module, values): - if not values: - return {} - final_env = {} - if 'env_file' in values: - parsed_env_file = parse_env_file(values['env_file']) - for name, value in parsed_env_file.items(): - final_env[name] = to_text(value, errors='surrogate_or_strict') - if 'env' in values: - for name, value in values['env'].items(): - if not isinstance(value, string_types): - module.fail_json(msg='Non-string value found for env option. Ambiguous env options must be ' - 'wrapped in quotes to avoid them being interpreted. 
Key: %s' % (name, )) - final_env[name] = to_text(value, errors='surrogate_or_strict') - formatted_env = [] - for key, value in final_env.items(): - formatted_env.append('%s=%s' % (key, value)) - return { - 'env': formatted_env, - } - - def _get_expected_env_value(module, client, api_version, image, value, sentry): expected_env = {} if image and image['Config'].get('Env'): @@ -1105,34 +796,6 @@ def _ignore_mismatching_label_result(module, client, api_version, option, image, return False -def _preprocess_mac_address(module, values): - if 'mac_address' not in values: - return values - return { - 'mac_address': values['mac_address'].replace('-', ':'), - } - - -def _preprocess_networks(module, values): - if module.params['networks_cli_compatible'] is True and values.get('networks') and 'network_mode' not in values: - # Same behavior as Docker CLI: if networks are specified, use the name of the first network as the value for network_mode - # (assuming no explicit value is specified for network_mode) - values['network_mode'] = values['networks'][0]['name'] - - if 'networks' in values: - for network in values['networks']: - if network['links']: - parsed_links = [] - for link in network['links']: - parsed_link = link.split(':', 1) - if len(parsed_link) == 1: - parsed_link = (link, link) - parsed_links.append(tuple(parsed_link)) - network['links'] = parsed_links - - return values - - def _ignore_mismatching_network_result(module, client, api_version, option, image, container_value, expected_value): # 'networks' is handled out-of-band if option.name == 'networks': @@ -1182,138 +845,6 @@ def _set_values_network(module, data, api_version, options, values): data['HostConfig']['NetworkMode'] = value -def _preprocess_sysctls(module, values): - if 'sysctls' in values: - for key, value in values['sysctls'].items(): - values['sysctls'][key] = to_text(value, errors='surrogate_or_strict') - return values - - -def _preprocess_tmpfs(module, values): - if 'tmpfs' not in values: - return 
values - result = {} - for tmpfs_spec in values['tmpfs']: - split_spec = tmpfs_spec.split(":", 1) - if len(split_spec) > 1: - result[split_spec[0]] = split_spec[1] - else: - result[split_spec[0]] = "" - return { - 'tmpfs': result - } - - -def _preprocess_ulimits(module, values): - if 'ulimits' not in values: - return values - result = [] - for limit in values['ulimits']: - limits = dict() - pieces = limit.split(':') - if len(pieces) >= 2: - limits['Name'] = pieces[0] - limits['Soft'] = int(pieces[1]) - limits['Hard'] = int(pieces[1]) - if len(pieces) == 3: - limits['Hard'] = int(pieces[2]) - result.append(limits) - return { - 'ulimits': result, - } - - -def _preprocess_mounts(module, values): - last = dict() - - def check_collision(t, name): - if t in last: - if name == last[t]: - module.fail_json(msg='The mount point "{0}" appears twice in the {1} option'.format(t, name)) - else: - module.fail_json(msg='The mount point "{0}" appears both in the {1} and {2} option'.format(t, name, last[t])) - last[t] = name - - if 'mounts' in values: - mounts = [] - for mount in values['mounts']: - target = mount['target'] - mount_type = mount['type'] - - check_collision(target, 'mounts') - - mount_dict = dict(mount) - - # Sanity checks - if mount['source'] is None and mount_type not in ('tmpfs', 'volume'): - module.fail_json(msg='source must be specified for mount "{0}" of type "{1}"'.format(target, mount_type)) - for option, req_mount_type in _MOUNT_OPTION_TYPES.items(): - if mount[option] is not None and mount_type != req_mount_type: - module.fail_json( - msg='{0} cannot be specified for mount "{1}" of type "{2}" (needs type "{3}")'.format(option, target, mount_type, req_mount_type) - ) - - # Streamline options - volume_options = mount_dict.pop('volume_options') - if mount_dict['volume_driver'] and volume_options: - mount_dict['volume_options'] = clean_dict_booleans_for_docker_api(volume_options) - if mount_dict['labels']: - mount_dict['labels'] = 
clean_dict_booleans_for_docker_api(mount_dict['labels']) - if mount_dict['tmpfs_size'] is not None: - try: - mount_dict['tmpfs_size'] = human_to_bytes(mount_dict['tmpfs_size']) - except ValueError as exc: - module.fail_json(msg='Failed to convert tmpfs_size of mount "{0}" to bytes: {1}'.format(target, to_native(exc))) - if mount_dict['tmpfs_mode'] is not None: - try: - mount_dict['tmpfs_mode'] = int(mount_dict['tmpfs_mode'], 8) - except Exception as dummy: - module.fail_json(msg='tmp_fs mode of mount "{0}" is not an octal string!'.format(target)) - - # Add result to list - mounts.append(omit_none_from_dict(mount_dict)) - values['mounts'] = mounts - if 'volumes' in values: - new_vols = [] - for vol in values['volumes']: - parts = vol.split(':') - if ':' in vol: - if len(parts) == 3: - host, container, mode = parts - if not _is_volume_permissions(mode): - module.fail_json(msg='Found invalid volumes mode: {0}'.format(mode)) - if re.match(r'[.~]', host): - host = os.path.abspath(os.path.expanduser(host)) - check_collision(container, 'volumes') - new_vols.append("%s:%s:%s" % (host, container, mode)) - continue - elif len(parts) == 2: - if not _is_volume_permissions(parts[1]) and re.match(r'[.~]', parts[0]): - host = os.path.abspath(os.path.expanduser(parts[0])) - check_collision(parts[1], 'volumes') - new_vols.append("%s:%s:rw" % (host, parts[1])) - continue - check_collision(parts[min(1, len(parts) - 1)], 'volumes') - new_vols.append(vol) - values['volumes'] = new_vols - new_binds = [] - for vol in new_vols: - host = None - if ':' in vol: - parts = vol.split(':') - if len(parts) == 3: - host, container, mode = parts - if not _is_volume_permissions(mode): - module.fail_json(msg='Found invalid volumes mode: {0}'.format(mode)) - elif len(parts) == 2: - if not _is_volume_permissions(parts[1]): - host, container, mode = (parts + ['rw']) - if host is not None: - new_binds.append('%s:%s:%s' % (host, container, mode)) - values['volume_binds'] = new_binds - return values - - 
def _get_values_mounts(module, container, api_version, options): volumes = container['Config'].get('Volumes') binds = container['HostConfig'].get('Binds') @@ -1479,26 +1010,6 @@ def _set_values_mounts(module, data, api_version, options, values): data['HostConfig']['Binds'] = values['volume_binds'] -def _preprocess_log(module, values): - result = {} - if 'log_driver' not in values: - return result - result['log_driver'] = values['log_driver'] - if 'log_options' in values: - options = {} - for k, v in values['log_options'].items(): - if not isinstance(v, string_types): - module.warn( - "Non-string value found for log_options option '%s'. The value is automatically converted to '%s'. " - "If this is not correct, or you want to avoid such warnings, please quote the value." % ( - k, to_text(v, errors='surrogate_or_strict')) - ) - v = to_text(v, errors='surrogate_or_strict') - options[k] = v - result['log_options'] = options - return result - - def _get_values_log(module, container, api_version, options): log_config = container['HostConfig'].get('LogConfig') or {} return { @@ -1548,100 +1059,6 @@ def _update_value_restart(module, data, api_version, options, values): } -def _preprocess_ports(module, values): - if 'published_ports' in values: - if 'all' in values['published_ports']: - module.fail_json( - msg='Specifying "all" in published_ports is no longer allowed. 
Set publish_all_ports to "true" instead ' - 'to randomly assign port mappings for those not specified by published_ports.') - - binds = {} - for port in values['published_ports']: - parts = _split_colon_ipv6(to_text(port, errors='surrogate_or_strict'), module) - container_port = parts[-1] - protocol = '' - if '/' in container_port: - container_port, protocol = parts[-1].split('/') - container_ports = _parse_port_range(container_port, module) - - p_len = len(parts) - if p_len == 1: - port_binds = len(container_ports) * [(_DEFAULT_IP_REPLACEMENT_STRING, )] - elif p_len == 2: - if len(container_ports) == 1: - port_binds = [(_DEFAULT_IP_REPLACEMENT_STRING, parts[0])] - else: - port_binds = [(_DEFAULT_IP_REPLACEMENT_STRING, port) for port in _parse_port_range(parts[0], module)] - elif p_len == 3: - # We only allow IPv4 and IPv6 addresses for the bind address - ipaddr = parts[0] - if not re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', parts[0]) and not re.match(r'^\[[0-9a-fA-F:]+(?:|%[^\]/]+)\]$', ipaddr): - module.fail_json( - msg='Bind addresses for published ports must be IPv4 or IPv6 addresses, not hostnames. ' - 'Use the dig lookup to resolve hostnames. (Found hostname: {0})'.format(ipaddr) - ) - if re.match(r'^\[[0-9a-fA-F:]+\]$', ipaddr): - ipaddr = ipaddr[1:-1] - if parts[1]: - if len(container_ports) == 1: - port_binds = [(ipaddr, parts[1])] - else: - port_binds = [(ipaddr, port) for port in _parse_port_range(parts[1], module)] - else: - port_binds = len(container_ports) * [(ipaddr,)] - else: - module.fail_json( - msg='Invalid port description "%s" - expected 1 to 3 colon-separated parts, but got %d. ' - 'Maybe you forgot to use square brackets ([...]) around an IPv6 address?' 
% (port, p_len) - ) - - for bind, container_port in zip(port_binds, container_ports): - idx = '{0}/{1}'.format(container_port, protocol) if protocol else container_port - if idx in binds: - old_bind = binds[idx] - if isinstance(old_bind, list): - old_bind.append(bind) - else: - binds[idx] = [old_bind, bind] - else: - binds[idx] = bind - values['published_ports'] = binds - - exposed = [] - if 'exposed_ports' in values: - for port in values['exposed_ports']: - port = to_text(port, errors='surrogate_or_strict').strip() - protocol = 'tcp' - match = re.search(r'(/.+$)', port) - if match: - protocol = match.group(1).replace('/', '') - port = re.sub(r'/.+$', '', port) - exposed.append((port, protocol)) - if 'published_ports' in values: - # Any published port should also be exposed - for publish_port in values['published_ports']: - match = False - if isinstance(publish_port, string_types) and '/' in publish_port: - port, protocol = publish_port.split('/') - port = int(port) - else: - protocol = 'tcp' - port = int(publish_port) - for exposed_port in exposed: - if exposed_port[1] != protocol: - continue - if isinstance(exposed_port[0], string_types) and '-' in exposed_port[0]: - start_port, end_port = exposed_port[0].split('-') - if int(start_port) <= port <= int(end_port): - match = True - elif exposed_port[0] == port: - match = True - if not match: - exposed.append((port, protocol)) - values['ports'] = exposed - return values - - def _get_values_ports(module, container, api_version, options): host_config = container['HostConfig'] config = container['Config'] @@ -1750,349 +1167,161 @@ def _preprocess_container_names(module, client, api_version, value): return 'container:{0}'.format(container['Id']) -OPTIONS = [ - OptionGroup() - .add_option('auto_remove', type='bool') - .add_docker_api(DockerAPIEngine.host_config_value('AutoRemove')), - - OptionGroup() - .add_option('blkio_weight', type='int') - .add_docker_api(DockerAPIEngine.host_config_value('BlkioWeight', 
update_parameter='BlkioWeight')), - - OptionGroup() - .add_option('capabilities', type='set', elements='str') - .add_docker_api(DockerAPIEngine.host_config_value('CapAdd')), - - OptionGroup() - .add_option('cap_drop', type='set', elements='str') - .add_docker_api(DockerAPIEngine.host_config_value('CapDrop')), - - OptionGroup() - .add_option('cgroup_parent', type='str') - .add_docker_api(DockerAPIEngine.host_config_value('CgroupParent')), - - OptionGroup(preprocess=_preprocess_command) - .add_option('command', type='list', elements='str', ansible_type='raw') - .add_docker_api(DockerAPIEngine.config_value('Cmd')), - - OptionGroup() - .add_option('cpu_period', type='int') - .add_docker_api(DockerAPIEngine.host_config_value('CpuPeriod', update_parameter='CpuPeriod')), - - OptionGroup() - .add_option('cpu_quota', type='int') - .add_docker_api(DockerAPIEngine.host_config_value('CpuQuota', update_parameter='CpuQuota')), - - OptionGroup() - .add_option('cpuset_cpus', type='str') - .add_docker_api(DockerAPIEngine.host_config_value('CpusetCpus', update_parameter='CpusetCpus')), - - OptionGroup() - .add_option('cpuset_mems', type='str') - .add_docker_api(DockerAPIEngine.host_config_value('CpusetMems', update_parameter='CpusetMems')), - - OptionGroup() - .add_option('cpu_shares', type='int') - .add_docker_api(DockerAPIEngine.host_config_value('CpuShares', update_parameter='CpuShares')), - - OptionGroup(preprocess=_preprocess_entrypoint) - .add_option('entrypoint', type='list', elements='str') - .add_docker_api(DockerAPIEngine.config_value('Entrypoint')), - - OptionGroup() - .add_option('cpus', type='int', ansible_type='float') - .add_docker_api(DockerAPIEngine.host_config_value('NanoCpus', preprocess_value=_preprocess_cpus)), - - OptionGroup() - .add_option('detach', type='bool') - .add_option('interactive', type='bool') - .add_docker_api(DockerAPIEngine(get_value=_get_value_detach_interactive, set_value=_set_value_detach_interactive)), - - OptionGroup() - 
.add_option('devices', type='set', elements='dict', ansible_elements='str') - .add_docker_api(DockerAPIEngine.host_config_value('Devices', preprocess_value=_preprocess_devices)), - - OptionGroup() - .add_option('device_read_bps', type='set', elements='dict', ansible_suboptions=dict( - path=dict(required=True, type='str'), - rate=dict(required=True, type='str'), - )) - .add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceReadBps', preprocess_value=_preprocess_rate_bps)), - - OptionGroup() - .add_option('device_write_bps', type='set', elements='dict', ansible_suboptions=dict( - path=dict(required=True, type='str'), - rate=dict(required=True, type='str'), - )) - .add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceWriteBps', preprocess_value=_preprocess_rate_bps)), - - OptionGroup() - .add_option('device_read_iops', type='set', elements='dict', ansible_suboptions=dict( - path=dict(required=True, type='str'), - rate=dict(required=True, type='int'), - )) - .add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceReadIOps', preprocess_value=_preprocess_rate_iops)), - - OptionGroup() - .add_option('device_write_iops', type='set', elements='dict', ansible_suboptions=dict( - path=dict(required=True, type='str'), - rate=dict(required=True, type='int'), - )) - .add_docker_api(DockerAPIEngine.host_config_value('BlkioDeviceWriteIOps', preprocess_value=_preprocess_rate_iops)), - - OptionGroup() - .add_option('device_requests', type='set', elements='dict', ansible_suboptions=dict( - capabilities=dict(type='list', elements='list'), - count=dict(type='int'), - device_ids=dict(type='list', elements='str'), - driver=dict(type='str'), - options=dict(type='dict'), - )) - .add_docker_api(DockerAPIEngine.host_config_value('DeviceRequests', min_api_version='1.40', preprocess_value=_preprocess_device_requests)), - - OptionGroup() - .add_option('dns_servers', type='list', elements='str') - .add_docker_api(DockerAPIEngine.host_config_value('Dns')), - - OptionGroup() - 
.add_option('dns_opts', type='set', elements='str') - .add_docker_api(DockerAPIEngine.host_config_value('DnsOptions')), - - OptionGroup() - .add_option('dns_search_domains', type='list', elements='str') - .add_docker_api(DockerAPIEngine.host_config_value('DnsSearch')), - - OptionGroup() - .add_option('domainname', type='str') - .add_docker_api(DockerAPIEngine.config_value('Domainname')), - - OptionGroup(preprocess=_preprocess_env) - .add_option('env', type='set', ansible_type='dict', elements='str', needs_no_suboptions=True) - .add_option('env_file', type='set', ansible_type='path', elements='str', not_a_container_option=True) - .add_docker_api(DockerAPIEngine.config_value('Env', get_expected_value=_get_expected_env_value)), - - OptionGroup() - .add_option('etc_hosts', type='set', ansible_type='dict', elements='str', needs_no_suboptions=True) - .add_docker_api(DockerAPIEngine.host_config_value('ExtraHosts', preprocess_value=_preprocess_etc_hosts)), - - OptionGroup() - .add_option('groups', type='set', elements='str') - .add_docker_api(DockerAPIEngine.host_config_value('GroupAdd')), - - OptionGroup() - .add_option('healthcheck', type='dict', ansible_suboptions=dict( - test=dict(type='raw'), - interval=dict(type='str'), - timeout=dict(type='str'), - start_period=dict(type='str'), - retries=dict(type='int'), - )) - .add_docker_api(DockerAPIEngine.config_value( - 'Healthcheck', preprocess_value=_preprocess_healthcheck, postprocess_for_get=_postprocess_healthcheck_get_value)), - - OptionGroup() - .add_option('hostname', type='str') - .add_docker_api(DockerAPIEngine.config_value('Hostname')), - - OptionGroup(preprocess=_preprocess_networks) - .add_option('image', type='str') - .add_docker_api(DockerAPIEngine.config_value( - 'Image', ignore_mismatching_result=lambda module, client, api_version, option, image, container_value, expected_value: True)), - - OptionGroup() - .add_option('init', type='bool') - .add_docker_api(DockerAPIEngine.host_config_value('Init')), - - 
OptionGroup() - .add_option('ipc_mode', type='str') - .add_docker_api(DockerAPIEngine.host_config_value('IpcMode', preprocess_value=_preprocess_container_names)), - - OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='kernel_memory')) - .add_option('kernel_memory', type='int', ansible_type='str') - .add_docker_api(DockerAPIEngine.host_config_value('KernelMemory', update_parameter='KernelMemory')), - - OptionGroup() - .add_option('labels', type='dict', needs_no_suboptions=True) - .add_docker_api(DockerAPIEngine.config_value( - 'Labels', get_expected_value=_get_expected_labels_value, ignore_mismatching_result=_ignore_mismatching_label_result)), - - OptionGroup() - .add_option('links', type='set', elements='list', ansible_elements='str') - .add_docker_api(DockerAPIEngine.host_config_value('Links', preprocess_value=_preprocess_links)), - - OptionGroup(preprocess=_preprocess_log, ansible_required_by={'log_options': ['log_driver']}) - .add_option('log_driver', type='str') - .add_option('log_options', type='dict', ansible_aliases=['log_opt'], needs_no_suboptions=True) - .add_docker_api(DockerAPIEngine( - get_value=_get_values_log, - set_value=_set_values_log, - )), - - OptionGroup(preprocess=_preprocess_mac_address) - .add_option('mac_address', type='str') - .add_docker_api(DockerAPIEngine.config_value('MacAddress')), - - OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory')) - .add_option('memory', type='int', ansible_type='str') - .add_docker_api(DockerAPIEngine.host_config_value('Memory', update_parameter='Memory')), - - OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory_reservation')) - .add_option('memory_reservation', type='int', ansible_type='str') - .add_docker_api(DockerAPIEngine.host_config_value('MemoryReservation', update_parameter='MemoryReservation')), - - OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory_swap', unlimited_value=-1)) - .add_option('memory_swap', type='int', 
ansible_type='str') - .add_docker_api(DockerAPIEngine.host_config_value('MemorySwap', update_parameter='MemorySwap')), - - OptionGroup() - .add_option('memory_swappiness', type='int') - .add_docker_api(DockerAPIEngine.host_config_value('MemorySwappiness')), - - OptionGroup() - .add_option('stop_timeout', type='int', default_comparison='ignore') - .add_docker_api(DockerAPIEngine.config_value('StopTimeout')), - - OptionGroup(preprocess=_preprocess_networks) - .add_option('network_mode', type='str') - .add_option('networks', type='set', elements='dict', ansible_suboptions=dict( - name=dict(type='str', required=True), - ipv4_address=dict(type='str'), - ipv6_address=dict(type='str'), - aliases=dict(type='list', elements='str'), - links=dict(type='list', elements='str'), - )) - .add_docker_api(DockerAPIEngine( - preprocess_value=_preprocess_network_values, - get_value=_get_values_network, - set_value=_set_values_network, - ignore_mismatching_result=_ignore_mismatching_network_result, - )), - - OptionGroup() - .add_option('oom_killer', type='bool') - .add_docker_api(DockerAPIEngine.host_config_value('OomKillDisable')), - - OptionGroup() - .add_option('oom_score_adj', type='int') - .add_docker_api(DockerAPIEngine.host_config_value('OomScoreAdj')), - - OptionGroup() - .add_option('pid_mode', type='str') - .add_docker_api(DockerAPIEngine.host_config_value('PidMode', preprocess_value=_preprocess_container_names)), - - OptionGroup() - .add_option('pids_limit', type='int') - .add_docker_api(DockerAPIEngine.host_config_value('PidsLimit')), - - OptionGroup() - .add_option('privileged', type='bool') - .add_docker_api(DockerAPIEngine.host_config_value('Privileged')), - - OptionGroup() - .add_option('read_only', type='bool') - .add_docker_api(DockerAPIEngine.host_config_value('ReadonlyRootfs')), - - OptionGroup(ansible_required_by={'restart_retries': ['restart_policy']}) - .add_option('restart_policy', type='str', ansible_choices=['no', 'on-failure', 'always', 'unless-stopped']) - 
.add_option('restart_retries', type='int') - .add_docker_api(DockerAPIEngine( - get_value=_get_values_restart, - set_value=_set_values_restart, - update_value=_update_value_restart, - )), - - OptionGroup() - .add_option('runtime', type='str') - .add_docker_api(DockerAPIEngine.host_config_value('Runtime')), - - OptionGroup() - .add_option('security_opts', type='set', elements='str') - .add_docker_api(DockerAPIEngine.host_config_value('SecurityOpt')), - - OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='shm_size')) - .add_option('shm_size', type='int', ansible_type='str') - .add_docker_api(DockerAPIEngine.host_config_value('ShmSize')), - - OptionGroup() - .add_option('stop_signal', type='str') - .add_docker_api(DockerAPIEngine.config_value('StopSignal')), - - OptionGroup() - .add_option('storage_opts', type='dict', needs_no_suboptions=True) - .add_docker_api(DockerAPIEngine.host_config_value('StorageOpt')), - - OptionGroup(preprocess=_preprocess_sysctls) - .add_option('sysctls', type='dict', needs_no_suboptions=True) - .add_docker_api(DockerAPIEngine.host_config_value('Sysctls')), - - OptionGroup(preprocess=_preprocess_tmpfs) - .add_option('tmpfs', type='dict', ansible_type='list', ansible_elements='str') - .add_docker_api(DockerAPIEngine.host_config_value('Tmpfs')), - - OptionGroup() - .add_option('tty', type='bool') - .add_docker_api(DockerAPIEngine.config_value('Tty')), - - OptionGroup(preprocess=_preprocess_ulimits) - .add_option('ulimits', type='set', elements='dict', ansible_elements='str') - .add_docker_api(DockerAPIEngine.host_config_value('Ulimits')), - - OptionGroup() - .add_option('user', type='str') - .add_docker_api(DockerAPIEngine.config_value('User')), - - OptionGroup() - .add_option('userns_mode', type='str') - .add_docker_api(DockerAPIEngine.host_config_value('UsernsMode')), - - OptionGroup() - .add_option('uts', type='str') - .add_docker_api(DockerAPIEngine.host_config_value('UTSMode')), - - OptionGroup() - 
.add_option('volume_driver', type='str') - .add_docker_api(DockerAPIEngine.host_config_value('VolumeDriver')), - - OptionGroup() - .add_option('volumes_from', type='set', elements='str') - .add_docker_api(DockerAPIEngine.host_config_value('VolumesFrom')), - - OptionGroup() - .add_option('working_dir', type='str') - .add_docker_api(DockerAPIEngine.config_value('WorkingDir')), - - OptionGroup(preprocess=_preprocess_mounts) - .add_option('mounts', type='set', elements='dict', ansible_suboptions=dict( - target=dict(type='str', required=True), - source=dict(type='str'), - type=dict(type='str', choices=['bind', 'volume', 'tmpfs', 'npipe'], default='volume'), - read_only=dict(type='bool'), - consistency=dict(type='str', choices=['default', 'consistent', 'cached', 'delegated']), - propagation=dict(type='str', choices=['private', 'rprivate', 'shared', 'rshared', 'slave', 'rslave']), - no_copy=dict(type='bool'), - labels=dict(type='dict'), - volume_driver=dict(type='str'), - volume_options=dict(type='dict'), - tmpfs_size=dict(type='str'), - tmpfs_mode=dict(type='str'), - )) - .add_option('volumes', type='set', elements='str') - .add_option('volume_binds', type='set', elements='str', not_an_ansible_option=True, copy_comparison_from='volumes') - .add_docker_api(DockerAPIEngine( - get_value=_get_values_mounts, - get_expected_values=_get_expected_values_mounts, - set_value=_set_values_mounts, - )), - - OptionGroup(preprocess=_preprocess_ports) - .add_option('exposed_ports', type='set', elements='str', ansible_aliases=['exposed', 'expose']) - .add_option('publish_all_ports', type='bool') - .add_option('published_ports', type='dict', ansible_type='list', ansible_elements='str', ansible_aliases=['ports']) - .add_option('ports', type='set', elements='str', not_an_ansible_option=True, default_comparison='ignore') - .add_docker_api(DockerAPIEngine( - get_value=_get_values_ports, - get_expected_values=_get_expected_values_ports, - set_value=_set_values_ports, - 
preprocess_value=_preprocess_value_ports, - )), -] +OPTION_AUTO_REMOVE.add_engine('docker_api', DockerAPIEngine.host_config_value('AutoRemove')) + +OPTION_BLKIO_WEIGHT.add_engine('docker_api', DockerAPIEngine.host_config_value('BlkioWeight', update_parameter='BlkioWeight')) + +OPTION_CAPABILITIES.add_engine('docker_api', DockerAPIEngine.host_config_value('CapAdd')) + +OPTION_CAP_DROP.add_engine('docker_api', DockerAPIEngine.host_config_value('CapDrop')) + +OPTION_CGROUP_PARENT.add_engine('docker_api', DockerAPIEngine.host_config_value('CgroupParent')) + +OPTION_COMMAND.add_engine('docker_api', DockerAPIEngine.config_value('Cmd')) + +OPTION_CPU_PERIOD.add_engine('docker_api', DockerAPIEngine.host_config_value('CpuPeriod', update_parameter='CpuPeriod')) + +OPTION_CPU_QUOTA.add_engine('docker_api', DockerAPIEngine.host_config_value('CpuQuota', update_parameter='CpuQuota')) + +OPTION_CPUSET_CPUS.add_engine('docker_api', DockerAPIEngine.host_config_value('CpusetCpus', update_parameter='CpusetCpus')) + +OPTION_CPUSET_MEMS.add_engine('docker_api', DockerAPIEngine.host_config_value('CpusetMems', update_parameter='CpusetMems')) + +OPTION_CPU_SHARES.add_engine('docker_api', DockerAPIEngine.host_config_value('CpuShares', update_parameter='CpuShares')) + +OPTION_ENTRYPOINT.add_engine('docker_api', DockerAPIEngine.config_value('Entrypoint')) + +OPTION_CPUS.add_engine('docker_api', DockerAPIEngine.host_config_value('NanoCpus', preprocess_value=_preprocess_cpus)) + +OPTION_DETACH_INTERACTIVE.add_engine('docker_api', DockerAPIEngine(get_value=_get_value_detach_interactive, set_value=_set_value_detach_interactive)) + +OPTION_DEVICES.add_engine('docker_api', DockerAPIEngine.host_config_value('Devices', preprocess_value=_preprocess_devices)) + +OPTION_DEVICE_READ_BPS.add_engine('docker_api', DockerAPIEngine.host_config_value('BlkioDeviceReadBps', preprocess_value=_preprocess_rate_bps)) + +OPTION_DEVICE_WRITE_BPS.add_engine('docker_api', 
DockerAPIEngine.host_config_value('BlkioDeviceWriteBps', preprocess_value=_preprocess_rate_bps)) + +OPTION_DEVICE_READ_IOPS.add_engine('docker_api', DockerAPIEngine.host_config_value('BlkioDeviceReadIOps', preprocess_value=_preprocess_rate_iops)) + +OPTION_DEVICE_WRITE_IOPS.add_engine('docker_api', DockerAPIEngine.host_config_value('BlkioDeviceWriteIOps', preprocess_value=_preprocess_rate_iops)) + +OPTION_DEVICE_REQUESTS.add_engine('docker_api', DockerAPIEngine.host_config_value( + 'DeviceRequests', min_api_version='1.40', preprocess_value=_preprocess_device_requests)) + +OPTION_DNS_SERVERS.add_engine('docker_api', DockerAPIEngine.host_config_value('Dns')) + +OPTION_DNS_OPTS.add_engine('docker_api', DockerAPIEngine.host_config_value('DnsOptions')) + +OPTION_DNS_SEARCH_DOMAINS.add_engine('docker_api', DockerAPIEngine.host_config_value('DnsSearch')) + +OPTION_DOMAINNAME.add_engine('docker_api', DockerAPIEngine.config_value('Domainname')) + +OPTION_ENVIRONMENT.add_engine('docker_api', DockerAPIEngine.config_value('Env', get_expected_value=_get_expected_env_value)) + +OPTION_ETC_HOSTS.add_engine('docker_api', DockerAPIEngine.host_config_value('ExtraHosts', preprocess_value=_preprocess_etc_hosts)) + +OPTION_GROUPS.add_engine('docker_api', DockerAPIEngine.host_config_value('GroupAdd')) + +OPTION_HEALTHCHECK.add_engine('docker_api', DockerAPIEngine.config_value( + 'Healthcheck', preprocess_value=_preprocess_healthcheck, postprocess_for_get=_postprocess_healthcheck_get_value)) + +OPTION_HOSTNAME.add_engine('docker_api', DockerAPIEngine.config_value('Hostname')) + +OPTION_IMAGE.add_engine('docker_api', DockerAPIEngine.config_value( + 'Image', ignore_mismatching_result=lambda module, client, api_version, option, image, container_value, expected_value: True)) + +OPTION_INIT.add_engine('docker_api', DockerAPIEngine.host_config_value('Init')) + +OPTION_IPC_MODE.add_engine('docker_api', DockerAPIEngine.host_config_value('IpcMode', preprocess_value=_preprocess_container_names)) + 
+OPTION_KERNEL_MEMORY.add_engine('docker_api', DockerAPIEngine.host_config_value('KernelMemory', update_parameter='KernelMemory')) + +OPTION_LABELS.add_engine('docker_api', DockerAPIEngine.config_value( + 'Labels', get_expected_value=_get_expected_labels_value, ignore_mismatching_result=_ignore_mismatching_label_result)) + +OPTION_LINKS.add_engine('docker_api', DockerAPIEngine.host_config_value('Links', preprocess_value=_preprocess_links)) + +OPTION_LOG_DRIVER_OPTIONS.add_engine('docker_api', DockerAPIEngine( + get_value=_get_values_log, + set_value=_set_values_log, +)) + +OPTION_MAC_ADDRESS.add_engine('docker_api', DockerAPIEngine.config_value('MacAddress')) + +OPTION_MEMORY.add_engine('docker_api', DockerAPIEngine.host_config_value('Memory', update_parameter='Memory')) + +OPTION_MEMORY_RESERVATION.add_engine('docker_api', DockerAPIEngine.host_config_value('MemoryReservation', update_parameter='MemoryReservation')) + +OPTION_MEMORY_SWAP.add_engine('docker_api', DockerAPIEngine.host_config_value('MemorySwap', update_parameter='MemorySwap')) + +OPTION_MEMORY_SWAPPINESS.add_engine('docker_api', DockerAPIEngine.host_config_value('MemorySwappiness')) + +OPTION_STOP_TIMEOUT.add_engine('docker_api', DockerAPIEngine.config_value('StopTimeout')) + +OPTION_NETWORK.add_engine('docker_api', DockerAPIEngine( + preprocess_value=_preprocess_network_values, + get_value=_get_values_network, + set_value=_set_values_network, + ignore_mismatching_result=_ignore_mismatching_network_result, +)) + +OPTION_OOM_KILLER.add_engine('docker_api', DockerAPIEngine.host_config_value('OomKillDisable')) + +OPTION_OOM_SCORE_ADJ.add_engine('docker_api', DockerAPIEngine.host_config_value('OomScoreAdj')) + +OPTION_PID_MODE.add_engine('docker_api', DockerAPIEngine.host_config_value('PidMode', preprocess_value=_preprocess_container_names)) + +OPTION_PIDS_LIMIT.add_engine('docker_api', DockerAPIEngine.host_config_value('PidsLimit')) + +OPTION_PRIVILEGED.add_engine('docker_api', 
DockerAPIEngine.host_config_value('Privileged')) + +OPTION_READ_ONLY.add_engine('docker_api', DockerAPIEngine.host_config_value('ReadonlyRootfs')) + +OPTION_RESTART_POLICY.add_engine('docker_api', DockerAPIEngine( + get_value=_get_values_restart, + set_value=_set_values_restart, + update_value=_update_value_restart, +)) + +OPTION_RUNTIME.add_engine('docker_api', DockerAPIEngine.host_config_value('Runtime')) + +OPTION_SECURITY_OPTS.add_engine('docker_api', DockerAPIEngine.host_config_value('SecurityOpt')) + +OPTION_SHM_SIZE.add_engine('docker_api', DockerAPIEngine.host_config_value('ShmSize')) + +OPTION_STOP_SIGNAL.add_engine('docker_api', DockerAPIEngine.config_value('StopSignal')) + +OPTION_STORAGE_OPTS.add_engine('docker_api', DockerAPIEngine.host_config_value('StorageOpt')) + +OPTION_SYSCTLS.add_engine('docker_api', DockerAPIEngine.host_config_value('Sysctls')) + +OPTION_TMPFS.add_engine('docker_api', DockerAPIEngine.host_config_value('Tmpfs')) + +OPTION_TTY.add_engine('docker_api', DockerAPIEngine.config_value('Tty')) + +OPTION_ULIMITS.add_engine('docker_api', DockerAPIEngine.host_config_value('Ulimits')) + +OPTION_USER.add_engine('docker_api', DockerAPIEngine.config_value('User')) + +OPTION_USERNS_MODE.add_engine('docker_api', DockerAPIEngine.host_config_value('UsernsMode')) + +OPTION_UTS.add_engine('docker_api', DockerAPIEngine.host_config_value('UTSMode')) + +OPTION_VOLUME_DRIVER.add_engine('docker_api', DockerAPIEngine.host_config_value('VolumeDriver')) + +OPTION_VOLUMES_FROM.add_engine('docker_api', DockerAPIEngine.host_config_value('VolumesFrom')) + +OPTION_WORKING_DIR.add_engine('docker_api', DockerAPIEngine.config_value('WorkingDir')) + +OPTION_MOUNTS_VOLUMES.add_engine('docker_api', DockerAPIEngine( + get_value=_get_values_mounts, + get_expected_values=_get_expected_values_mounts, + set_value=_set_values_mounts, +)) + +OPTION_PORTS.add_engine('docker_api', DockerAPIEngine( + get_value=_get_values_ports, + get_expected_values=_get_expected_values_ports, 
+ set_value=_set_values_ports, + preprocess_value=_preprocess_value_ports, +)) diff --git a/plugins/module_utils/module_container/module.py b/plugins/module_utils/module_container/module.py new file mode 100644 index 000000000..c4e444514 --- /dev/null +++ b/plugins/module_utils/module_container/module.py @@ -0,0 +1,796 @@ +# Copyright (c) 2022 Felix Fontein +# Copyright 2016 Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import re +from time import sleep + +from ansible.module_utils.common.text.converters import to_native, to_text + +from ansible_collections.community.docker.plugins.module_utils.util import ( + DifferenceTracker, + DockerBaseClass, + compare_generic, + is_image_name_id, + sanitize_result, +) + +from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import parse_repository_tag + + +class Container(DockerBaseClass): + def __init__(self, container, engine_driver): + super(Container, self).__init__() + self.raw = container + self.id = None + self.image = None + self.container = container + self.engine_driver = engine_driver + if container: + self.id = engine_driver.get_container_id(container) + self.image = engine_driver.get_image_from_container(container) + self.log(self.container, pretty_print=True) + + @property + def exists(self): + return True if self.container else False + + @property + def removing(self): + return self.engine_driver.is_container_removing(self.container) if self.container else False + + @property + def running(self): + return self.engine_driver.is_container_running(self.container) if self.container else False + + @property + def paused(self): + return self.engine_driver.is_container_paused(self.container) if self.container else False + + +class ContainerManager(DockerBaseClass): + def __init__(self, module, engine_driver, client, active_options): + 
self.module = module + self.engine_driver = engine_driver + self.client = client + self.options = active_options + self.all_options = self._collect_all_options(active_options) + self.check_mode = self.module.check_mode + self.param_cleanup = self.module.params['cleanup'] + self.param_container_default_behavior = self.module.params['container_default_behavior'] + self.param_default_host_ip = self.module.params['default_host_ip'] + self.param_debug = self.module.params['debug'] + self.param_force_kill = self.module.params['force_kill'] + self.param_image = self.module.params['image'] + self.param_image_label_mismatch = self.module.params['image_label_mismatch'] + self.param_keep_volumes = self.module.params['keep_volumes'] + self.param_kill_signal = self.module.params['kill_signal'] + self.param_name = self.module.params['name'] + self.param_networks_cli_compatible = self.module.params['networks_cli_compatible'] + self.param_output_logs = self.module.params['output_logs'] + self.param_paused = self.module.params['paused'] + self.param_pull = self.module.params['pull'] + self.param_purge_networks = self.module.params['purge_networks'] + self.param_recreate = self.module.params['recreate'] + self.param_removal_wait_timeout = self.module.params['removal_wait_timeout'] + self.param_restart = self.module.params['restart'] + self.param_state = self.module.params['state'] + self._parse_comparisons() + self._update_params() + self.results = {'changed': False, 'actions': []} + self.diff = {} + self.diff_tracker = DifferenceTracker() + self.facts = {} + if self.param_default_host_ip: + valid_ip = False + if re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', self.param_default_host_ip): + valid_ip = True + if re.match(r'^\[[0-9a-fA-F:]+\]$', self.param_default_host_ip): + valid_ip = True + if re.match(r'^[0-9a-fA-F:]+$', self.param_default_host_ip): + self.param_default_host_ip = '[{0}]'.format(self.param_default_host_ip) + valid_ip = True + if not valid_ip: + self.fail('The value 
of default_host_ip must be an empty string, an IPv4 address, ' + 'or an IPv6 address. Got "{0}" instead.'.format(self.param_default_host_ip)) + + def _collect_all_options(self, active_options): + all_options = {} + for options in active_options: + for option in options.options: + all_options[option.name] = option + return all_options + + def _collect_all_module_params(self): + all_module_options = set() + for option, data in self.module.argument_spec.items(): + all_module_options.add(option) + if 'aliases' in data: + for alias in data['aliases']: + all_module_options.add(alias) + return all_module_options + + def _parse_comparisons(self): + # Keep track of all module params and all option aliases + all_module_options = self._collect_all_module_params() + comp_aliases = {} + for option_name, option in self.all_options.items(): + if option.not_an_ansible_option: + continue + comp_aliases[option_name] = option_name + for alias in option.ansible_aliases: + comp_aliases[alias] = option_name + # Process legacy ignore options + if self.module.params['ignore_image']: + self.all_options['image'].comparison = 'ignore' + if self.param_purge_networks: + self.all_options['networks'].comparison = 'strict' + # Process comparisons specified by user + if self.module.params.get('comparisons'): + # If '*' appears in comparisons, process it first + if '*' in self.module.params['comparisons']: + value = self.module.params['comparisons']['*'] + if value not in ('strict', 'ignore'): + self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!") + for option in self.all_options.values(): + if option.name == 'networks': + # `networks` is special: only update if + # some value is actually specified + if self.module.params['networks'] is None: + continue + option.comparison = value + # Now process all other comparisons. 
+ comp_aliases_used = {} + for key, value in self.module.params['comparisons'].items(): + if key == '*': + continue + # Find main key + key_main = comp_aliases.get(key) + if key_main is None: + if key_main in all_module_options: + self.fail("The module option '%s' cannot be specified in the comparisons dict, " + "since it does not correspond to container's state!" % key) + if key not in self.all_options or self.all_options[key].not_an_ansible_option: + self.fail("Unknown module option '%s' in comparisons dict!" % key) + key_main = key + if key_main in comp_aliases_used: + self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main)) + comp_aliases_used[key_main] = key + # Check value and update accordingly + if value in ('strict', 'ignore'): + self.all_options[key_main].comparison = value + elif value == 'allow_more_present': + if self.all_options[key_main].comparison_type == 'value': + self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value)) + self.all_options[key_main].comparison = value + else: + self.fail("Unknown comparison mode '%s'!" 
% value) + # Copy values + for option in self.all_options.values(): + if option.copy_comparison_from is not None: + option.comparison = self.all_options[option.copy_comparison_from].comparison + # Check legacy values + if self.module.params['ignore_image'] and self.all_options['image'].comparison != 'ignore': + self.module.warn('The ignore_image option has been overridden by the comparisons option!') + if self.param_purge_networks and self.all_options['networks'].comparison != 'strict': + self.module.warn('The purge_networks option has been overridden by the comparisons option!') + + def _update_params(self): + if self.param_networks_cli_compatible is True and self.module.params['networks'] and self.module.params['network_mode'] is None: + # Same behavior as Docker CLI: if networks are specified, use the name of the first network as the value for network_mode + # (assuming no explicit value is specified for network_mode) + self.module.params['network_mode'] = self.module.params['networks'][0]['name'] + if self.param_container_default_behavior == 'compatibility': + old_default_values = dict( + auto_remove=False, + detach=True, + init=False, + interactive=False, + memory='0', + paused=False, + privileged=False, + read_only=False, + tty=False, + ) + for param, value in old_default_values.items(): + if self.module.params[param] is None: + self.module.params[param] = value + + def fail(self, *args, **kwargs): + self.client.fail(*args, **kwargs) + + def run(self): + if self.param_state in ('stopped', 'started', 'present'): + self.present(self.param_state) + elif self.param_state == 'absent': + self.absent() + + if not self.check_mode and not self.param_debug: + self.results.pop('actions') + + if self.module._diff or self.param_debug: + self.diff['before'], self.diff['after'] = self.diff_tracker.get_before_after() + self.results['diff'] = self.diff + + if self.facts: + self.results['container'] = self.facts + + def wait_for_state(self, container_id, complete_states=None, 
wait_states=None, accept_removal=False, max_wait=None): + delay = 1.0 + total_wait = 0 + while True: + # Inspect container + result = self.engine_driver.inspect_container_by_id(self.client, container_id) + if result is None: + if accept_removal: + return + msg = 'Encontered vanished container while waiting for container "{0}"' + self.fail(msg.format(container_id)) + # Check container state + state = result.get('State', {}).get('Status') + if complete_states is not None and state in complete_states: + return + if wait_states is not None and state not in wait_states: + msg = 'Encontered unexpected state "{1}" while waiting for container "{0}"' + self.fail(msg.format(container_id, state)) + # Wait + if max_wait is not None: + if total_wait > max_wait: + msg = 'Timeout of {1} seconds exceeded while waiting for container "{0}"' + self.fail(msg.format(container_id, max_wait)) + if total_wait + delay > max_wait: + delay = max_wait - total_wait + sleep(delay) + total_wait += delay + # Exponential backoff, but never wait longer than 10 seconds + # (1.1**24 < 10, 1.1**25 > 10, so it will take 25 iterations + # until the maximal 10 seconds delay is reached. By then, the + # code will have slept for ~1.5 minutes.) 
+ delay = min(delay * 1.1, 10) + + def _collect_params(self, active_options): + parameters = [] + for options in active_options: + values = {} + engine = options.get_engine(self.engine_driver.name) + for option in options.options: + if not option.not_an_ansible_option and self.module.params[option.name] is not None: + values[option.name] = self.module.params[option.name] + values = options.preprocess(self.module, values) + engine.preprocess_value(self.module, self.client, self.engine_driver.get_api_version(self.client), options.options, values) + parameters.append((options, values)) + return parameters + + def present(self, state): + self.parameters = self._collect_params(self.options) + container = self._get_container(self.param_name) + was_running = container.running + was_paused = container.paused + container_created = False + + # If the image parameter was passed then we need to deal with the image + # version comparison. Otherwise we handle this depending on whether + # the container already runs or not; in the former case, in case the + # container needs to be restarted, we use the existing container's + # image ID. 
+ image = self._get_image() + self.log(image, pretty_print=True) + if not container.exists or container.removing: + # New container + if container.removing: + self.log('Found container in removal phase') + else: + self.log('No container found') + if not self.param_image: + self.fail('Cannot create container when image is not specified!') + self.diff_tracker.add('exists', parameter=True, active=False) + if container.removing and not self.check_mode: + # Wait for container to be removed before trying to create it + self.wait_for_state( + container.id, wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout) + new_container = self.container_create(self.param_image) + if new_container: + container = new_container + container_created = True + else: + # Existing container + different, differences = self.has_different_configuration(container, image) + image_different = False + if self.all_options['image'].comparison == 'strict': + image_different = self._image_is_different(image, container) + if image_different or different or self.param_recreate: + self.diff_tracker.merge(differences) + self.diff['differences'] = differences.get_legacy_docker_container_diffs() + if image_different: + self.diff['image_different'] = True + self.log("differences") + self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True) + image_to_use = self.param_image + if not image_to_use and container and container.image: + image_to_use = container.image + if not image_to_use: + self.fail('Cannot recreate container when image is not specified or cannot be extracted from current container!') + if container.running: + self.container_stop(container.id) + self.container_remove(container.id) + if not self.check_mode: + self.wait_for_state( + container.id, wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout) + new_container = self.container_create(image_to_use) + if new_container: + container = new_container + 
container_created = True + + if container and container.exists: + container = self.update_limits(container, image) + container = self.update_networks(container, container_created) + + if state == 'started' and not container.running: + self.diff_tracker.add('running', parameter=True, active=was_running) + container = self.container_start(container.id) + elif state == 'started' and self.param_restart: + self.diff_tracker.add('running', parameter=True, active=was_running) + self.diff_tracker.add('restarted', parameter=True, active=False) + container = self.container_restart(container.id) + elif state == 'stopped' and container.running: + self.diff_tracker.add('running', parameter=False, active=was_running) + self.container_stop(container.id) + container = self._get_container(container.id) + + if state == 'started' and self.param_paused is not None and container.paused != self.param_paused: + self.diff_tracker.add('paused', parameter=self.param_paused, active=was_paused) + if not self.check_mode: + try: + if self.param_paused: + self.engine_driver.pause_container(self.client, container.id) + else: + self.engine_driver.unpause_container(self.client, container.id) + except Exception as exc: + self.fail("Error %s container %s: %s" % ( + "pausing" if self.param_paused else "unpausing", container.id, to_native(exc) + )) + container = self._get_container(container.id) + self.results['changed'] = True + self.results['actions'].append(dict(set_paused=self.param_paused)) + + self.facts = container.raw + + def absent(self): + container = self._get_container(self.param_name) + if container.exists: + if container.running: + self.diff_tracker.add('running', parameter=False, active=True) + self.container_stop(container.id) + self.diff_tracker.add('exists', parameter=False, active=True) + self.container_remove(container.id) + + def _output_logs(self, msg): + self.module.log(msg=msg) + + def _get_container(self, container): + ''' + Expects container ID or Name. 
Returns a container object + ''' + container = self.engine_driver.inspect_container_by_name(self.client, container) + return Container(container, self.engine_driver) + + def _get_image(self): + image_parameter = self.param_image + if not image_parameter: + self.log('No image specified') + return None + if is_image_name_id(image_parameter): + image = self.engine_driver.inspect_image_by_id(self.client, image_parameter) + else: + repository, tag = parse_repository_tag(image_parameter) + if not tag: + tag = "latest" + image = self.engine_driver.inspect_image_by_name(self.client, repository, tag) + if not image or self.param_pull: + if not self.check_mode: + self.log("Pull the image.") + image, alreadyToLatest = self.engine_driver.pull_image(self.client, repository, tag) + if alreadyToLatest: + self.results['changed'] = False + else: + self.results['changed'] = True + self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag))) + elif not image: + # If the image isn't there, claim we'll pull. + # (Implicitly: if the image is there, claim it already was latest.) 
+ self.results['changed'] = True + self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag))) + + self.log("image") + self.log(image, pretty_print=True) + return image + + def _image_is_different(self, image, container): + if image and image.get('Id'): + if container and container.image: + if image.get('Id') != container.image: + self.diff_tracker.add('image', parameter=image.get('Id'), active=container.image) + return True + return False + + def _compose_create_parameters(self, image): + params = {} + for options, values in self.parameters: + engine = options.get_engine(self.engine_driver.name) + if engine.can_set_value(self.engine_driver.get_api_version(self.client)): + engine.set_value(self.module, params, self.engine_driver.get_api_version(self.client), options.options, values) + params['Image'] = image + return params + + def _record_differences(self, differences, options, param_values, engine, container, image): + container_values = engine.get_value(self.module, container.raw, self.engine_driver.get_api_version(self.client), options.options) + expected_values = engine.get_expected_values( + self.module, self.client, self.engine_driver.get_api_version(self.client), options.options, image, param_values.copy()) + for option in options.options: + if option.name in expected_values: + param_value = expected_values[option.name] + container_value = container_values.get(option.name) + match = compare_generic(param_value, container_value, option.comparison, option.comparison_type) + + if not match: + # No match. + if engine.ignore_mismatching_result(self.module, self.client, self.engine_driver.get_api_version(self.client), + option, image, container_value, param_value): + # Ignore the result + continue + + # Record the differences + p = param_value + c = container_value + if option.comparison_type == 'set': + # Since the order does not matter, sort so that the diff output is better. 
+ if p is not None: + p = sorted(p) + if c is not None: + c = sorted(c) + elif option.comparison_type == 'set(dict)': + # Since the order does not matter, sort so that the diff output is better. + if option.name == 'expected_mounts': + # For selected values, use one entry as key + def sort_key_fn(x): + return x['target'] + else: + # We sort the list of dictionaries by using the sorted items of a dict as its key. + def sort_key_fn(x): + return sorted((a, to_text(b, errors='surrogate_or_strict')) for a, b in x.items()) + if p is not None: + p = sorted(p, key=sort_key_fn) + if c is not None: + c = sorted(c, key=sort_key_fn) + differences.add(option.name, parameter=p, active=c) + + def has_different_configuration(self, container, image): + differences = DifferenceTracker() + for options, param_values in self.parameters: + engine = options.get_engine(self.engine_driver.name) + self._record_differences(differences, options, param_values, engine, container, image) + has_differences = not differences.empty + return has_differences, differences + + def has_different_resource_limits(self, container, image): + differences = DifferenceTracker() + for options, param_values in self.parameters: + engine = options.get_engine(self.engine_driver.name) + if not engine.can_update_value(self.engine_driver.get_api_version(self.client)): + continue + self._record_differences(differences, options, param_values, engine, container, image) + has_differences = not differences.empty + return has_differences, differences + + def _compose_update_parameters(self): + result = {} + for options, values in self.parameters: + engine = options.get_engine(self.engine_driver.name) + if not engine.can_update_value(self.engine_driver.get_api_version(self.client)): + continue + engine.update_value(self.module, result, self.engine_driver.get_api_version(self.client), options.options, values) + return result + + def update_limits(self, container, image): + limits_differ, different_limits = 
self.has_different_resource_limits(container, image) + if limits_differ: + self.log("limit differences:") + self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True) + self.diff_tracker.merge(different_limits) + if limits_differ and not self.check_mode: + self.container_update(container.id, self._compose_update_parameters()) + return self._get_container(container.id) + return container + + def has_network_differences(self, container): + ''' + Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6 + ''' + different = False + differences = [] + + if not self.module.params['networks']: + return different, differences + + if not container.container.get('NetworkSettings'): + self.fail("has_missing_networks: Error parsing container properties. NetworkSettings missing.") + + connected_networks = container.container['NetworkSettings']['Networks'] + for network in self.module.params['networks']: + network_info = connected_networks.get(network['name']) + if network_info is None: + different = True + differences.append(dict( + parameter=network, + container=None + )) + else: + diff = False + network_info_ipam = network_info.get('IPAMConfig') or {} + if network.get('ipv4_address') and network['ipv4_address'] != network_info_ipam.get('IPv4Address'): + diff = True + if network.get('ipv6_address') and network['ipv6_address'] != network_info_ipam.get('IPv6Address'): + diff = True + if network.get('aliases'): + if not compare_generic(network['aliases'], network_info.get('Aliases'), 'allow_more_present', 'set'): + diff = True + if network.get('links'): + expected_links = [] + for link, alias in network['links']: + expected_links.append("%s:%s" % (link, alias)) + if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'): + diff = True + if diff: + different = True + differences.append(dict( + parameter=network, + container=dict( + name=network['name'], + 
ipv4_address=network_info_ipam.get('IPv4Address'), + ipv6_address=network_info_ipam.get('IPv6Address'), + aliases=network_info.get('Aliases'), + links=network_info.get('Links') + ) + )) + return different, differences + + def has_extra_networks(self, container): + ''' + Check if the container is connected to non-requested networks + ''' + extra_networks = [] + extra = False + + if not container.container.get('NetworkSettings'): + self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.") + + connected_networks = container.container['NetworkSettings'].get('Networks') + if connected_networks: + for network, network_config in connected_networks.items(): + keep = False + if self.module.params['networks']: + for expected_network in self.module.params['networks']: + if expected_network['name'] == network: + keep = True + if not keep: + extra = True + extra_networks.append(dict(name=network, id=network_config['NetworkID'])) + return extra, extra_networks + + def update_networks(self, container, container_created): + updated_container = container + if self.all_options['networks'].comparison != 'ignore' or container_created: + has_network_differences, network_differences = self.has_network_differences(container) + if has_network_differences: + if self.diff.get('differences'): + self.diff['differences'].append(dict(network_differences=network_differences)) + else: + self.diff['differences'] = [dict(network_differences=network_differences)] + for netdiff in network_differences: + self.diff_tracker.add( + 'network.{0}'.format(netdiff['parameter']['name']), + parameter=netdiff['parameter'], + active=netdiff['container'] + ) + self.results['changed'] = True + updated_container = self._add_networks(container, network_differences) + + if (self.all_options['networks'].comparison == 'strict' and self.module.params['networks'] is not None) or self.param_purge_networks: + has_extra_networks, extra_networks = self.has_extra_networks(container) + if 
has_extra_networks: + if self.diff.get('differences'): + self.diff['differences'].append(dict(purge_networks=extra_networks)) + else: + self.diff['differences'] = [dict(purge_networks=extra_networks)] + for extra_network in extra_networks: + self.diff_tracker.add( + 'network.{0}'.format(extra_network['name']), + active=extra_network + ) + self.results['changed'] = True + updated_container = self._purge_networks(container, extra_networks) + return updated_container + + def _add_networks(self, container, differences): + for diff in differences: + # remove the container from the network, if connected + if diff.get('container'): + self.results['actions'].append(dict(removed_from_network=diff['parameter']['name'])) + if not self.check_mode: + try: + self.engine_driver.disconnect_container_from_network(self.client, container.id, diff['parameter']['id']) + except Exception as exc: + self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'], + to_native(exc))) + # connect to the network + self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=diff['parameter'])) + if not self.check_mode: + params = {key: value for key, value in diff['parameter'].items() if key not in ('id', 'name')} + try: + self.log("Connecting container to network %s" % diff['parameter']['id']) + self.log(params, pretty_print=True) + self.engine_driver.connect_container_to_network(self.client, container.id, diff['parameter']['id'], params) + except Exception as exc: + self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], to_native(exc))) + return self._get_container(container.id) + + def _purge_networks(self, container, networks): + for network in networks: + self.results['actions'].append(dict(removed_from_network=network['name'])) + if not self.check_mode: + try: + self.engine_driver.disconnect_container_from_network(self.client, container.id, network['name']) + except Exception as exc: + 
self.fail("Error disconnecting container from network %s - %s" % (network['name'], + to_native(exc))) + return self._get_container(container.id) + + def container_create(self, image): + create_parameters = self._compose_create_parameters(image) + self.log("create container") + self.log("image: %s parameters:" % image) + self.log(create_parameters, pretty_print=True) + self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters)) + self.results['changed'] = True + new_container = None + if not self.check_mode: + try: + container_id = self.engine_driver.create_container(self.client, self.param_name, create_parameters) + except Exception as exc: + self.fail("Error creating container: %s" % to_native(exc)) + return self._get_container(container_id) + return new_container + + def container_start(self, container_id): + self.log("start container %s" % (container_id)) + self.results['actions'].append(dict(started=container_id)) + self.results['changed'] = True + if not self.check_mode: + try: + self.engine_driver.start_container(self.client, container_id) + except Exception as exc: + self.fail("Error starting container %s: %s" % (container_id, to_native(exc))) + + if self.module.params['detach'] is False: + status = self.engine_driver.wait_for_container(self.client, container_id) + self.client.fail_results['status'] = status + self.results['status'] = status + + if self.module.params['auto_remove']: + output = "Cannot retrieve result as auto_remove is enabled" + if self.param_output_logs: + self.module.warn('Cannot output_logs if auto_remove is enabled!') + else: + output, real_output = self.engine_driver.get_container_output(self.client, container_id) + if real_output and self.param_output_logs: + self._output_logs(msg=output) + + if self.param_cleanup: + self.container_remove(container_id, force=True) + insp = self._get_container(container_id) + if insp.raw: + insp.raw['Output'] = output + else: + insp.raw = dict(Output=output) + 
if status != 0: + # Set `failed` to True and return output as msg + self.results['failed'] = True + self.results['msg'] = output + return insp + return self._get_container(container_id) + + def container_remove(self, container_id, link=False, force=False): + volume_state = (not self.param_keep_volumes) + self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force)) + self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force)) + self.results['changed'] = True + if not self.check_mode: + try: + self.engine_driver.remove_container(self.client, container_id, remove_volumes=volume_state, link=link, force=force) + except Exception as exc: + self.client.fail("Error removing container %s: %s" % (container_id, to_native(exc))) + + def container_update(self, container_id, update_parameters): + if update_parameters: + self.log("update container %s" % (container_id)) + self.log(update_parameters, pretty_print=True) + self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters)) + self.results['changed'] = True + if not self.check_mode: + try: + self.engine_driver.update_container(self.client, container_id, update_parameters) + except Exception as exc: + self.fail("Error updating container %s: %s" % (container_id, to_native(exc))) + return self._get_container(container_id) + + def container_kill(self, container_id): + self.results['actions'].append(dict(killed=container_id, signal=self.param_kill_signal)) + self.results['changed'] = True + if not self.check_mode: + try: + self.engine_driver.kill_container(self.client, container_id, kill_signal=self.param_kill_signal) + except Exception as exc: + self.fail("Error killing container %s: %s" % (container_id, to_native(exc))) + + def container_restart(self, container_id): + self.results['actions'].append(dict(restarted=container_id, timeout=self.module.params['stop_timeout'])) + self.results['changed'] = 
True + if not self.check_mode: + try: + self.engine_driver.restart_container(self.client, container_id, self.module.params['stop_timeout'] or 10) + except Exception as exc: + self.fail("Error restarting container %s: %s" % (container_id, to_native(exc))) + return self._get_container(container_id) + + def container_stop(self, container_id): + if self.param_force_kill: + self.container_kill(container_id) + return + self.results['actions'].append(dict(stopped=container_id, timeout=self.module.params['stop_timeout'])) + self.results['changed'] = True + if not self.check_mode: + try: + self.engine_driver.stop_container(self.client, container_id, self.module.params['stop_timeout']) + except Exception as exc: + self.fail("Error stopping container %s: %s" % (container_id, to_native(exc))) + + +def run_module(engine_driver): + module, active_options, client = engine_driver.setup( + argument_spec=dict( + cleanup=dict(type='bool', default=False), + comparisons=dict(type='dict'), + container_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']), + command_handling=dict(type='str', choices=['compatibility', 'correct'], default='correct'), + default_host_ip=dict(type='str'), + force_kill=dict(type='bool', default=False, aliases=['forcekill']), + ignore_image=dict(type='bool', default=False), + image=dict(type='str'), + image_label_mismatch=dict(type='str', choices=['ignore', 'fail'], default='ignore'), + keep_volumes=dict(type='bool', default=True), + kill_signal=dict(type='str'), + name=dict(type='str', required=True), + networks_cli_compatible=dict(type='bool', default=True), + output_logs=dict(type='bool', default=False), + paused=dict(type='bool'), + pull=dict(type='bool', default=False), + purge_networks=dict(type='bool', default=False), + recreate=dict(type='bool', default=False), + removal_wait_timeout=dict(type='float'), + restart=dict(type='bool', default=False), + state=dict(type='str', default='started', choices=['absent', 
'present', 'started', 'stopped']), + ), + required_if=[ + ('state', 'present', ['image']) + ], + ) + + def execute(): + cm = ContainerManager(module, engine_driver, client, active_options) + cm.run() + module.exit_json(**sanitize_result(cm.results)) + + engine_driver.run(execute, client) diff --git a/plugins/modules/docker_container.py b/plugins/modules/docker_container.py index 3e7d112a8..42f2d5b3d 100644 --- a/plugins/modules/docker_container.py +++ b/plugins/modules/docker_container.py @@ -1199,800 +1199,18 @@ sample: 0 ''' -import re -from time import sleep - -from ansible.module_utils.common.text.converters import to_native, to_text - -from ansible_collections.community.docker.plugins.module_utils.module_container import ( +from ansible_collections.community.docker.plugins.module_utils.module_container.docker_api import ( DockerAPIEngineDriver, ) -from ansible_collections.community.docker.plugins.module_utils.util import ( - DifferenceTracker, - DockerBaseClass, - compare_generic, - is_image_name_id, - sanitize_result, -) - -from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import parse_repository_tag - - -class Container(DockerBaseClass): - def __init__(self, container, engine_driver): - super(Container, self).__init__() - self.raw = container - self.id = None - self.image = None - self.container = container - self.engine_driver = engine_driver - if container: - self.id = engine_driver.get_container_id(container) - self.image = engine_driver.get_image_from_container(container) - self.log(self.container, pretty_print=True) - - @property - def exists(self): - return True if self.container else False - - @property - def removing(self): - return self.engine_driver.is_container_removing(self.container) if self.container else False - - @property - def running(self): - return self.engine_driver.is_container_running(self.container) if self.container else False - - @property - def paused(self): - return 
self.engine_driver.is_container_paused(self.container) if self.container else False - - -class ContainerManager(DockerBaseClass): - def __init__(self, module, engine_driver, client, active_options): - self.module = module - self.engine_driver = engine_driver - self.client = client - self.options = active_options - self.all_options = self._collect_all_options(active_options) - self.check_mode = self.module.check_mode - self.param_cleanup = self.module.params['cleanup'] - self.param_container_default_behavior = self.module.params['container_default_behavior'] - self.param_default_host_ip = self.module.params['default_host_ip'] - self.param_debug = self.module.params['debug'] - self.param_force_kill = self.module.params['force_kill'] - self.param_image = self.module.params['image'] - self.param_image_label_mismatch = self.module.params['image_label_mismatch'] - self.param_keep_volumes = self.module.params['keep_volumes'] - self.param_kill_signal = self.module.params['kill_signal'] - self.param_name = self.module.params['name'] - self.param_networks_cli_compatible = self.module.params['networks_cli_compatible'] - self.param_output_logs = self.module.params['output_logs'] - self.param_paused = self.module.params['paused'] - self.param_pull = self.module.params['pull'] - self.param_purge_networks = self.module.params['purge_networks'] - self.param_recreate = self.module.params['recreate'] - self.param_removal_wait_timeout = self.module.params['removal_wait_timeout'] - self.param_restart = self.module.params['restart'] - self.param_state = self.module.params['state'] - self._parse_comparisons() - self._update_params() - self.results = {'changed': False, 'actions': []} - self.diff = {} - self.diff_tracker = DifferenceTracker() - self.facts = {} - if self.param_default_host_ip: - valid_ip = False - if re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', self.param_default_host_ip): - valid_ip = True - if re.match(r'^\[[0-9a-fA-F:]+\]$', self.param_default_host_ip): - valid_ip = 
True - if re.match(r'^[0-9a-fA-F:]+$', self.param_default_host_ip): - self.param_default_host_ip = '[{0}]'.format(self.param_default_host_ip) - valid_ip = True - if not valid_ip: - self.fail('The value of default_host_ip must be an empty string, an IPv4 address, ' - 'or an IPv6 address. Got "{0}" instead.'.format(self.param_default_host_ip)) - - def _collect_all_options(self, active_options): - all_options = {} - for options in active_options: - for option in options.options: - all_options[option.name] = option - return all_options - - def _collect_all_module_params(self): - all_module_options = set() - for option, data in self.module.argument_spec.items(): - all_module_options.add(option) - if 'aliases' in data: - for alias in data['aliases']: - all_module_options.add(alias) - return all_module_options - - def _parse_comparisons(self): - # Keep track of all module params and all option aliases - all_module_options = self._collect_all_module_params() - comp_aliases = {} - for option_name, option in self.all_options.items(): - if option.not_an_ansible_option: - continue - comp_aliases[option_name] = option_name - for alias in option.ansible_aliases: - comp_aliases[alias] = option_name - # Process legacy ignore options - if self.module.params['ignore_image']: - self.all_options['image'].comparison = 'ignore' - if self.param_purge_networks: - self.all_options['networks'].comparison = 'strict' - # Process comparsions specified by user - if self.module.params.get('comparisons'): - # If '*' appears in comparisons, process it first - if '*' in self.module.params['comparisons']: - value = self.module.params['comparisons']['*'] - if value not in ('strict', 'ignore'): - self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!") - for option in self.all_options.values(): - if option.name == 'networks': - # `networks` is special: only update if - # some value is actually specified - if self.module.params['networks'] is None: - continue - 
option.comparison = value - # Now process all other comparisons. - comp_aliases_used = {} - for key, value in self.module.params['comparisons'].items(): - if key == '*': - continue - # Find main key - key_main = comp_aliases.get(key) - if key_main is None: - if key_main in all_module_options: - self.fail("The module option '%s' cannot be specified in the comparisons dict, " - "since it does not correspond to container's state!" % key) - if key not in self.all_options or self.all_options[key].not_an_ansible_option: - self.fail("Unknown module option '%s' in comparisons dict!" % key) - key_main = key - if key_main in comp_aliases_used: - self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main)) - comp_aliases_used[key_main] = key - # Check value and update accordingly - if value in ('strict', 'ignore'): - self.all_options[key_main].comparison = value - elif value == 'allow_more_present': - if self.all_options[key_main].comparison_type == 'value': - self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value)) - self.all_options[key_main].comparison = value - else: - self.fail("Unknown comparison mode '%s'!" 
% value) - # Copy values - for option in self.all_options.values(): - if option.copy_comparison_from is not None: - option.comparison = self.all_options[option.copy_comparison_from].comparison - # Check legacy values - if self.module.params['ignore_image'] and self.all_options['image'].comparison != 'ignore': - self.module.warn('The ignore_image option has been overridden by the comparisons option!') - if self.param_purge_networks and self.all_options['networks'].comparison != 'strict': - self.module.warn('The purge_networks option has been overridden by the comparisons option!') - - def _update_params(self): - if self.param_networks_cli_compatible is True and self.module.params['networks'] and self.module.params['network_mode'] is None: - # Same behavior as Docker CLI: if networks are specified, use the name of the first network as the value for network_mode - # (assuming no explicit value is specified for network_mode) - self.module.params['network_mode'] = self.module.params['networks'][0]['name'] - if self.param_container_default_behavior == 'compatibility': - old_default_values = dict( - auto_remove=False, - detach=True, - init=False, - interactive=False, - memory='0', - paused=False, - privileged=False, - read_only=False, - tty=False, - ) - for param, value in old_default_values.items(): - if self.module.params[param] is None: - self.module.params[param] = value - - def fail(self, *args, **kwargs): - self.client.fail(*args, **kwargs) - - def run(self): - if self.param_state in ('stopped', 'started', 'present'): - self.present(self.param_state) - elif self.param_state == 'absent': - self.absent() - - if not self.check_mode and not self.param_debug: - self.results.pop('actions') - - if self.module._diff or self.param_debug: - self.diff['before'], self.diff['after'] = self.diff_tracker.get_before_after() - self.results['diff'] = self.diff - - if self.facts: - self.results['container'] = self.facts - - def wait_for_state(self, container_id, complete_states=None, 
wait_states=None, accept_removal=False, max_wait=None): - delay = 1.0 - total_wait = 0 - while True: - # Inspect container - result = self.engine_driver.inspect_container_by_id(self.client, container_id) - if result is None: - if accept_removal: - return - msg = 'Encontered vanished container while waiting for container "{0}"' - self.fail(msg.format(container_id)) - # Check container state - state = result.get('State', {}).get('Status') - if complete_states is not None and state in complete_states: - return - if wait_states is not None and state not in wait_states: - msg = 'Encontered unexpected state "{1}" while waiting for container "{0}"' - self.fail(msg.format(container_id, state)) - # Wait - if max_wait is not None: - if total_wait > max_wait: - msg = 'Timeout of {1} seconds exceeded while waiting for container "{0}"' - self.fail(msg.format(container_id, max_wait)) - if total_wait + delay > max_wait: - delay = max_wait - total_wait - sleep(delay) - total_wait += delay - # Exponential backoff, but never wait longer than 10 seconds - # (1.1**24 < 10, 1.1**25 > 10, so it will take 25 iterations - # until the maximal 10 seconds delay is reached. By then, the - # code will have slept for ~1.5 minutes.) 
- delay = min(delay * 1.1, 10) - - def _collect_params(self, active_options): - parameters = [] - for options in active_options: - values = {} - engine = options.get_engine(self.engine_driver.name) - for option in options.options: - if not option.not_an_ansible_option and self.module.params[option.name] is not None: - values[option.name] = self.module.params[option.name] - values = options.preprocess(self.module, values) - engine.preprocess_value(self.module, self.client, self.engine_driver.get_api_version(self.client), options.options, values) - parameters.append((options, values)) - return parameters - - def present(self, state): - self.parameters = self._collect_params(self.options) - container = self._get_container(self.param_name) - was_running = container.running - was_paused = container.paused - container_created = False - - # If the image parameter was passed then we need to deal with the image - # version comparison. Otherwise we handle this depending on whether - # the container already runs or not; in the former case, in case the - # container needs to be restarted, we use the existing container's - # image ID. 
- image = self._get_image() - self.log(image, pretty_print=True) - if not container.exists or container.removing: - # New container - if container.removing: - self.log('Found container in removal phase') - else: - self.log('No container found') - if not self.param_image: - self.fail('Cannot create container when image is not specified!') - self.diff_tracker.add('exists', parameter=True, active=False) - if container.removing and not self.check_mode: - # Wait for container to be removed before trying to create it - self.wait_for_state( - container.id, wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout) - new_container = self.container_create(self.param_image) - if new_container: - container = new_container - container_created = True - else: - # Existing container - different, differences = self.has_different_configuration(container, image) - image_different = False - if self.all_options['image'].comparison == 'strict': - image_different = self._image_is_different(image, container) - if image_different or different or self.param_recreate: - self.diff_tracker.merge(differences) - self.diff['differences'] = differences.get_legacy_docker_container_diffs() - if image_different: - self.diff['image_different'] = True - self.log("differences") - self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True) - image_to_use = self.param_image - if not image_to_use and container and container.image: - image_to_use = container.image - if not image_to_use: - self.fail('Cannot recreate container when image is not specified or cannot be extracted from current container!') - if container.running: - self.container_stop(container.id) - self.container_remove(container.id) - if not self.check_mode: - self.wait_for_state( - container.id, wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout) - new_container = self.container_create(image_to_use) - if new_container: - container = new_container - 
container_created = True - - if container and container.exists: - container = self.update_limits(container, image) - container = self.update_networks(container, container_created) - - if state == 'started' and not container.running: - self.diff_tracker.add('running', parameter=True, active=was_running) - container = self.container_start(container.id) - elif state == 'started' and self.param_restart: - self.diff_tracker.add('running', parameter=True, active=was_running) - self.diff_tracker.add('restarted', parameter=True, active=False) - container = self.container_restart(container.id) - elif state == 'stopped' and container.running: - self.diff_tracker.add('running', parameter=False, active=was_running) - self.container_stop(container.id) - container = self._get_container(container.id) - - if state == 'started' and self.param_paused is not None and container.paused != self.param_paused: - self.diff_tracker.add('paused', parameter=self.param_paused, active=was_paused) - if not self.check_mode: - try: - if self.param_paused: - self.engine_driver.pause_container(self.client, container.id) - else: - self.engine_driver.unpause_container(self.client, container.id) - except Exception as exc: - self.fail("Error %s container %s: %s" % ( - "pausing" if self.param_paused else "unpausing", container.id, to_native(exc) - )) - container = self._get_container(container.id) - self.results['changed'] = True - self.results['actions'].append(dict(set_paused=self.param_paused)) - - self.facts = container.raw - - def absent(self): - container = self._get_container(self.param_name) - if container.exists: - if container.running: - self.diff_tracker.add('running', parameter=False, active=True) - self.container_stop(container.id) - self.diff_tracker.add('exists', parameter=False, active=True) - self.container_remove(container.id) - - def _output_logs(self, msg): - self.module.log(msg=msg) - - def _get_container(self, container): - ''' - Expects container ID or Name. 
Returns a container object - ''' - container = self.engine_driver.inspect_container_by_name(self.client, container) - return Container(container, self.engine_driver) - - def _get_image(self): - image_parameter = self.param_image - if not image_parameter: - self.log('No image specified') - return None - if is_image_name_id(image_parameter): - image = self.engine_driver.inspect_image_by_id(self.client, image_parameter) - else: - repository, tag = parse_repository_tag(image_parameter) - if not tag: - tag = "latest" - image = self.engine_driver.inspect_image_by_name(self.client, repository, tag) - if not image or self.param_pull: - if not self.check_mode: - self.log("Pull the image.") - image, alreadyToLatest = self.engine_driver.pull_image(self.client, repository, tag) - if alreadyToLatest: - self.results['changed'] = False - else: - self.results['changed'] = True - self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag))) - elif not image: - # If the image isn't there, claim we'll pull. - # (Implicitly: if the image is there, claim it already was latest.) 
- self.results['changed'] = True - self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag))) - - self.log("image") - self.log(image, pretty_print=True) - return image - def _image_is_different(self, image, container): - if image and image.get('Id'): - if container and container.image: - if image.get('Id') != container.image: - self.diff_tracker.add('image', parameter=image.get('Id'), active=container.image) - return True - return False - - def _compose_create_parameters(self, image): - params = {} - for options, values in self.parameters: - engine = options.get_engine(self.engine_driver.name) - if engine.can_set_value(self.engine_driver.get_api_version(self.client)): - engine.set_value(self.module, params, self.engine_driver.get_api_version(self.client), options.options, values) - params['Image'] = image - return params - - def _record_differences(self, differences, options, param_values, engine, container, image): - container_values = engine.get_value(self.module, container.raw, self.engine_driver.get_api_version(self.client), options.options) - expected_values = engine.get_expected_values( - self.module, self.client, self.engine_driver.get_api_version(self.client), options.options, image, param_values.copy()) - for option in options.options: - if option.name in expected_values: - param_value = expected_values[option.name] - container_value = container_values.get(option.name) - match = compare_generic(param_value, container_value, option.comparison, option.comparison_type) - - if not match: - # No match. - if engine.ignore_mismatching_result(self.module, self.client, self.engine_driver.get_api_version(self.client), - option, image, container_value, param_value): - # Ignore the result - continue - - # Record the differences - p = param_value - c = container_value - if option.comparison_type == 'set': - # Since the order does not matter, sort so that the diff output is better. 
- if p is not None: - p = sorted(p) - if c is not None: - c = sorted(c) - elif option.comparison_type == 'set(dict)': - # Since the order does not matter, sort so that the diff output is better. - if option.name == 'expected_mounts': - # For selected values, use one entry as key - def sort_key_fn(x): - return x['target'] - else: - # We sort the list of dictionaries by using the sorted items of a dict as its key. - def sort_key_fn(x): - return sorted((a, to_text(b, errors='surrogate_or_strict')) for a, b in x.items()) - if p is not None: - p = sorted(p, key=sort_key_fn) - if c is not None: - c = sorted(c, key=sort_key_fn) - differences.add(option.name, parameter=p, active=c) - - def has_different_configuration(self, container, image): - differences = DifferenceTracker() - for options, param_values in self.parameters: - engine = options.get_engine(self.engine_driver.name) - self._record_differences(differences, options, param_values, engine, container, image) - has_differences = not differences.empty - return has_differences, differences - - def has_different_resource_limits(self, container, image): - differences = DifferenceTracker() - for options, param_values in self.parameters: - engine = options.get_engine(self.engine_driver.name) - if not engine.can_update_value(self.engine_driver.get_api_version(self.client)): - continue - self._record_differences(differences, options, param_values, engine, container, image) - has_differences = not differences.empty - return has_differences, differences - - def _compose_update_parameters(self): - result = {} - for options, values in self.parameters: - engine = options.get_engine(self.engine_driver.name) - if not engine.can_update_value(self.engine_driver.get_api_version(self.client)): - continue - engine.update_value(self.module, result, self.engine_driver.get_api_version(self.client), options.options, values) - return result - - def update_limits(self, container, image): - limits_differ, different_limits = 
self.has_different_resource_limits(container, image) - if limits_differ: - self.log("limit differences:") - self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True) - self.diff_tracker.merge(different_limits) - if limits_differ and not self.check_mode: - self.container_update(container.id, self._compose_update_parameters()) - return self._get_container(container.id) - return container - - def has_network_differences(self, container): - ''' - Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6 - ''' - different = False - differences = [] - - if not self.module.params['networks']: - return different, differences - - if not container.container.get('NetworkSettings'): - self.fail("has_missing_networks: Error parsing container properties. NetworkSettings missing.") - - connected_networks = container.container['NetworkSettings']['Networks'] - for network in self.module.params['networks']: - network_info = connected_networks.get(network['name']) - if network_info is None: - different = True - differences.append(dict( - parameter=network, - container=None - )) - else: - diff = False - network_info_ipam = network_info.get('IPAMConfig') or {} - if network.get('ipv4_address') and network['ipv4_address'] != network_info_ipam.get('IPv4Address'): - diff = True - if network.get('ipv6_address') and network['ipv6_address'] != network_info_ipam.get('IPv6Address'): - diff = True - if network.get('aliases'): - if not compare_generic(network['aliases'], network_info.get('Aliases'), 'allow_more_present', 'set'): - diff = True - if network.get('links'): - expected_links = [] - for link, alias in network['links']: - expected_links.append("%s:%s" % (link, alias)) - if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'): - diff = True - if diff: - different = True - differences.append(dict( - parameter=network, - container=dict( - name=network['name'], - 
ipv4_address=network_info_ipam.get('IPv4Address'), - ipv6_address=network_info_ipam.get('IPv6Address'), - aliases=network_info.get('Aliases'), - links=network_info.get('Links') - ) - )) - return different, differences - - def has_extra_networks(self, container): - ''' - Check if the container is connected to non-requested networks - ''' - extra_networks = [] - extra = False - - if not container.container.get('NetworkSettings'): - self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.") - - connected_networks = container.container['NetworkSettings'].get('Networks') - if connected_networks: - for network, network_config in connected_networks.items(): - keep = False - if self.module.params['networks']: - for expected_network in self.module.params['networks']: - if expected_network['name'] == network: - keep = True - if not keep: - extra = True - extra_networks.append(dict(name=network, id=network_config['NetworkID'])) - return extra, extra_networks - - def update_networks(self, container, container_created): - updated_container = container - if self.all_options['networks'].comparison != 'ignore' or container_created: - has_network_differences, network_differences = self.has_network_differences(container) - if has_network_differences: - if self.diff.get('differences'): - self.diff['differences'].append(dict(network_differences=network_differences)) - else: - self.diff['differences'] = [dict(network_differences=network_differences)] - for netdiff in network_differences: - self.diff_tracker.add( - 'network.{0}'.format(netdiff['parameter']['name']), - parameter=netdiff['parameter'], - active=netdiff['container'] - ) - self.results['changed'] = True - updated_container = self._add_networks(container, network_differences) - - if (self.all_options['networks'].comparison == 'strict' and self.module.params['networks'] is not None) or self.param_purge_networks: - has_extra_networks, extra_networks = self.has_extra_networks(container) - if 
has_extra_networks: - if self.diff.get('differences'): - self.diff['differences'].append(dict(purge_networks=extra_networks)) - else: - self.diff['differences'] = [dict(purge_networks=extra_networks)] - for extra_network in extra_networks: - self.diff_tracker.add( - 'network.{0}'.format(extra_network['name']), - active=extra_network - ) - self.results['changed'] = True - updated_container = self._purge_networks(container, extra_networks) - return updated_container - - def _add_networks(self, container, differences): - for diff in differences: - # remove the container from the network, if connected - if diff.get('container'): - self.results['actions'].append(dict(removed_from_network=diff['parameter']['name'])) - if not self.check_mode: - try: - self.engine_driver.disconnect_container_from_network(self.client, container.id, diff['parameter']['id']) - except Exception as exc: - self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'], - to_native(exc))) - # connect to the network - self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=diff['parameter'])) - if not self.check_mode: - params = {key: value for key, value in diff['parameter'].items() if key not in ('id', 'name')} - try: - self.log("Connecting container to network %s" % diff['parameter']['id']) - self.log(params, pretty_print=True) - self.engine_driver.connect_container_to_network(self.client, container.id, diff['parameter']['id'], params) - except Exception as exc: - self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], to_native(exc))) - return self._get_container(container.id) - - def _purge_networks(self, container, networks): - for network in networks: - self.results['actions'].append(dict(removed_from_network=network['name'])) - if not self.check_mode: - try: - self.engine_driver.disconnect_container_from_network(self.client, container.id, network['name']) - except Exception as exc: - 
self.fail("Error disconnecting container from network %s - %s" % (network['name'], - to_native(exc))) - return self._get_container(container.id) - - def container_create(self, image): - create_parameters = self._compose_create_parameters(image) - self.log("create container") - self.log("image: %s parameters:" % image) - self.log(create_parameters, pretty_print=True) - self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters)) - self.results['changed'] = True - new_container = None - if not self.check_mode: - try: - container_id = self.engine_driver.create_container(self.client, self.param_name, create_parameters) - except Exception as exc: - self.fail("Error creating container: %s" % to_native(exc)) - return self._get_container(container_id) - return new_container - - def container_start(self, container_id): - self.log("start container %s" % (container_id)) - self.results['actions'].append(dict(started=container_id)) - self.results['changed'] = True - if not self.check_mode: - try: - self.engine_driver.start_container(self.client, container_id) - except Exception as exc: - self.fail("Error starting container %s: %s" % (container_id, to_native(exc))) - - if self.module.params['detach'] is False: - status = self.engine_driver.wait_for_container(self.client, container_id) - self.client.fail_results['status'] = status - self.results['status'] = status - - if self.module.params['auto_remove']: - output = "Cannot retrieve result as auto_remove is enabled" - if self.param_output_logs: - self.module.warn('Cannot output_logs if auto_remove is enabled!') - else: - output, real_output = self.engine_driver.get_container_output(self.client, container_id) - if real_output and self.param_output_logs: - self._output_logs(msg=output) - - if self.param_cleanup: - self.container_remove(container_id, force=True) - insp = self._get_container(container_id) - if insp.raw: - insp.raw['Output'] = output - else: - insp.raw = dict(Output=output) - 
if status != 0: - # Set `failed` to True and return output as msg - self.results['failed'] = True - self.results['msg'] = output - return insp - return self._get_container(container_id) - - def container_remove(self, container_id, link=False, force=False): - volume_state = (not self.param_keep_volumes) - self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force)) - self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force)) - self.results['changed'] = True - if not self.check_mode: - try: - self.engine_driver.remove_container(self.client, container_id, remove_volumes=volume_state, link=link, force=force) - except Exception as exc: - self.client.fail("Error removing container %s: %s" % (container_id, to_native(exc))) - - def container_update(self, container_id, update_parameters): - if update_parameters: - self.log("update container %s" % (container_id)) - self.log(update_parameters, pretty_print=True) - self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters)) - self.results['changed'] = True - if not self.check_mode: - try: - self.engine_driver.update_container(self.client, container_id, update_parameters) - except Exception as exc: - self.fail("Error updating container %s: %s" % (container_id, to_native(exc))) - return self._get_container(container_id) - - def container_kill(self, container_id): - self.results['actions'].append(dict(killed=container_id, signal=self.param_kill_signal)) - self.results['changed'] = True - if not self.check_mode: - try: - self.engine_driver.kill_container(self.client, container_id, kill_signal=self.param_kill_signal) - except Exception as exc: - self.fail("Error killing container %s: %s" % (container_id, to_native(exc))) - - def container_restart(self, container_id): - self.results['actions'].append(dict(restarted=container_id, timeout=self.module.params['stop_timeout'])) - self.results['changed'] = 
True - if not self.check_mode: - try: - self.engine_driver.restart_container(self.client, container_id, self.module.params['stop_timeout'] or 10) - except Exception as exc: - self.fail("Error restarting container %s: %s" % (container_id, to_native(exc))) - return self._get_container(container_id) - - def container_stop(self, container_id): - if self.param_force_kill: - self.container_kill(container_id) - return - self.results['actions'].append(dict(stopped=container_id, timeout=self.module.params['stop_timeout'])) - self.results['changed'] = True - if not self.check_mode: - try: - self.engine_driver.stop_container(self.client, container_id, self.module.params['stop_timeout']) - except Exception as exc: - self.fail("Error stopping container %s: %s" % (container_id, to_native(exc))) +from ansible_collections.community.docker.plugins.module_utils.module_container.module import ( + run_module, +) def main(): engine_driver = DockerAPIEngineDriver() - - module, active_options, client = engine_driver.setup( - argument_spec=dict( - cleanup=dict(type='bool', default=False), - comparisons=dict(type='dict'), - container_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']), - command_handling=dict(type='str', choices=['compatibility', 'correct'], default='correct'), - default_host_ip=dict(type='str'), - force_kill=dict(type='bool', default=False, aliases=['forcekill']), - ignore_image=dict(type='bool', default=False), - image=dict(type='str'), - image_label_mismatch=dict(type='str', choices=['ignore', 'fail'], default='ignore'), - keep_volumes=dict(type='bool', default=True), - kill_signal=dict(type='str'), - name=dict(type='str', required=True), - networks_cli_compatible=dict(type='bool', default=True), - output_logs=dict(type='bool', default=False), - paused=dict(type='bool'), - pull=dict(type='bool', default=False), - purge_networks=dict(type='bool', default=False), - recreate=dict(type='bool', default=False), - 
removal_wait_timeout=dict(type='float'), - restart=dict(type='bool', default=False), - state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']), - ), - required_if=[ - ('state', 'present', ['image']) - ], - ) - - def execute(): - cm = ContainerManager(module, engine_driver, client, active_options) - cm.run() - module.exit_json(**sanitize_result(cm.results)) - - engine_driver.run(execute, client) + run_module(engine_driver) if __name__ == '__main__': From bcdccbd4c101a536f981a6867c66e1f9a1d77de1 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 12 Jul 2022 11:27:58 +0200 Subject: [PATCH 29/38] Add Python 2.6 ignore.txt entries for ansible-core < 2.12. --- tests/sanity/ignore-2.10.txt | 3 +++ tests/sanity/ignore-2.11.txt | 3 +++ tests/sanity/ignore-2.9.txt | 3 +++ 3 files changed, 9 insertions(+) diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index f999281cb..f1a910e3d 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -5,3 +5,6 @@ .azure-pipelines/scripts/publish-codecov.py future-import-boilerplate .azure-pipelines/scripts/publish-codecov.py metaclass-boilerplate plugins/modules/current_container_facts.py validate-modules:return-syntax-error +plugins/module_utils/module_container/module.py compile-2.6!skip # Uses Python 2.7+ syntax +plugins/module_utils/module_container/module.py import-2.6!skip # Uses Python 2.7+ syntax +plugins/modules/docker_container.py import-2.6!skip # Import uses Python 2.7+ syntax diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index f999281cb..f1a910e3d 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -5,3 +5,6 @@ .azure-pipelines/scripts/publish-codecov.py future-import-boilerplate .azure-pipelines/scripts/publish-codecov.py metaclass-boilerplate plugins/modules/current_container_facts.py validate-modules:return-syntax-error +plugins/module_utils/module_container/module.py 
compile-2.6!skip # Uses Python 2.7+ syntax +plugins/module_utils/module_container/module.py import-2.6!skip # Uses Python 2.7+ syntax +plugins/modules/docker_container.py import-2.6!skip # Import uses Python 2.7+ syntax diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index a0dc80661..4d39d8bb6 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -4,3 +4,6 @@ .azure-pipelines/scripts/publish-codecov.py compile-3.5!skip # Uses Python 3.6+ syntax .azure-pipelines/scripts/publish-codecov.py future-import-boilerplate .azure-pipelines/scripts/publish-codecov.py metaclass-boilerplate +plugins/module_utils/module_container/module.py compile-2.6!skip # Uses Python 2.7+ syntax +plugins/module_utils/module_container/module.py import-2.6!skip # Uses Python 2.7+ syntax +plugins/modules/docker_container.py import-2.6!skip # Import uses Python 2.7+ syntax From e714d8d0bb5e0c2d88d28d31eb077798e103ecf0 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 12 Jul 2022 21:57:09 +0200 Subject: [PATCH 30/38] Improve healthcheck handling. 
--- plugins/module_utils/module_container/base.py | 11 ++- .../module_container/docker_api.py | 21 +++-- plugins/module_utils/util.py | 78 ++++++++++--------- 3 files changed, 62 insertions(+), 48 deletions(-) diff --git a/plugins/module_utils/module_container/base.py b/plugins/module_utils/module_container/base.py index dba79df0c..97b84bf04 100644 --- a/plugins/module_utils/module_container/base.py +++ b/plugins/module_utils/module_container/base.py @@ -18,6 +18,7 @@ from ansible_collections.community.docker.plugins.module_utils.util import ( clean_dict_booleans_for_docker_api, + normalize_healthcheck, omit_none_from_dict, ) @@ -428,6 +429,14 @@ def _preprocess_env(module, values): } +def _preprocess_healthcheck(module, values): + if not values: + return {} + return { + 'healthcheck': normalize_healthcheck(values['healthcheck'], normalize_test=False), + } + + def _preprocess_convert_to_bytes(module, values, name, unlimited_value=None): if name not in values: return values @@ -873,7 +882,7 @@ def _preprocess_ports(module, values): ) OPTION_HEALTHCHECK = ( - OptionGroup() + OptionGroup(preprocess=_preprocess_healthcheck) .add_option('healthcheck', type='dict', ansible_suboptions=dict( test=dict(type='raw'), interval=dict(type='str'), diff --git a/plugins/module_utils/module_container/docker_api.py b/plugins/module_utils/module_container/docker_api.py index 264c60d05..2755a6350 100644 --- a/plugins/module_utils/module_container/docker_api.py +++ b/plugins/module_utils/module_container/docker_api.py @@ -90,8 +90,8 @@ ) from ansible_collections.community.docker.plugins.module_utils.util import ( + normalize_healthcheck_test, omit_none_from_dict, - parse_healthcheck, ) from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion @@ -709,17 +709,16 @@ def _preprocess_etc_hosts(module, client, api_version, value): def _preprocess_healthcheck(module, client, api_version, value): if value is None: return value - healthcheck, disable_healthcheck 
= parse_healthcheck(value) - if disable_healthcheck: - healthcheck = {'test': ['NONE']} - if not healthcheck: - return None + if not value or not value.get('test'): + value = {'test': ['NONE']} + elif 'test' in value: + value['test'] = normalize_healthcheck_test(value['test']) return omit_none_from_dict({ - 'Test': healthcheck.get('test'), - 'Interval': healthcheck.get('interval'), - 'Timeout': healthcheck.get('timeout'), - 'StartPeriod': healthcheck.get('start_period'), - 'Retries': healthcheck.get('retries'), + 'Test': value.get('test'), + 'Interval': value.get('interval'), + 'Timeout': value.get('timeout'), + 'StartPeriod': value.get('start_period'), + 'Retries': value.get('retries'), }) diff --git a/plugins/module_utils/util.py b/plugins/module_utils/util.py index b09017da7..6d1a14bd7 100644 --- a/plugins/module_utils/util.py +++ b/plugins/module_utils/util.py @@ -331,52 +331,58 @@ def convert_duration_to_nanosecond(time_str): return time_in_nanoseconds -def parse_healthcheck(healthcheck): +def normalize_healthcheck_test(test): + if isinstance(test, (tuple, list)): + return [str(e) for e in test] + return ['CMD-SHELL', str(test)] + + +def normalize_healthcheck(healthcheck, normalize_test=False): """ - Return dictionary of healthcheck parameters and boolean if - healthcheck defined in image was requested to be disabled. + Return dictionary of healthcheck parameters. 
""" - if (not healthcheck) or (not healthcheck.get('test')): - return None, None - result = dict() # All supported healthcheck parameters - options = dict( - test='test', - interval='interval', - timeout='timeout', - start_period='start_period', - retries='retries' - ) + options = ('test', 'interval', 'timeout', 'start_period', 'retries') - duration_options = ['interval', 'timeout', 'start_period'] + duration_options = ('interval', 'timeout', 'start_period') - for (key, value) in options.items(): - if value in healthcheck: - if healthcheck.get(value) is None: + for key in options: + if key in healthcheck: + value = healthcheck[key] + if value is None: # due to recursive argument_spec, all keys are always present # (but have default value None if not specified) continue - if value in duration_options: - time = convert_duration_to_nanosecond(healthcheck.get(value)) - if time: - result[key] = time - elif healthcheck.get(value): - result[key] = healthcheck.get(value) - if key == 'test': - if isinstance(result[key], (tuple, list)): - result[key] = [str(e) for e in result[key]] - else: - result[key] = ['CMD-SHELL', str(result[key])] - elif key == 'retries': - try: - result[key] = int(result[key]) - except ValueError: - raise ValueError( - 'Cannot parse number of retries for healthcheck. ' - 'Expected an integer, got "{0}".'.format(result[key]) - ) + if key in duration_options: + value = convert_duration_to_nanosecond(value) + if not value: + continue + if key == 'retries': + try: + value = int(value) + except ValueError: + raise ValueError( + 'Cannot parse number of retries for healthcheck. ' + 'Expected an integer, got "{0}".'.format(value) + ) + if key == 'test' and normalize_test: + value = normalize_healthcheck_test(value) + result[key] = value + + return result + + +def parse_healthcheck(healthcheck): + """ + Return dictionary of healthcheck parameters and boolean if + healthcheck defined in image was requested to be disabled. 
+ """ + if (not healthcheck) or (not healthcheck.get('test')): + return None, None + + result = normalize_healthcheck(healthcheck, normalize_test=True) if result['test'] == ['NONE']: # If the user explicitly disables the healthcheck, return None From 5677d30e86dfe7803c7a218c431548342e15ae43 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 12 Jul 2022 22:25:32 +0200 Subject: [PATCH 31/38] Fix container removal logic. --- plugins/module_utils/module_container/docker_api.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/module_utils/module_container/docker_api.py b/plugins/module_utils/module_container/docker_api.py index 2755a6350..d78a730e1 100644 --- a/plugins/module_utils/module_container/docker_api.py +++ b/plugins/module_utils/module_container/docker_api.py @@ -352,7 +352,8 @@ def remove_container(self, client, container_id, remove_volumes=False, link=Fals continue if 'removal of container ' in exc.explanation and ' is already in progress' in exc.explanation: pass - raise + else: + raise # We only loop when explicitly requested by 'continue' break From 04df1b2fb0e0cfb1106949574ed7cad317399a99 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 12 Jul 2022 22:44:01 +0200 Subject: [PATCH 32/38] ci_complete From 1a7d03aa4f1478b3095751569e92f5ff0baf186d Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 14 Jul 2022 06:54:24 +0200 Subject: [PATCH 33/38] Remove handling of older Docker SDK for Python versions from integration tests. 
--- .../targets/docker_container/tasks/main.yml | 5 +- .../tasks/tests/mounts-volumes.yml | 22 - .../docker_container/tasks/tests/network.yml | 1127 ++++++++--------- .../docker_container/tasks/tests/options.yml | 160 +-- 4 files changed, 561 insertions(+), 753 deletions(-) diff --git a/tests/integration/targets/docker_container/tasks/main.yml b/tests/integration/targets/docker_container/tasks/main.yml index b07a41a62..4a1204f3f 100644 --- a/tests/integration/targets/docker_container/tasks/main.yml +++ b/tests/integration/targets/docker_container/tasks/main.yml @@ -53,10 +53,9 @@ state: absent force: yes with_items: "{{ dnetworks }}" - when: docker_py_version is version('1.10.0', '>=') diff: no - when: docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.25', '>=') + when: docker_api_version is version('1.25', '>=') - fail: msg="Too old docker / docker-py version to run all docker_container tests!" - when: not(docker_py_version is version('3.5.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) + when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) diff --git a/tests/integration/targets/docker_container/tasks/tests/mounts-volumes.yml b/tests/integration/targets/docker_container/tasks/tests/mounts-volumes.yml index 2475f9a90..8cb08e3cf 100644 --- a/tests/integration/targets/docker_container/tasks/tests/mounts-volumes.yml +++ b/tests/integration/targets/docker_container/tasks/tests/mounts-volumes.yml @@ -33,7 +33,6 @@ type: bind read_only: no register: mounts_1 - ignore_errors: yes - name: mounts (idempotency) docker_container: @@ -50,7 +49,6 @@ target: /tmp type: bind register: mounts_2 - ignore_errors: yes - name: mounts (less mounts) docker_container: @@ -63,7 +61,6 @@ target: /tmp type: bind register: mounts_3 - ignore_errors: yes - name: mounts (more mounts) 
docker_container: @@ -81,7 +78,6 @@ read_only: yes force_kill: yes register: mounts_4 - ignore_errors: yes - name: mounts (different modes) docker_container: @@ -99,7 +95,6 @@ read_only: no force_kill: yes register: mounts_5 - ignore_errors: yes - name: mounts (endpoint collision) docker_container: @@ -161,13 +156,6 @@ - "'The mount point \"/x\" appears twice in the mounts option' == mounts_6.msg" - mounts_7 is changed - mounts_8 is not changed - when: docker_py_version is version('2.6.0', '>=') -- assert: - that: - - mounts_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in mounts_1.msg" - - "'Minimum version required is 2.6.0 ' in mounts_1.msg" - when: docker_py_version is version('2.6.0', '<') #################################################################### ## mounts + volumes ################################################ @@ -187,7 +175,6 @@ volumes: - /tmp:/tmp register: mounts_volumes_1 - ignore_errors: yes - name: mounts + volumes (idempotency) docker_container: @@ -203,7 +190,6 @@ volumes: - /tmp:/tmp register: mounts_volumes_2 - ignore_errors: yes - name: mounts + volumes (switching) docker_container: @@ -220,7 +206,6 @@ - /:/whatever:ro force_kill: yes register: mounts_volumes_3 - ignore_errors: yes - name: mounts + volumes (collision, should fail) docker_container: @@ -253,13 +238,6 @@ - mounts_volumes_3 is changed - mounts_volumes_4 is failed - "'The mount point \"/tmp\" appears both in the volumes and mounts option' in mounts_volumes_4.msg" - when: docker_py_version is version('2.6.0', '>=') -- assert: - that: - - mounts_volumes_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in mounts_1.msg" - - "'Minimum version required is 2.6.0 ' in mounts_1.msg" - when: docker_py_version is version('2.6.0', '<') #################################################################### ## volume_driver ################################################### diff --git a/tests/integration/targets/docker_container/tasks/tests/network.yml 
b/tests/integration/targets/docker_container/tasks/tests/network.yml index 028fca3a8..55555ad6b 100644 --- a/tests/integration/targets/docker_container/tasks/tests/network.yml +++ b/tests/integration/targets/docker_container/tasks/tests/network.yml @@ -20,7 +20,6 @@ - "{{ nname_2 }}" loop_control: loop_var: network_name - when: docker_py_version is version('1.10.0', '>=') - set_fact: subnet_ipv4_base: 10.{{ 16 + (240 | random) }}.{{ 16 + (240 | random) }} @@ -57,7 +56,6 @@ - subnet: "{{ subnet_ipv4 }}" - subnet: "{{ subnet_ipv6 }}" state: present - when: docker_py_version is version('1.10.0', '>=') #################################################################### ## network_mode #################################################### @@ -147,589 +145,577 @@ ## networks, purge_networks for networks_cli_compatible=no ######### #################################################################### -- block: - - name: networks_cli_compatible=no, networks w/o purge_networks - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: - - name: "{{ nname_1 }}" - - name: "{{ nname_2 }}" - networks_cli_compatible: no - register: networks_1 - - - name: networks_cli_compatible=no, networks w/o purge_networks - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: - - name: "{{ nname_1 }}" - - name: "{{ nname_2 }}" - networks_cli_compatible: no - register: networks_2 - - - name: networks_cli_compatible=no, networks, purge_networks - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - purge_networks: yes - networks: - - name: bridge - - name: "{{ nname_1 }}" - networks_cli_compatible: no - force_kill: yes - register: networks_3 - - - name: networks_cli_compatible=no, networks, purge_networks (idempotency) - 
docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - purge_networks: yes - networks: - - name: "{{ nname_1 }}" - - name: bridge - networks_cli_compatible: no - register: networks_4 - - - name: networks_cli_compatible=no, networks (less networks) - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: - - name: bridge - networks_cli_compatible: no - register: networks_5 - - - name: networks_cli_compatible=no, networks, purge_networks (less networks) - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - purge_networks: yes - networks: - - name: bridge - networks_cli_compatible: no - force_kill: yes - register: networks_6 - - - name: networks_cli_compatible=no, networks, purge_networks (more networks) - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - purge_networks: yes - networks: - - name: bridge - - name: "{{ nname_2 }}" - networks_cli_compatible: no - force_kill: yes - register: networks_7 - - - name: cleanup - docker_container: - name: "{{ cname }}" - state: absent - force_kill: yes - diff: no - - - assert: - that: - # networks_1 has networks default, 'bridge', nname_1 - - networks_1 is changed - - networks_1.container.NetworkSettings.Networks | length == 3 - - nname_1 in networks_1.container.NetworkSettings.Networks - - nname_2 in networks_1.container.NetworkSettings.Networks - - "'default' in networks_1.container.NetworkSettings.Networks or 'bridge' in networks_1.container.NetworkSettings.Networks" - # networks_2 has networks default, 'bridge', nname_1 - - networks_2 is not changed - - networks_2.container.NetworkSettings.Networks | length == 3 - - nname_1 in networks_2.container.NetworkSettings.Networks - - nname_2 
in networks_1.container.NetworkSettings.Networks - - "'default' in networks_1.container.NetworkSettings.Networks or 'bridge' in networks_1.container.NetworkSettings.Networks" - # networks_3 has networks 'bridge', nname_1 - - networks_3 is changed - - networks_3.container.NetworkSettings.Networks | length == 2 - - nname_1 in networks_3.container.NetworkSettings.Networks - - "'default' in networks_3.container.NetworkSettings.Networks or 'bridge' in networks_3.container.NetworkSettings.Networks" - # networks_4 has networks 'bridge', nname_1 - - networks_4 is not changed - - networks_4.container.NetworkSettings.Networks | length == 2 - - nname_1 in networks_4.container.NetworkSettings.Networks - - "'default' in networks_4.container.NetworkSettings.Networks or 'bridge' in networks_4.container.NetworkSettings.Networks" - # networks_5 has networks 'bridge', nname_1 - - networks_5 is not changed - - networks_5.container.NetworkSettings.Networks | length == 2 - - nname_1 in networks_5.container.NetworkSettings.Networks - - "'default' in networks_5.container.NetworkSettings.Networks or 'bridge' in networks_5.container.NetworkSettings.Networks" - # networks_6 has networks 'bridge' - - networks_6 is changed - - networks_6.container.NetworkSettings.Networks | length == 1 - - "'default' in networks_6.container.NetworkSettings.Networks or 'bridge' in networks_6.container.NetworkSettings.Networks" - # networks_7 has networks 'bridge', nname_2 - - networks_7 is changed - - networks_7.container.NetworkSettings.Networks | length == 2 - - nname_2 in networks_7.container.NetworkSettings.Networks - - "'default' in networks_7.container.NetworkSettings.Networks or 'bridge' in networks_7.container.NetworkSettings.Networks" - - when: docker_py_version is version('1.10.0', '>=') +- name: networks_cli_compatible=no, networks w/o purge_networks + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + 
- name: "{{ nname_1 }}" + - name: "{{ nname_2 }}" + networks_cli_compatible: no + register: networks_1 + +- name: networks_cli_compatible=no, networks w/o purge_networks + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_1 }}" + - name: "{{ nname_2 }}" + networks_cli_compatible: no + register: networks_2 + +- name: networks_cli_compatible=no, networks, purge_networks + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + purge_networks: yes + networks: + - name: bridge + - name: "{{ nname_1 }}" + networks_cli_compatible: no + force_kill: yes + register: networks_3 + +- name: networks_cli_compatible=no, networks, purge_networks (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + purge_networks: yes + networks: + - name: "{{ nname_1 }}" + - name: bridge + networks_cli_compatible: no + register: networks_4 + +- name: networks_cli_compatible=no, networks (less networks) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: bridge + networks_cli_compatible: no + register: networks_5 + +- name: networks_cli_compatible=no, networks, purge_networks (less networks) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + purge_networks: yes + networks: + - name: bridge + networks_cli_compatible: no + force_kill: yes + register: networks_6 + +- name: networks_cli_compatible=no, networks, purge_networks (more networks) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + purge_networks: yes + networks: + - 
name: bridge + - name: "{{ nname_2 }}" + networks_cli_compatible: no + force_kill: yes + register: networks_7 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + # networks_1 has networks default, 'bridge', nname_1 + - networks_1 is changed + - networks_1.container.NetworkSettings.Networks | length == 3 + - nname_1 in networks_1.container.NetworkSettings.Networks + - nname_2 in networks_1.container.NetworkSettings.Networks + - "'default' in networks_1.container.NetworkSettings.Networks or 'bridge' in networks_1.container.NetworkSettings.Networks" + # networks_2 has networks default, 'bridge', nname_1 + - networks_2 is not changed + - networks_2.container.NetworkSettings.Networks | length == 3 + - nname_1 in networks_2.container.NetworkSettings.Networks + - nname_2 in networks_1.container.NetworkSettings.Networks + - "'default' in networks_1.container.NetworkSettings.Networks or 'bridge' in networks_1.container.NetworkSettings.Networks" + # networks_3 has networks 'bridge', nname_1 + - networks_3 is changed + - networks_3.container.NetworkSettings.Networks | length == 2 + - nname_1 in networks_3.container.NetworkSettings.Networks + - "'default' in networks_3.container.NetworkSettings.Networks or 'bridge' in networks_3.container.NetworkSettings.Networks" + # networks_4 has networks 'bridge', nname_1 + - networks_4 is not changed + - networks_4.container.NetworkSettings.Networks | length == 2 + - nname_1 in networks_4.container.NetworkSettings.Networks + - "'default' in networks_4.container.NetworkSettings.Networks or 'bridge' in networks_4.container.NetworkSettings.Networks" + # networks_5 has networks 'bridge', nname_1 + - networks_5 is not changed + - networks_5.container.NetworkSettings.Networks | length == 2 + - nname_1 in networks_5.container.NetworkSettings.Networks + - "'default' in networks_5.container.NetworkSettings.Networks or 'bridge' in 
networks_5.container.NetworkSettings.Networks" + # networks_6 has networks 'bridge' + - networks_6 is changed + - networks_6.container.NetworkSettings.Networks | length == 1 + - "'default' in networks_6.container.NetworkSettings.Networks or 'bridge' in networks_6.container.NetworkSettings.Networks" + # networks_7 has networks 'bridge', nname_2 + - networks_7 is changed + - networks_7.container.NetworkSettings.Networks | length == 2 + - nname_2 in networks_7.container.NetworkSettings.Networks + - "'default' in networks_7.container.NetworkSettings.Networks or 'bridge' in networks_7.container.NetworkSettings.Networks" #################################################################### ## networks for networks_cli_compatible=yes ######################## #################################################################### -- block: - - name: networks_cli_compatible=yes, networks specified - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: - - name: "{{ nname_1 }}" - aliases: - - alias1 - - alias2 - - name: "{{ nname_2 }}" - networks_cli_compatible: yes - register: networks_1 - - - name: networks_cli_compatible=yes, networks specified - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: - - name: "{{ nname_1 }}" - - name: "{{ nname_2 }}" - networks_cli_compatible: yes - register: networks_2 - - - name: cleanup - docker_container: - name: "{{ cname }}" - state: absent - force_kill: yes - diff: no - - - name: networks_cli_compatible=yes, empty networks list specified - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: [] - networks_cli_compatible: yes - register: networks_3 - - - name: networks_cli_compatible=yes, empty networks list specified - docker_container: - image: "{{ 
docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: [] - networks_cli_compatible: yes - register: networks_4 - - - name: networks_cli_compatible=yes, empty networks list specified, purge_networks - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: [] - networks_cli_compatible: yes - purge_networks: yes - force_kill: yes - register: networks_5 - - - name: cleanup - docker_container: - name: "{{ cname }}" - state: absent - force_kill: yes - diff: no - - - name: networks_cli_compatible=yes, networks not specified - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks_cli_compatible: yes - force_kill: yes - register: networks_6 - - - name: networks_cli_compatible=yes, networks not specified - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks_cli_compatible: yes - register: networks_7 - - - name: networks_cli_compatible=yes, networks not specified, purge_networks - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks_cli_compatible: yes - purge_networks: yes - force_kill: yes - register: networks_8 - - - name: cleanup - docker_container: - name: "{{ cname }}" - state: absent - force_kill: yes - diff: no - - - debug: var=networks_3 - - - assert: - that: - # networks_1 has networks nname_1, nname_2 - - networks_1 is changed - - networks_1.container.NetworkSettings.Networks | length == 2 - - nname_1 in networks_1.container.NetworkSettings.Networks - - nname_2 in networks_1.container.NetworkSettings.Networks - # networks_2 has networks nname_1, nname_2 - - networks_2 is not changed - - networks_2.container.NetworkSettings.Networks | length 
== 2 - - nname_1 in networks_2.container.NetworkSettings.Networks - - nname_2 in networks_1.container.NetworkSettings.Networks - # networks_3 has networks 'bridge' - - networks_3 is changed - - networks_3.container.NetworkSettings.Networks | length == 1 - - "'default' in networks_3.container.NetworkSettings.Networks or 'bridge' in networks_3.container.NetworkSettings.Networks" - # networks_4 has networks 'bridge' - - networks_4 is not changed - - networks_4.container.NetworkSettings.Networks | length == 1 - - "'default' in networks_4.container.NetworkSettings.Networks or 'bridge' in networks_4.container.NetworkSettings.Networks" - # networks_5 has no networks - - networks_5 is changed - - networks_5.container.NetworkSettings.Networks | length == 0 - # networks_6 has networks 'bridge' - - networks_6 is changed - - networks_6.container.NetworkSettings.Networks | length == 1 - - "'default' in networks_6.container.NetworkSettings.Networks or 'bridge' in networks_6.container.NetworkSettings.Networks" - # networks_7 has networks 'bridge' - - networks_7 is not changed - - networks_7.container.NetworkSettings.Networks | length == 1 - - "'default' in networks_7.container.NetworkSettings.Networks or 'bridge' in networks_7.container.NetworkSettings.Networks" - # networks_8 has no networks - - networks_8 is changed - - networks_8.container.NetworkSettings.Networks | length == 0 - - when: docker_py_version is version('1.10.0', '>=') +- name: networks_cli_compatible=yes, networks specified + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_1 }}" + aliases: + - alias1 + - alias2 + - name: "{{ nname_2 }}" + networks_cli_compatible: yes + register: networks_1 + +- name: networks_cli_compatible=yes, networks specified + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + 
- name: "{{ nname_1 }}" + - name: "{{ nname_2 }}" + networks_cli_compatible: yes + register: networks_2 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- name: networks_cli_compatible=yes, empty networks list specified + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: [] + networks_cli_compatible: yes + register: networks_3 + +- name: networks_cli_compatible=yes, empty networks list specified + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: [] + networks_cli_compatible: yes + register: networks_4 + +- name: networks_cli_compatible=yes, empty networks list specified, purge_networks + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: [] + networks_cli_compatible: yes + purge_networks: yes + force_kill: yes + register: networks_5 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- name: networks_cli_compatible=yes, networks not specified + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks_cli_compatible: yes + force_kill: yes + register: networks_6 + +- name: networks_cli_compatible=yes, networks not specified + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks_cli_compatible: yes + register: networks_7 + +- name: networks_cli_compatible=yes, networks not specified, purge_networks + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks_cli_compatible: yes + purge_networks: yes + 
force_kill: yes + register: networks_8 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- debug: var=networks_3 + +- assert: + that: + # networks_1 has networks nname_1, nname_2 + - networks_1 is changed + - networks_1.container.NetworkSettings.Networks | length == 2 + - nname_1 in networks_1.container.NetworkSettings.Networks + - nname_2 in networks_1.container.NetworkSettings.Networks + # networks_2 has networks nname_1, nname_2 + - networks_2 is not changed + - networks_2.container.NetworkSettings.Networks | length == 2 + - nname_1 in networks_2.container.NetworkSettings.Networks + - nname_2 in networks_1.container.NetworkSettings.Networks + # networks_3 has networks 'bridge' + - networks_3 is changed + - networks_3.container.NetworkSettings.Networks | length == 1 + - "'default' in networks_3.container.NetworkSettings.Networks or 'bridge' in networks_3.container.NetworkSettings.Networks" + # networks_4 has networks 'bridge' + - networks_4 is not changed + - networks_4.container.NetworkSettings.Networks | length == 1 + - "'default' in networks_4.container.NetworkSettings.Networks or 'bridge' in networks_4.container.NetworkSettings.Networks" + # networks_5 has no networks + - networks_5 is changed + - networks_5.container.NetworkSettings.Networks | length == 0 + # networks_6 has networks 'bridge' + - networks_6 is changed + - networks_6.container.NetworkSettings.Networks | length == 1 + - "'default' in networks_6.container.NetworkSettings.Networks or 'bridge' in networks_6.container.NetworkSettings.Networks" + # networks_7 has networks 'bridge' + - networks_7 is not changed + - networks_7.container.NetworkSettings.Networks | length == 1 + - "'default' in networks_7.container.NetworkSettings.Networks or 'bridge' in networks_7.container.NetworkSettings.Networks" + # networks_8 has no networks + - networks_8 is changed + - networks_8.container.NetworkSettings.Networks | length == 0 
#################################################################### ## networks with comparisons ####################################### #################################################################### -- block: - - name: create container with one network - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: - - name: "{{ nname_1 }}" - networks_cli_compatible: yes - register: networks_1 - - - name: different networks, comparisons=ignore - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: - - name: "{{ nname_2 }}" - networks_cli_compatible: yes - comparisons: - network_mode: ignore # otherwise we'd have to set network_mode to nname_1 - networks: ignore - register: networks_2 - - - name: less networks, comparisons=ignore - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: [] - networks_cli_compatible: yes - comparisons: - networks: ignore - register: networks_3 - - - name: less networks, comparisons=allow_more_present - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: [] - networks_cli_compatible: yes - comparisons: - networks: allow_more_present - register: networks_4 - - - name: different networks, comparisons=allow_more_present - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: - - name: "{{ nname_2 }}" - networks_cli_compatible: yes - comparisons: - network_mode: ignore # otherwise we'd have to set network_mode to nname_1 - networks: allow_more_present - force_kill: yes - register: networks_5 - - - name: different networks, comparisons=strict - docker_container: - image: "{{ 
docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: - - name: "{{ nname_2 }}" - networks_cli_compatible: yes - comparisons: - networks: strict - force_kill: yes - register: networks_6 - - - name: less networks, comparisons=strict - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: [] - networks_cli_compatible: yes - comparisons: - networks: strict - force_kill: yes - register: networks_7 - - - name: cleanup - docker_container: - name: "{{ cname }}" - state: absent - force_kill: yes - diff: no - - - assert: - that: - # networks_1 has networks nname_1 - - networks_1 is changed - - networks_1.container.NetworkSettings.Networks | length == 1 - - nname_1 in networks_1.container.NetworkSettings.Networks - # networks_2 has networks nname_1 - - networks_2 is not changed - - networks_2.container.NetworkSettings.Networks | length == 1 - - nname_1 in networks_2.container.NetworkSettings.Networks - # networks_3 has networks nname_1 - - networks_3 is not changed - - networks_3.container.NetworkSettings.Networks | length == 1 - - nname_1 in networks_3.container.NetworkSettings.Networks - # networks_4 has networks nname_1 - - networks_4 is not changed - - networks_4.container.NetworkSettings.Networks | length == 1 - - nname_1 in networks_4.container.NetworkSettings.Networks - # networks_5 has networks nname_1, nname_2 - - networks_5 is changed - - networks_5.container.NetworkSettings.Networks | length == 2 - - nname_1 in networks_5.container.NetworkSettings.Networks - - nname_2 in networks_5.container.NetworkSettings.Networks - # networks_6 has networks nname_2 - - networks_6 is changed - - networks_6.container.NetworkSettings.Networks | length == 1 - - nname_2 in networks_6.container.NetworkSettings.Networks - # networks_7 has no networks - - networks_7 is changed - - networks_7.container.NetworkSettings.Networks | 
length == 0 - - when: docker_py_version is version('1.10.0', '>=') +- name: create container with one network + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_1 }}" + networks_cli_compatible: yes + register: networks_1 + +- name: different networks, comparisons=ignore + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_2 }}" + networks_cli_compatible: yes + comparisons: + network_mode: ignore # otherwise we'd have to set network_mode to nname_1 + networks: ignore + register: networks_2 + +- name: less networks, comparisons=ignore + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: [] + networks_cli_compatible: yes + comparisons: + networks: ignore + register: networks_3 + +- name: less networks, comparisons=allow_more_present + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: [] + networks_cli_compatible: yes + comparisons: + networks: allow_more_present + register: networks_4 + +- name: different networks, comparisons=allow_more_present + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_2 }}" + networks_cli_compatible: yes + comparisons: + network_mode: ignore # otherwise we'd have to set network_mode to nname_1 + networks: allow_more_present + force_kill: yes + register: networks_5 + +- name: different networks, comparisons=strict + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_2 }}" + 
networks_cli_compatible: yes + comparisons: + networks: strict + force_kill: yes + register: networks_6 + +- name: less networks, comparisons=strict + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: [] + networks_cli_compatible: yes + comparisons: + networks: strict + force_kill: yes + register: networks_7 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + # networks_1 has networks nname_1 + - networks_1 is changed + - networks_1.container.NetworkSettings.Networks | length == 1 + - nname_1 in networks_1.container.NetworkSettings.Networks + # networks_2 has networks nname_1 + - networks_2 is not changed + - networks_2.container.NetworkSettings.Networks | length == 1 + - nname_1 in networks_2.container.NetworkSettings.Networks + # networks_3 has networks nname_1 + - networks_3 is not changed + - networks_3.container.NetworkSettings.Networks | length == 1 + - nname_1 in networks_3.container.NetworkSettings.Networks + # networks_4 has networks nname_1 + - networks_4 is not changed + - networks_4.container.NetworkSettings.Networks | length == 1 + - nname_1 in networks_4.container.NetworkSettings.Networks + # networks_5 has networks nname_1, nname_2 + - networks_5 is changed + - networks_5.container.NetworkSettings.Networks | length == 2 + - nname_1 in networks_5.container.NetworkSettings.Networks + - nname_2 in networks_5.container.NetworkSettings.Networks + # networks_6 has networks nname_2 + - networks_6 is changed + - networks_6.container.NetworkSettings.Networks | length == 1 + - nname_2 in networks_6.container.NetworkSettings.Networks + # networks_7 has no networks + - networks_7 is changed + - networks_7.container.NetworkSettings.Networks | length == 0 #################################################################### ## networks with IP address ######################################## 
#################################################################### -- block: - - name: create container (stopped) with one network and fixed IP - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: stopped - networks: - - name: "{{ nname_3 }}" - ipv4_address: "{{ nname_3_ipv4_2 }}" - ipv6_address: "{{ nname_3_ipv6_2 }}" - networks_cli_compatible: yes - register: networks_1 - - - name: create container (stopped) with one network and fixed IP (idempotent) - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: stopped - networks: - - name: "{{ nname_3 }}" - ipv4_address: "{{ nname_3_ipv4_2 }}" - ipv6_address: "{{ nname_3_ipv6_2 }}" - networks_cli_compatible: yes - register: networks_2 - - - name: create container (stopped) with one network and fixed IP (different IPv4) - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: stopped - networks: - - name: "{{ nname_3 }}" - ipv4_address: "{{ nname_3_ipv4_3 }}" - ipv6_address: "{{ nname_3_ipv6_2 }}" - networks_cli_compatible: yes - register: networks_3 - - - name: create container (stopped) with one network and fixed IP (different IPv6) - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: stopped - networks: - - name: "{{ nname_3 }}" - ipv4_address: "{{ nname_3_ipv4_3 }}" - ipv6_address: "{{ nname_3_ipv6_3 }}" - networks_cli_compatible: yes - register: networks_4 - - - name: create container (started) with one network and fixed IP - docker_container: - name: "{{ cname }}" - state: started - register: networks_5 - - - name: create container (started) with one network and fixed IP (different IPv4) - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: 
started - networks: - - name: "{{ nname_3 }}" - ipv4_address: "{{ nname_3_ipv4_4 }}" - ipv6_address: "{{ nname_3_ipv6_3 }}" - networks_cli_compatible: yes - force_kill: yes - register: networks_6 - - - name: create container (started) with one network and fixed IP (different IPv6) - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: - - name: "{{ nname_3 }}" - ipv4_address: "{{ nname_3_ipv4_4 }}" - ipv6_address: "{{ nname_3_ipv6_4 }}" - networks_cli_compatible: yes - force_kill: yes - register: networks_7 - - - name: create container (started) with one network and fixed IP (idempotent) - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: - - name: "{{ nname_3 }}" - ipv4_address: "{{ nname_3_ipv4_4 }}" - ipv6_address: "{{ nname_3_ipv6_4 }}" - networks_cli_compatible: yes - register: networks_8 - - - name: cleanup - docker_container: - name: "{{ cname }}" - state: absent - force_kill: yes - diff: no - - - assert: - that: - - networks_1 is changed - - networks_1.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_2 - - networks_1.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_2 | normalize_ipaddr - - networks_1.container.NetworkSettings.Networks[nname_3].IPAddress == "" - - networks_1.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == "" - - networks_2 is not changed - - networks_2.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_2 - - networks_2.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_2 | normalize_ipaddr - - networks_2.container.NetworkSettings.Networks[nname_3].IPAddress == "" - - networks_2.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == "" - - networks_3 is changed 
- - networks_3.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3 - - networks_3.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_2 | normalize_ipaddr - - networks_3.container.NetworkSettings.Networks[nname_3].IPAddress == "" - - networks_3.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == "" - - networks_4 is changed - - networks_4.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3 - - networks_4.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr - - networks_4.container.NetworkSettings.Networks[nname_3].IPAddress == "" - - networks_4.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == "" - - networks_5 is changed - - networks_5.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3 - - networks_5.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr - - networks_5.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_3 - - networks_5.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr - - networks_6 is changed - - networks_6.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4 - - networks_6.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr - - networks_6.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4 - - networks_6.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr - - networks_7 is changed - - networks_7.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4 - - networks_7.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | 
normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr - - networks_7.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4 - - networks_7.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr - - networks_8 is not changed - - networks_8.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4 - - networks_8.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr - - networks_8.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4 - - networks_8.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr - - when: docker_py_version is version('1.10.0', '>=') +- name: create container (stopped) with one network and fixed IP + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: stopped + networks: + - name: "{{ nname_3 }}" + ipv4_address: "{{ nname_3_ipv4_2 }}" + ipv6_address: "{{ nname_3_ipv6_2 }}" + networks_cli_compatible: yes + register: networks_1 + +- name: create container (stopped) with one network and fixed IP (idempotent) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: stopped + networks: + - name: "{{ nname_3 }}" + ipv4_address: "{{ nname_3_ipv4_2 }}" + ipv6_address: "{{ nname_3_ipv6_2 }}" + networks_cli_compatible: yes + register: networks_2 + +- name: create container (stopped) with one network and fixed IP (different IPv4) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: stopped + networks: + - name: "{{ nname_3 }}" + ipv4_address: "{{ nname_3_ipv4_3 }}" + ipv6_address: "{{ nname_3_ipv6_2 }}" + networks_cli_compatible: yes + register: networks_3 + +- name: create container 
(stopped) with one network and fixed IP (different IPv6) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: stopped + networks: + - name: "{{ nname_3 }}" + ipv4_address: "{{ nname_3_ipv4_3 }}" + ipv6_address: "{{ nname_3_ipv6_3 }}" + networks_cli_compatible: yes + register: networks_4 + +- name: create container (started) with one network and fixed IP + docker_container: + name: "{{ cname }}" + state: started + register: networks_5 + +- name: create container (started) with one network and fixed IP (different IPv4) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_3 }}" + ipv4_address: "{{ nname_3_ipv4_4 }}" + ipv6_address: "{{ nname_3_ipv6_3 }}" + networks_cli_compatible: yes + force_kill: yes + register: networks_6 + +- name: create container (started) with one network and fixed IP (different IPv6) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_3 }}" + ipv4_address: "{{ nname_3_ipv4_4 }}" + ipv6_address: "{{ nname_3_ipv6_4 }}" + networks_cli_compatible: yes + force_kill: yes + register: networks_7 + +- name: create container (started) with one network and fixed IP (idempotent) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_3 }}" + ipv4_address: "{{ nname_3_ipv4_4 }}" + ipv6_address: "{{ nname_3_ipv6_4 }}" + networks_cli_compatible: yes + register: networks_8 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - networks_1 is changed + - networks_1.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_2 + - 
networks_1.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_2 | normalize_ipaddr + - networks_1.container.NetworkSettings.Networks[nname_3].IPAddress == "" + - networks_1.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == "" + - networks_2 is not changed + - networks_2.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_2 + - networks_2.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_2 | normalize_ipaddr + - networks_2.container.NetworkSettings.Networks[nname_3].IPAddress == "" + - networks_2.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == "" + - networks_3 is changed + - networks_3.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3 + - networks_3.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_2 | normalize_ipaddr + - networks_3.container.NetworkSettings.Networks[nname_3].IPAddress == "" + - networks_3.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == "" + - networks_4 is changed + - networks_4.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3 + - networks_4.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr + - networks_4.container.NetworkSettings.Networks[nname_3].IPAddress == "" + - networks_4.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == "" + - networks_5 is changed + - networks_5.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3 + - networks_5.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr + - networks_5.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_3 + - networks_5.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | 
normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr + - networks_6 is changed + - networks_6.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4 + - networks_6.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr + - networks_6.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4 + - networks_6.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr + - networks_7 is changed + - networks_7.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4 + - networks_7.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr + - networks_7.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4 + - networks_7.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr + - networks_8 is not changed + - networks_8.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4 + - networks_8.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr + - networks_8.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4 + - networks_8.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr #################################################################### #################################################################### @@ -746,4 +732,3 @@ - "{{ nname_3 }}" loop_control: loop_var: network_name - when: docker_py_version is version('1.10.0', '>=') diff --git a/tests/integration/targets/docker_container/tasks/tests/options.yml b/tests/integration/targets/docker_container/tasks/tests/options.yml index 4b898b7e8..4a0ccf775 100644 --- 
a/tests/integration/targets/docker_container/tasks/tests/options.yml +++ b/tests/integration/targets/docker_container/tasks/tests/options.yml @@ -21,7 +21,6 @@ state: started auto_remove: yes register: auto_remove_1 - ignore_errors: yes - name: Give container 1 second to be sure it terminated pause: @@ -32,19 +31,11 @@ name: "{{ cname }}" state: absent register: auto_remove_2 - ignore_errors: yes - assert: that: - auto_remove_1 is changed - auto_remove_2 is not changed - when: docker_py_version is version('2.1.0', '>=') -- assert: - that: - - auto_remove_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in auto_remove_1.msg" - - "'Minimum version required is 2.1.0 ' in auto_remove_1.msg" - when: docker_py_version is version('2.1.0', '<') #################################################################### ## blkio_weight #################################################### @@ -573,7 +564,6 @@ name: "{{ cname }}" cpus: 1 state: started - ignore_errors: yes register: cpus_1 - name: cpus (idempotency) @@ -583,7 +573,6 @@ name: "{{ cname }}" cpus: 1 state: started - ignore_errors: yes register: cpus_2 - name: cpus (change) @@ -596,7 +585,6 @@ force_kill: yes # This will fail if the system the test is run on doesn't have # multiple MEMs available. 
- ignore_errors: yes register: cpus_3 - name: cleanup @@ -611,13 +599,6 @@ - cpus_1 is changed - cpus_2 is not changed and cpus_2 is not failed - cpus_3 is failed or cpus_3 is changed - when: docker_py_version is version('2.3.0', '>=') -- assert: - that: - - cpus_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in cpus_1.msg" - - "'Minimum version required is 2.3.0 ' in cpus_1.msg" - when: docker_py_version is version('2.3.0', '<') #################################################################### ## debug ########################################################### @@ -741,11 +722,8 @@ - detach_cleanup_nonzero.status == 42 - "'Output' in detach_cleanup_nonzero.container" - "detach_cleanup_nonzero.container.Output == ''" -- assert: - that: - "'Cannot retrieve result as auto_remove is enabled' == detach_auto_remove.container.Output" - detach_auto_remove_cleanup is not changed - when: docker_py_version is version('2.1.0', '>=') #################################################################### ## devices ######################################################### @@ -825,7 +803,6 @@ - path: /dev/urandom rate: 10K register: device_read_bps_1 - ignore_errors: yes - name: device_read_bps (idempotency) docker_container: @@ -839,7 +816,6 @@ - path: /dev/random rate: 20M register: device_read_bps_2 - ignore_errors: yes - name: device_read_bps (lesser entries) docker_container: @@ -851,7 +827,6 @@ - path: /dev/random rate: 20M register: device_read_bps_3 - ignore_errors: yes - name: device_read_bps (changed) docker_container: @@ -866,7 +841,6 @@ rate: 5K force_kill: yes register: device_read_bps_4 - ignore_errors: yes - name: cleanup docker_container: @@ -881,13 +855,6 @@ - device_read_bps_2 is not changed - device_read_bps_3 is not changed - device_read_bps_4 is changed - when: docker_py_version is version('1.9.0', '>=') -- assert: - that: - - device_read_bps_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in device_read_bps_1.msg" - - "'Minimum 
version required is 1.9.0 ' in device_read_bps_1.msg" - when: docker_py_version is version('1.9.0', '<') #################################################################### ## device_read_iops ################################################ @@ -905,7 +872,6 @@ - path: /dev/urandom rate: 20 register: device_read_iops_1 - ignore_errors: yes - name: device_read_iops (idempotency) docker_container: @@ -919,7 +885,6 @@ - path: /dev/random rate: 10 register: device_read_iops_2 - ignore_errors: yes - name: device_read_iops (less) docker_container: @@ -931,7 +896,6 @@ - path: /dev/random rate: 10 register: device_read_iops_3 - ignore_errors: yes - name: device_read_iops (changed) docker_container: @@ -946,7 +910,6 @@ rate: 50 force_kill: yes register: device_read_iops_4 - ignore_errors: yes - name: cleanup docker_container: @@ -961,13 +924,6 @@ - device_read_iops_2 is not changed - device_read_iops_3 is not changed - device_read_iops_4 is changed - when: docker_py_version is version('1.9.0', '>=') -- assert: - that: - - device_read_iops_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in device_read_iops_1.msg" - - "'Minimum version required is 1.9.0 ' in device_read_iops_1.msg" - when: docker_py_version is version('1.9.0', '<') #################################################################### ## device_write_bps and device_write_iops ########################## @@ -986,7 +942,6 @@ - path: /dev/urandom rate: 30 register: device_write_limit_1 - ignore_errors: yes - name: device_write_bps and device_write_iops (idempotency) docker_container: @@ -1001,7 +956,6 @@ - path: /dev/urandom rate: 30 register: device_write_limit_2 - ignore_errors: yes - name: device_write_bps device_write_iops (changed) docker_container: @@ -1017,7 +971,6 @@ rate: 100 force_kill: yes register: device_write_limit_3 - ignore_errors: yes - name: cleanup docker_container: @@ -1031,13 +984,6 @@ - device_write_limit_1 is changed - device_write_limit_2 is not changed - device_write_limit_3 is 
changed - when: docker_py_version is version('1.9.0', '>=') -- assert: - that: - - device_write_limit_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in device_write_limit_1.msg" - - "'Minimum version required is 1.9.0 ' in device_write_limit_1.msg" - when: docker_py_version is version('1.9.0', '<') #################################################################### ## device_requests ################################################# @@ -1074,14 +1020,13 @@ that: - device_requests_1 is changed - device_requests_2 is not changed - when: docker_py_version is version('4.3.0', '>=') and docker_api_version is version('1.40', '>=') + when: docker_api_version is version('1.40', '>=') - assert: that: - device_requests_1 is failed - | - (('version is ' ~ docker_py_version ~ ' ') in device_requests_1.msg and 'Minimum version required is 4.3.0 ' in device_requests_1.msg) or - (('API version is ' ~ docker_api_version ~ '.') in device_requests_1.msg and 'Minimum version required is 1.40 ' in device_requests_1.msg) - when: docker_py_version is version('4.3.0', '<') or docker_api_version is version('1.40', '<') + ('API version is ' ~ docker_api_version ~ '.') in device_requests_1.msg and 'Minimum version required is 1.40 ' in device_requests_1.msg + when: docker_api_version is version('1.40', '<') #################################################################### ## dns_opts ######################################################## @@ -1097,7 +1042,6 @@ - "timeout:10" - rotate register: dns_opts_1 - ignore_errors: yes - name: dns_opts (idempotency) docker_container: @@ -1109,7 +1053,6 @@ - rotate - "timeout:10" register: dns_opts_2 - ignore_errors: yes - name: dns_opts (less resolv.conf options) docker_container: @@ -1120,7 +1063,6 @@ dns_opts: - "timeout:10" register: dns_opts_3 - ignore_errors: yes - name: dns_opts (more resolv.conf options) docker_container: @@ -1133,7 +1075,6 @@ - no-check-names force_kill: yes register: dns_opts_4 - ignore_errors: yes - name: 
cleanup docker_container: @@ -1148,13 +1089,6 @@ - dns_opts_2 is not changed - dns_opts_3 is not changed - dns_opts_4 is changed - when: docker_py_version is version('1.10.0', '>=') -- assert: - that: - - dns_opts_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in dns_opts_1.msg" - - "'Minimum version required is 1.10.0 ' in dns_opts_1.msg" - when: docker_py_version is version('1.10.0', '<') #################################################################### ## dns_search_domains ############################################## @@ -1854,7 +1788,6 @@ retries: 2 force_kill: yes register: healthcheck_1 - ignore_errors: yes - name: healthcheck (idempotency) docker_container: @@ -1872,7 +1805,6 @@ retries: 2 force_kill: yes register: healthcheck_2 - ignore_errors: yes - name: healthcheck (changed) docker_container: @@ -1890,7 +1822,6 @@ retries: 3 force_kill: yes register: healthcheck_3 - ignore_errors: yes - name: healthcheck (no change) docker_container: @@ -1900,7 +1831,6 @@ state: started force_kill: yes register: healthcheck_4 - ignore_errors: yes - name: healthcheck (disabled) docker_container: @@ -1913,7 +1843,6 @@ - NONE force_kill: yes register: healthcheck_5 - ignore_errors: yes - name: healthcheck (disabled, idempotency) docker_container: @@ -1926,7 +1855,6 @@ - NONE force_kill: yes register: healthcheck_6 - ignore_errors: yes - name: healthcheck (disabled, idempotency, strict) docker_container: @@ -1941,7 +1869,6 @@ comparisons: '*': strict register: healthcheck_7 - ignore_errors: yes - name: healthcheck (string in healthcheck test, changed) docker_container: @@ -1953,7 +1880,6 @@ test: "sleep 1" force_kill: yes register: healthcheck_8 - ignore_errors: yes - name: healthcheck (string in healthcheck test, idempotency) docker_container: @@ -1965,7 +1891,6 @@ test: "sleep 1" force_kill: yes register: healthcheck_9 - ignore_errors: yes - name: cleanup docker_container: @@ -1985,13 +1910,6 @@ - healthcheck_7 is not changed - healthcheck_8 is changed - 
healthcheck_9 is not changed - when: docker_py_version is version('2.0.0', '>=') -- assert: - that: - - healthcheck_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in healthcheck_1.msg" - - "'Minimum version required is 2.0.0 ' in healthcheck_1.msg" - when: docker_py_version is version('2.0.0', '<') #################################################################### ## hostname ######################################################## @@ -2050,7 +1968,6 @@ init: yes state: started register: init_1 - ignore_errors: yes - name: init (idempotency) docker_container: @@ -2060,7 +1977,6 @@ init: yes state: started register: init_2 - ignore_errors: yes - name: init (change) docker_container: @@ -2071,7 +1987,6 @@ state: started force_kill: yes register: init_3 - ignore_errors: yes - name: cleanup docker_container: @@ -2085,13 +2000,6 @@ - init_1 is changed - init_2 is not changed - init_3 is changed - when: docker_py_version is version('2.2.0', '>=') -- assert: - that: - - init_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in init_1.msg" - - "'Minimum version required is 2.2.0 ' in init_1.msg" - when: docker_py_version is version('2.2.0', '<') #################################################################### ## interactive ##################################################### @@ -2462,7 +2370,6 @@ state: absent force_kill: yes diff: no - ignore_errors: yes - assert: that: @@ -3188,8 +3095,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau state: started pid_mode: "container:{{ pid_mode_helper.container.Id }}" register: pid_mode_1 - ignore_errors: yes - # docker-py < 2.0 does not support "arbitrary" pid_mode values - name: pid_mode (idempotency) docker_container: @@ -3199,8 +3104,6 @@ avoid such warnings, please quote the value.' 
in (log_options_2.warnings | defau state: started pid_mode: "container:{{ cname_h1 }}" register: pid_mode_2 - ignore_errors: yes - # docker-py < 2.0 does not support "arbitrary" pid_mode values - name: pid_mode (change) docker_container: @@ -3229,13 +3132,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau - pid_mode_1 is changed - pid_mode_2 is not changed - pid_mode_3 is changed - when: docker_py_version is version('2.0.0', '>=') -- assert: - that: - - pid_mode_1 is failed - - pid_mode_2 is failed - - pid_mode_3 is changed - when: docker_py_version is version('2.0.0', '<') #################################################################### ## pids_limit ###################################################### @@ -3249,7 +3145,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau state: started pids_limit: 10 register: pids_limit_1 - ignore_errors: yes - name: pids_limit (idempotency) docker_container: @@ -3259,7 +3154,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau state: started pids_limit: 10 register: pids_limit_2 - ignore_errors: yes - name: pids_limit (changed) docker_container: @@ -3270,7 +3164,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau pids_limit: 20 force_kill: yes register: pids_limit_3 - ignore_errors: yes - name: cleanup docker_container: @@ -3284,13 +3177,6 @@ avoid such warnings, please quote the value.' 
in (log_options_2.warnings | defau - pids_limit_1 is changed - pids_limit_2 is not changed - pids_limit_3 is changed - when: docker_py_version is version('1.10.0', '>=') -- assert: - that: - - pids_limit_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in pids_limit_1.msg" - - "'Minimum version required is 1.10.0 ' in pids_limit_1.msg" - when: docker_py_version is version('1.10.0', '<') #################################################################### ## privileged ###################################################### @@ -3648,7 +3534,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau runtime: runc state: started register: runtime_1 - ignore_errors: yes - name: runtime (idempotency) docker_container: @@ -3658,7 +3543,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau runtime: runc state: started register: runtime_2 - ignore_errors: yes - name: cleanup docker_container: @@ -3671,13 +3555,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau that: - runtime_1 is changed - runtime_2 is not changed - when: docker_py_version is version('2.4.0', '>=') -- assert: - that: - - runtime_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in runtime_1.msg" - - "'Minimum version required is 2.4.0 ' in runtime_1.msg" - when: docker_py_version is version('2.4.0', '<') #################################################################### ## security_opts ################################################### @@ -3975,7 +3852,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau net.ipv4.icmp_echo_ignore_all: 1 net.ipv4.ip_forward: 1 register: sysctls_1 - ignore_errors: yes - name: sysctls (idempotency) docker_container: @@ -3987,7 +3863,6 @@ avoid such warnings, please quote the value.' 
in (log_options_2.warnings | defau net.ipv4.ip_forward: 1 net.ipv4.icmp_echo_ignore_all: 1 register: sysctls_2 - ignore_errors: yes - name: sysctls (less sysctls) docker_container: @@ -3998,7 +3873,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau sysctls: net.ipv4.icmp_echo_ignore_all: 1 register: sysctls_3 - ignore_errors: yes - name: sysctls (more sysctls) docker_container: @@ -4011,7 +3885,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau net.ipv6.conf.default.accept_redirects: 0 force_kill: yes register: sysctls_4 - ignore_errors: yes - name: cleanup docker_container: @@ -4026,13 +3899,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau - sysctls_2 is not changed - sysctls_3 is not changed - sysctls_4 is changed - when: docker_py_version is version('1.10.0', '>=') -- assert: - that: - - sysctls_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in sysctls_1.msg" - - "'Minimum version required is 1.10.0 ' in sysctls_1.msg" - when: docker_py_version is version('1.10.0', '<') #################################################################### ## tmpfs ########################################################### @@ -4260,7 +4126,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau userns_mode: host state: started register: userns_mode_1 - ignore_errors: yes - name: userns_mode (idempotency) docker_container: @@ -4270,7 +4135,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau userns_mode: host state: started register: userns_mode_2 - ignore_errors: yes - name: userns_mode (change) docker_container: @@ -4281,7 +4145,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau state: started force_kill: yes register: userns_mode_3 - ignore_errors: yes - name: cleanup docker_container: @@ -4295,13 +4158,6 @@ avoid such warnings, please quote the value.' 
in (log_options_2.warnings | defau - userns_mode_1 is changed - userns_mode_2 is not changed - userns_mode_3 is changed - when: docker_py_version is version('1.10.0', '>=') -- assert: - that: - - userns_mode_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in userns_mode_1.msg" - - "'Minimum version required is 1.10.0 ' in userns_mode_1.msg" - when: docker_py_version is version('1.10.0', '<') #################################################################### ## uts ############################################################# @@ -4315,7 +4171,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau uts: host state: started register: uts_1 - ignore_errors: yes - name: uts (idempotency) docker_container: @@ -4325,7 +4180,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau uts: host state: started register: uts_2 - ignore_errors: yes - name: uts (change) docker_container: @@ -4336,7 +4190,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau state: started force_kill: yes register: uts_3 - ignore_errors: yes - name: cleanup docker_container: @@ -4350,13 +4203,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau - uts_1 is changed - uts_2 is not changed - uts_3 is changed - when: docker_py_version is version('3.5.0', '>=') -- assert: - that: - - uts_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in uts_1.msg" - - "'Minimum version required is 3.5.0 ' in uts_1.msg" - when: docker_py_version is version('3.5.0', '<') #################################################################### ## working_dir ##################################################### From 7f9b43cbbffac2338f55cf8a3f29bff06661a4ec Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 14 Jul 2022 15:12:47 +0200 Subject: [PATCH 34/38] Avoid recreation if a pure update is possible without losing the diff data. 
--- .../module_utils/module_container/module.py | 9 +- .../docker_container/tasks/tests/update.yml | 171 ++++++++++++++++++ 2 files changed, 179 insertions(+), 1 deletion(-) create mode 100644 tests/integration/targets/docker_container/tasks/tests/update.yml diff --git a/plugins/module_utils/module_container/module.py b/plugins/module_utils/module_container/module.py index c4e444514..56d266b2d 100644 --- a/plugins/module_utils/module_container/module.py +++ b/plugins/module_utils/module_container/module.py @@ -469,10 +469,17 @@ def sort_key_fn(x): def has_different_configuration(self, container, image): differences = DifferenceTracker() + update_differences = DifferenceTracker() for options, param_values in self.parameters: engine = options.get_engine(self.engine_driver.name) - self._record_differences(differences, options, param_values, engine, container, image) + if engine.can_update_value(self.engine_driver.get_api_version(self.client)): + self._record_differences(update_differences, options, param_values, engine, container, image) + else: + self._record_differences(differences, options, param_values, engine, container, image) has_differences = not differences.empty + # Only consider differences of properties that can be updated when there are also other differences + if has_differences: + differences.merge(update_differences) return has_differences, differences def has_different_resource_limits(self, container, image): diff --git a/tests/integration/targets/docker_container/tasks/tests/update.yml b/tests/integration/targets/docker_container/tasks/tests/update.yml new file mode 100644 index 000000000..fa78b7fcb --- /dev/null +++ b/tests/integration/targets/docker_container/tasks/tests/update.yml @@ -0,0 +1,171 @@ +--- +- name: Registering container name + set_fact: + cname: "{{ cname_prefix ~ '-update' }}" +- name: Registering container name + set_fact: + cnames: "{{ cnames + [cname] }}" + +# We do not test cpuset_cpus and cpuset_mems since changing it fails if 
the system does +# not have 'enough' CPUs. We do not test kernel_memory since it is deprecated and fails. + +- name: Create container + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + blkio_weight: 123 + cpu_period: 90000 + cpu_quota: 150000 + cpu_shares: 900 + memory: 64M + memory_reservation: 64M + memory_swap: 64M + restart_policy: on-failure + restart_retries: 5 + register: create + +- name: Update values + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + blkio_weight: 234 + cpu_period: 50000 + cpu_quota: 50000 + cpu_shares: 1100 + memory: 48M + memory_reservation: 48M + memory_swap: unlimited + restart_policy: on-failure # only on-failure can have restart_retries, so don't change it here + restart_retries: 2 + register: update + diff: yes + +- name: Update values again + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + blkio_weight: 135 + cpu_period: 30000 + cpu_quota: 40000 + cpu_shares: 1000 + memory: 32M + memory_reservation: 30M + memory_swap: 128M + restart_policy: always + restart_retries: 0 + register: update2 + diff: yes + +- name: Recreate container + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 20m"' # this will force re-creation + name: "{{ cname }}" + state: started + blkio_weight: 234 + cpu_period: 50000 + cpu_quota: 50000 + cpu_shares: 1100 + memory: 48M + memory_reservation: 48M + memory_swap: unlimited + restart_policy: on-failure + restart_retries: 2 + force_kill: yes + register: recreate + diff: yes + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- name: Check general things + assert: + that: + - create is changed + - update is changed + - update2 is changed + - recreate is 
changed + + # Make sure the container was *not* recreated when it should not be + - create.container.Id == update.container.Id + - create.container.Id == update2.container.Id + + # Make sure that the container was recreated when it should be + - create.container.Id != recreate.container.Id + +- name: Check diff for first update + assert: + that: + - update.diff.before.blkio_weight == 123 + - update.diff.after.blkio_weight == 234 + - update.diff.before.cpu_period == 90000 + - update.diff.after.cpu_period == 50000 + - update.diff.before.cpu_quota == 150000 + - update.diff.after.cpu_quota == 50000 + - update.diff.before.cpu_shares == 900 + - update.diff.after.cpu_shares == 1100 + - update.diff.before.memory == 67108864 + - update.diff.after.memory == 50331648 + - update.diff.before.memory_reservation == 67108864 + - update.diff.after.memory_reservation == 50331648 + - update.diff.before.memory_swap == 67108864 + - update.diff.after.memory_swap == -1 + - "'restart_policy' not in update.diff.before" + - update.diff.before.restart_retries == 5 + - update.diff.after.restart_retries == 2 + +- name: Check diff for second update + assert: + that: + - update2.diff.before.blkio_weight == 234 + - update2.diff.after.blkio_weight == 135 + - update2.diff.before.cpu_period == 50000 + - update2.diff.after.cpu_period == 30000 + - update2.diff.before.cpu_quota == 50000 + - update2.diff.after.cpu_quota == 40000 + - update2.diff.before.cpu_shares == 1100 + - update2.diff.after.cpu_shares == 1000 + - update2.diff.before.memory == 50331648 + - update2.diff.after.memory == 33554432 + - update2.diff.before.memory_reservation == 50331648 + - update2.diff.after.memory_reservation == 31457280 + - update2.diff.before.memory_swap == -1 + - update2.diff.after.memory_swap == 134217728 + - update2.diff.before.restart_policy == 'on-failure' + - update2.diff.after.restart_policy == 'always' + - update2.diff.before.restart_retries == 2 + - update2.diff.after.restart_retries == 0 + +- name: Check diff 
for recreation + assert: + that: + - recreate.diff.before.blkio_weight == 135 + - recreate.diff.after.blkio_weight == 234 + - recreate.diff.before.cpu_period == 30000 + - recreate.diff.after.cpu_period == 50000 + - recreate.diff.before.cpu_quota == 40000 + - recreate.diff.after.cpu_quota == 50000 + - recreate.diff.before.cpu_shares == 1000 + - recreate.diff.after.cpu_shares == 1100 + - recreate.diff.before.memory == 33554432 + - recreate.diff.after.memory == 50331648 + - recreate.diff.before.memory_reservation == 31457280 + - recreate.diff.after.memory_reservation == 50331648 + - recreate.diff.before.memory_swap == 134217728 + - recreate.diff.after.memory_swap == -1 + - recreate.diff.before.restart_policy == 'always' + - recreate.diff.after.restart_policy == 'on-failure' + - recreate.diff.before.restart_retries == 0 + - recreate.diff.after.restart_retries == 2 + - recreate.diff.before.command == ['/bin/sh', '-c', 'sleep 10m'] + - recreate.diff.after.command == ['/bin/sh', '-c', 'sleep 20m'] From fe790a0dea44a553c33be1ddce4128fe90220bbb Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 14 Jul 2022 16:26:56 +0200 Subject: [PATCH 35/38] Cover the case that blkio_weight does not work. --- .../targets/docker_container/tasks/tests/update.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/integration/targets/docker_container/tasks/tests/update.yml b/tests/integration/targets/docker_container/tasks/tests/update.yml index fa78b7fcb..87a3e177b 100644 --- a/tests/integration/targets/docker_container/tasks/tests/update.yml +++ b/tests/integration/targets/docker_container/tasks/tests/update.yml @@ -106,7 +106,8 @@ - name: Check diff for first update assert: that: - - update.diff.before.blkio_weight == 123 + # blkio_weight sometimes cannot be set, then we end up with 0 instead of the value we had + - update.diff.before.blkio_weight == 123 or 'Docker warning: Your kernel does not support Block I/O weight or the cgroup is not mounted. 
Weight discarded.' in (create.warnings | default([])) - update.diff.after.blkio_weight == 234 - update.diff.before.cpu_period == 90000 - update.diff.after.cpu_period == 50000 @@ -127,7 +128,7 @@ - name: Check diff for second update assert: that: - - update2.diff.before.blkio_weight == 234 + - update2.diff.before.blkio_weight == 234 or 'Docker warning: Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.' in (create.warnings | default([])) - update2.diff.after.blkio_weight == 135 - update2.diff.before.cpu_period == 50000 - update2.diff.after.cpu_period == 30000 @@ -149,7 +150,7 @@ - name: Check diff for recreation assert: that: - - recreate.diff.before.blkio_weight == 135 + - recreate.diff.before.blkio_weight == 135 or 'Docker warning: Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.' in (create.warnings | default([])) - recreate.diff.after.blkio_weight == 234 - recreate.diff.before.cpu_period == 30000 - recreate.diff.after.cpu_period == 50000 From d012dffa3de37267eb3f0cc3404dc0cf4cc77b7f Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 14 Jul 2022 16:27:14 +0200 Subject: [PATCH 36/38] Update plugins/module_utils/module_container/docker_api.py Co-authored-by: Brian Scholer <1260690+briantist@users.noreply.github.com> --- plugins/module_utils/module_container/docker_api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/module_utils/module_container/docker_api.py b/plugins/module_utils/module_container/docker_api.py index d78a730e1..c4f213b12 100644 --- a/plugins/module_utils/module_container/docker_api.py +++ b/plugins/module_utils/module_container/docker_api.py @@ -1157,7 +1157,7 @@ def _preprocess_container_names(module, client, api_version, value): return value container_name = value[len('container:'):] # Try to inspect container to see whether this is an ID or a - # name (and in the latter case, retrieve it's ID) + # name (and in the latter 
case, retrieve its ID) container = client.get_container(container_name) if container is None: # If we can't find the container, issue a warning and continue with From 5d7370a5d910212685cdc9f3c3e949c3f5ed6b0d Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 14 Jul 2022 16:52:00 +0200 Subject: [PATCH 37/38] Improve memory_swap tests. --- .../targets/docker_container/tasks/tests/update.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/integration/targets/docker_container/tasks/tests/update.yml b/tests/integration/targets/docker_container/tasks/tests/update.yml index 87a3e177b..bfff1070e 100644 --- a/tests/integration/targets/docker_container/tasks/tests/update.yml +++ b/tests/integration/targets/docker_container/tasks/tests/update.yml @@ -119,8 +119,8 @@ - update.diff.after.memory == 50331648 - update.diff.before.memory_reservation == 67108864 - update.diff.after.memory_reservation == 50331648 - - update.diff.before.memory_swap == 67108864 - - update.diff.after.memory_swap == -1 + - (update.diff.before.memory_swap | default(0)) == 67108864 or 'Docker warning: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.' in (create.warnings | default([])) + - (update.diff.after.memory_swap | default(0)) == -1 or 'Docker warning: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.' 
in (create.warnings | default([])) - "'restart_policy' not in update.diff.before" - update.diff.before.restart_retries == 5 - update.diff.after.restart_retries == 2 @@ -140,8 +140,8 @@ - update2.diff.after.memory == 33554432 - update2.diff.before.memory_reservation == 50331648 - update2.diff.after.memory_reservation == 31457280 - - update2.diff.before.memory_swap == -1 - - update2.diff.after.memory_swap == 134217728 + - (update2.diff.before.memory_swap | default(0)) == -1 or 'Docker warning: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.' in (create.warnings | default([])) + - (update2.diff.after.memory_swap | default(0)) == 134217728 or 'Docker warning: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.' in (create.warnings | default([])) - update2.diff.before.restart_policy == 'on-failure' - update2.diff.after.restart_policy == 'always' - update2.diff.before.restart_retries == 2 @@ -162,8 +162,8 @@ - recreate.diff.after.memory == 50331648 - recreate.diff.before.memory_reservation == 31457280 - recreate.diff.after.memory_reservation == 50331648 - - recreate.diff.before.memory_swap == 134217728 - - recreate.diff.after.memory_swap == -1 + - (recreate.diff.before.memory_swap | default(0)) == 134217728 or 'Docker warning: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.' in (create.warnings | default([])) + - (recreate.diff.after.memory_swap | default(0)) == -1 or 'Docker warning: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.' 
in (create.warnings | default([])) - recreate.diff.before.restart_policy == 'always' - recreate.diff.after.restart_policy == 'on-failure' - recreate.diff.before.restart_retries == 0 From 89bf3da6c763c7d95378d54ad3c681fba3992cec Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 14 Jul 2022 23:31:58 +0200 Subject: [PATCH 38/38] Fix URLs in changelog fragment. --- changelogs/fragments/docker_container.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/changelogs/fragments/docker_container.yml b/changelogs/fragments/docker_container.yml index d49aea56a..085f2b061 100644 --- a/changelogs/fragments/docker_container.yml +++ b/changelogs/fragments/docker_container.yml @@ -1,11 +1,11 @@ major_changes: - "docker_container - no longer uses the Docker SDK for Python. It requires ``requests`` to be installed, and depending on the features used has some more requirements. If the Docker SDK for Python is installed, - these requirements are likely met (https://github.com/ansible-collections/community.docker/pull/...)." - - "docker_container - the module was completely rewritten from scratch (https://github.com/ansible-collections/community.docker/pull/...)." + these requirements are likely met (https://github.com/ansible-collections/community.docker/pull/422)." + - "docker_container - the module was completely rewritten from scratch (https://github.com/ansible-collections/community.docker/pull/422)." breaking_changes: - - "docker_container - ``publish_all_ports`` is no longer ignored in ``comparisons`` (https://github.com/ansible-collections/community.docker/pull/...)." - - "docker_container - ``exposed_ports`` is no longer ignored in ``comparisons``. Before, its value was assumed to be identical with the value of ``published_ports`` (https://github.com/ansible-collections/community.docker/pull/...)." 
- - "docker_container - ``log_options`` can no longer be specified when ``log_driver`` is not specified (https://github.com/ansible-collections/community.docker/pull/...)." - - "docker_container - ``restart_retries`` can no longer be specified when ``restart_policy`` is not specified (https://github.com/ansible-collections/community.docker/pull/...)." - - "docker_container - ``stop_timeout`` is no longer ignored for idempotency if told to be not ignored in ``comparisons``. So far it defaulted to ``ignore`` there, and setting it to ``strict`` had no effect (https://github.com/ansible-collections/community.docker/pull/...)." + - "docker_container - ``publish_all_ports`` is no longer ignored in ``comparisons`` (https://github.com/ansible-collections/community.docker/pull/422)." + - "docker_container - ``exposed_ports`` is no longer ignored in ``comparisons``. Before, its value was assumed to be identical with the value of ``published_ports`` (https://github.com/ansible-collections/community.docker/pull/422)." + - "docker_container - ``log_options`` can no longer be specified when ``log_driver`` is not specified (https://github.com/ansible-collections/community.docker/pull/422)." + - "docker_container - ``restart_retries`` can no longer be specified when ``restart_policy`` is not specified (https://github.com/ansible-collections/community.docker/pull/422)." + - "docker_container - ``stop_timeout`` is no longer ignored for idempotency if told to be not ignored in ``comparisons``. So far it defaulted to ``ignore`` there, and setting it to ``strict`` had no effect (https://github.com/ansible-collections/community.docker/pull/422)."