From b80197982dc65411ce6577e927306fd77c5aa2ad Mon Sep 17 00:00:00 2001
From: blacknon
Date: Thu, 2 Nov 2023 15:18:26 +0900
Subject: [PATCH 1/8] update. proxmox lxc add ostype param

---
 plugins/modules/proxmox.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/plugins/modules/proxmox.py b/plugins/modules/proxmox.py
index 7a0df1422ba..0c9286f1abc 100644
--- a/plugins/modules/proxmox.py
+++ b/plugins/modules/proxmox.py
@@ -497,6 +497,10 @@ def create_instance(self, vmid, node, disk, storage, cpus, memory, swap, timeout
                     self.module.fail_json(msg='%s is not a valid tag' % tag)
             kwargs['tags'] = ",".join(kwargs['tags'])
 
+        if 'ostype' in kwargs:
+            if kwargs['ostype'] == 'auto':
+                kwargs.pop('ostype')
+
         if clone is not None:
             if VZ_TYPE != 'lxc':
                 self.module.fail_json(changed=False, msg="Clone operator is only supported for LXC enabled proxmox clusters.")
@@ -612,6 +616,7 @@ def main():
         netif=dict(type='dict'),
         mounts=dict(type='dict'),
         ip_address=dict(),
+        ostype=dict(default='auto', choices=['auto', 'debian', 'devuan', 'ubuntu', 'centos', 'fedora', 'opensuse', 'archlinux', 'alpine', 'gentoo', 'nixos', 'unmanaged']),
         onboot=dict(type='bool'),
         features=dict(type='list', elements='str'),
         storage=dict(default='local'),
@@ -719,6 +724,7 @@ def main():
                                     ostemplate=module.params['ostemplate'],
                                     netif=module.params['netif'],
                                     mounts=module.params['mounts'],
+                                    ostype=module.params['ostype'],
                                     ip_address=module.params['ip_address'],
                                     onboot=ansible_to_proxmox_bool(module.params['onboot']),
                                     cpuunits=module.params['cpuunits'],

From f9e042f9bfc9abfd4f4ce0793f4ce8ab6572e9c7 Mon Sep 17 00:00:00 2001
From: blacknon
Date: Thu, 2 Nov 2023 18:06:23 +0900
Subject: [PATCH 2/8] update.

---
 plugins/modules/proxmox.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/plugins/modules/proxmox.py b/plugins/modules/proxmox.py
index 0c9286f1abc..186ab8cc949 100644
--- a/plugins/modules/proxmox.py
+++ b/plugins/modules/proxmox.py
@@ -98,6 +98,12 @@
       - target storage
     type: str
     default: 'local'
+  ostype:
+    description:
+      - specifies the ostype of the lxc container
+    choices: ['auto', 'debian', 'devuan', 'ubuntu', 'centos', 'fedora', 'opensuse', 'archlinux', 'alpine', 'gentoo', 'nixos', 'unmanaged']
+    type: str
+    default: 'auto'
   cpuunits:
     description:
       - CPU weight for a VM

From b5937fb2d78fa97017504cecd641283318cd1ab5 Mon Sep 17 00:00:00 2001
From: blacknon
Date: Thu, 2 Nov 2023 22:26:39 +0900
Subject: [PATCH 3/8] update. E501

---
 plugins/modules/proxmox.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/plugins/modules/proxmox.py b/plugins/modules/proxmox.py
index 186ab8cc949..48bdc91526b 100644
--- a/plugins/modules/proxmox.py
+++ b/plugins/modules/proxmox.py
@@ -622,7 +622,9 @@ def main():
         netif=dict(type='dict'),
         mounts=dict(type='dict'),
         ip_address=dict(),
-        ostype=dict(default='auto', choices=['auto', 'debian', 'devuan', 'ubuntu', 'centos', 'fedora', 'opensuse', 'archlinux', 'alpine', 'gentoo', 'nixos', 'unmanaged']),
+        ostype=dict(default='auto', choices=[
+            'auto', 'debian', 'devuan', 'ubuntu', 'centos', 'fedora', 'opensuse', 'archlinux', 'alpine', 'gentoo', 'nixos', 'unmanaged'
+            ]),
         onboot=dict(type='bool'),
         features=dict(type='list', elements='str'),
         storage=dict(default='local'),

From da6f7a83572e09234871db814b3922f988175c26 Mon Sep 17 00:00:00 2001
From: blacknon
Date: Thu, 2 Nov 2023 23:33:33 +0900
Subject: [PATCH 4/8] update. E123

---
 plugins/modules/proxmox.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugins/modules/proxmox.py b/plugins/modules/proxmox.py
index 48bdc91526b..7b9656d07b8 100644
--- a/plugins/modules/proxmox.py
+++ b/plugins/modules/proxmox.py
@@ -624,7 +624,7 @@ def main():
         ip_address=dict(),
         ostype=dict(default='auto', choices=[
             'auto', 'debian', 'devuan', 'ubuntu', 'centos', 'fedora', 'opensuse', 'archlinux', 'alpine', 'gentoo', 'nixos', 'unmanaged'
-            ]),
+        ]),
         onboot=dict(type='bool'),
         features=dict(type='list', elements='str'),
         storage=dict(default='local'),

From 267e35fdd2313953122bf0e39458fa540930c3d0 Mon Sep 17 00:00:00 2001
From: blacknon
Date: Fri, 3 Nov 2023 09:02:24 +0900
Subject: [PATCH 5/8] update. pullrequestreview-1711205075

---
 plugins/modules/proxmox.py | 178 +++++++++++++++++++++++--------------
 1 file changed, 113 insertions(+), 65 deletions(-)

diff --git a/plugins/modules/proxmox.py b/plugins/modules/proxmox.py
index 7b9656d07b8..a4d126509fc 100644
--- a/plugins/modules/proxmox.py
+++ b/plugins/modules/proxmox.py
@@ -6,6 +6,13 @@
 # SPDX-License-Identifier: GPL-3.0-or-later
 
 from __future__ import absolute_import, division, print_function
+from ansible_collections.community.general.plugins.module_utils.proxmox import (
+    ansible_to_proxmox_bool, proxmox_auth_argument_spec, ProxmoxAnsible)
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+import time
+import re
 __metaclass__ = type
 
 DOCUMENTATION = '''
@@ -100,10 +107,12 @@
     default: 'local'
   ostype:
     description:
-      - specifies the ostype of the lxc container
+      - Specifies the C(ostype) of the LXC container.
+      - If set to V(auto), no C(ostype) will be provided on instance creation.
     choices: ['auto', 'debian', 'devuan', 'ubuntu', 'centos', 'fedora', 'opensuse', 'archlinux', 'alpine', 'gentoo', 'nixos', 'unmanaged']
     type: str
     default: 'auto'
+    version_added: 8.1.0
  cpuunits:
    description:
      - CPU weight for a VM
@@ -434,16 +443,6 @@
       state: absent
 '''
 
-import re
-import time
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_native
-
-from ansible_collections.community.general.plugins.module_utils.proxmox import (
-    ansible_to_proxmox_bool, proxmox_auth_argument_spec, ProxmoxAnsible)
 
 VZ_TYPE = None
 
@@ -503,13 +502,13 @@ def create_instance(self, vmid, node, disk, storage, cpus, memory, swap, timeout
                     self.module.fail_json(msg='%s is not a valid tag' % tag)
             kwargs['tags'] = ",".join(kwargs['tags'])
 
-        if 'ostype' in kwargs:
-            if kwargs['ostype'] == 'auto':
-                kwargs.pop('ostype')
+        if kwargs.get('ostype') == 'auto':
+            kwargs.pop('ostype')
 
         if clone is not None:
             if VZ_TYPE != 'lxc':
-                self.module.fail_json(changed=False, msg="Clone operator is only supported for LXC enabled proxmox clusters.")
+                self.module.fail_json(
+                    changed=False, msg="Clone operator is only supported for LXC enabled proxmox clusters.")
 
             clone_is_template = self.is_template_container(node, clone)
 
@@ -523,11 +522,13 @@ def create_instance(self, vmid, node, disk, storage, cpus, memory, swap, timeout
                 create_full_copy = True
             elif self.module.params['storage'] is None and not clone_is_template:
                 # Not cloning a template, but also no defined storage. This isn't possible.
-                self.module.fail_json(changed=False, msg="Cloned container is not a template, storage needs to be specified.")
+                self.module.fail_json(
+                    changed=False, msg="Cloned container is not a template, storage needs to be specified.")
 
             if self.module.params['clone_type'] == 'linked':
                 if not clone_is_template:
-                    self.module.fail_json(changed=False, msg="'linked' clone type is specified, but cloned container is not a template container.")
+                    self.module.fail_json(
+                        changed=False, msg="'linked' clone type is specified, but cloned container is not a template container.")
                 # Don't need to do more, by default create_full_copy is set to false already
             elif self.module.params['clone_type'] == 'opportunistic':
                 if not clone_is_template:
@@ -547,9 +548,11 @@ def create_instance(self, vmid, node, disk, storage, cpus, memory, swap, timeout
                 if self.module.params[param] is not None:
                     clone_parameters[param] = self.module.params[param]
 
-            taskid = getattr(proxmox_node, VZ_TYPE)(clone).clone.post(newid=vmid, **clone_parameters)
+            taskid = getattr(proxmox_node, VZ_TYPE)(
+                clone).clone.post(newid=vmid, **clone_parameters)
         else:
-            taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
+            taskid = getattr(proxmox_node, VZ_TYPE).create(
+                vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
 
         while timeout:
             if self.api_task_ok(node, taskid):
@@ -563,7 +566,8 @@ def create_instance(self, vmid, node, disk, storage, cpus, memory, swap, timeout
         return False
 
     def start_instance(self, vm, vmid, timeout):
-        taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.start.post()
+        taskid = getattr(self.proxmox_api.nodes(
+            vm['node']), VZ_TYPE)(vmid).status.start.post()
         while timeout:
             if self.api_task_ok(vm['node'], taskid):
                 return True
@@ -577,9 +581,11 @@ def start_instance(self, vm, vmid, timeout):
 
     def stop_instance(self, vm, vmid, timeout, force):
         if force:
-            taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1)
+            taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(
+                vmid).status.shutdown.post(forceStop=1)
         else:
-            taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.shutdown.post()
+            taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(
+                vmid).status.shutdown.post()
         while timeout:
             if self.api_task_ok(vm['node'], taskid):
                 return True
@@ -592,7 +598,8 @@ def stop_instance(self, vm, vmid, timeout, force):
         return False
 
     def umount_instance(self, vm, vmid, timeout):
-        taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.umount.post()
+        taskid = getattr(self.proxmox_api.nodes(
+            vm['node']), VZ_TYPE)(vmid).status.umount.post()
         while timeout:
             if self.api_task_ok(vm['node'], taskid):
                 return True
@@ -634,7 +641,8 @@ def main():
         timeout=dict(type='int', default=30),
         force=dict(type='bool', default=False),
         purge=dict(type='bool', default=False),
-        state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']),
+        state=dict(default='present', choices=[
+            'present', 'absent', 'stopped', 'started', 'restarted']),
         pubkey=dict(type='str'),
         unprivileged=dict(type='bool', default=True),
         description=dict(type='str'),
@@ -643,7 +651,8 @@ def main():
         proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults'],
                                       removed_in_version='9.0.0', removed_from_collection='community.general'),
         clone=dict(type='int'),
-        clone_type=dict(default='opportunistic', choices=['full', 'linked', 'opportunistic']),
+        clone_type=dict(default='opportunistic', choices=[
+            'full', 'linked', 'opportunistic']),
         tags=dict(type='list', elements='str')
     )
     module_args.update(proxmox_args)
@@ -652,14 +661,16 @@ def main():
         argument_spec=module_args,
         required_if=[
             ('state', 'present', ['node', 'hostname']),
-            ('state', 'present', ('clone', 'ostemplate'), True),  # Require one of clone and ostemplate. Together with mutually_exclusive this ensures that we
-            # either clone a container or create a new one from a template file.
+            # Require one of clone and ostemplate. Together with mutually_exclusive this ensures that we
+            ('state', 'present', ('clone', 'ostemplate'), True),
+            # either clone a container or create a new one from a template file.
         ],
         required_together=[
             ('api_token_id', 'api_token_secret')
         ],
         required_one_of=[('api_password', 'api_token_id')],
-        mutually_exclusive=[('clone', 'ostemplate')],  # Creating a new container is done either by cloning an existing one, or based on a template.
+        # Creating a new container is done either by cloning an existing one, or based on a template.
+        mutually_exclusive=[('clone', 'ostemplate')],
     )
 
     proxmox = ProxmoxLxcAnsible(module)
@@ -702,26 +713,31 @@ def main():
     elif not vmid and hostname:
         vmid = proxmox.get_vmid(hostname)
     elif not vmid:
-        module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
+        module.exit_json(
+            changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
 
     # Create a new container
     if state == 'present' and clone is None:
         try:
             if proxmox.get_vm(vmid, ignore_missing=True) and not module.params['force']:
-                module.exit_json(changed=False, vmid=vmid, msg="VM with vmid = %s is already exists" % vmid)
+                module.exit_json(changed=False, vmid=vmid,
+                                 msg="VM with vmid = %s is already exists" % vmid)
             # If no vmid was passed, there cannot be another VM named 'hostname'
             if (not module.params['vmid'] and proxmox.get_vmid(hostname, ignore_missing=True) and
                     not module.params['force']):
                 vmid = proxmox.get_vmid(hostname)
-                module.exit_json(changed=False, vmid=vmid, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
+                module.exit_json(
+                    changed=False, vmid=vmid, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
             elif not proxmox.get_node(node):
-                module.fail_json(vmid=vmid, msg="node '%s' not exists in cluster" % node)
+                module.fail_json(
+                    vmid=vmid, msg="node '%s' not exists in cluster" % node)
             elif not proxmox.content_check(node, module.params['ostemplate'], template_store):
                 module.fail_json(vmid=vmid, msg="ostemplate '%s' not exists on node %s and storage %s"
                                  % (module.params['ostemplate'], node, template_store))
         except Exception as e:
-            module.fail_json(vmid=vmid, msg="Pre-creation checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
+            module.fail_json(
+                vmid=vmid, msg="Pre-creation checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
 
         try:
             proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone,
@@ -734,56 +750,72 @@ def main():
                                     ostemplate=module.params['ostemplate'],
                                     netif=module.params['netif'],
                                     mounts=module.params['mounts'],
                                     ostype=module.params['ostype'],
                                     ip_address=module.params['ip_address'],
-                                    onboot=ansible_to_proxmox_bool(module.params['onboot']),
+                                    onboot=ansible_to_proxmox_bool(
+                                        module.params['onboot']),
                                     cpuunits=module.params['cpuunits'],
                                     nameserver=module.params['nameserver'],
                                     searchdomain=module.params['searchdomain'],
-                                    force=ansible_to_proxmox_bool(module.params['force']),
+                                    force=ansible_to_proxmox_bool(
+                                        module.params['force']),
                                     pubkey=module.params['pubkey'],
-                                    features=",".join(module.params['features']) if module.params['features'] is not None else None,
-                                    unprivileged=ansible_to_proxmox_bool(module.params['unprivileged']),
+                                    features=",".join(
+                                        module.params['features']) if module.params['features'] is not None else None,
+                                    unprivileged=ansible_to_proxmox_bool(
+                                        module.params['unprivileged']),
                                     description=module.params['description'],
                                     hookscript=module.params['hookscript'],
                                     timezone=module.params['timezone'],
                                     tags=module.params['tags'])
-            module.exit_json(changed=True, vmid=vmid, msg="Deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
+            module.exit_json(changed=True, vmid=vmid, msg="Deployed VM %s from template %s" % (
+                vmid, module.params['ostemplate']))
         except Exception as e:
-            module.fail_json(vmid=vmid, msg="Creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
+            module.fail_json(
+                vmid=vmid, msg="Creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
 
     # Clone a container
     elif state == 'present' and clone is not None:
         try:
             if proxmox.get_vm(vmid, ignore_missing=True) and not module.params['force']:
-                module.exit_json(changed=False, vmid=vmid, msg="VM with vmid = %s is already exists" % vmid)
+                module.exit_json(changed=False, vmid=vmid,
+                                 msg="VM with vmid = %s is already exists" % vmid)
             # If no vmid was passed, there cannot be another VM named 'hostname'
             if (not module.params['vmid'] and proxmox.get_vmid(hostname, ignore_missing=True) and
                     not module.params['force']):
                 vmid = proxmox.get_vmid(hostname)
-                module.exit_json(changed=False, vmid=vmid, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
+                module.exit_json(
+                    changed=False, vmid=vmid, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
             if not proxmox.get_vm(clone, ignore_missing=True):
-                module.exit_json(changed=False, vmid=vmid, msg="Container to be cloned does not exist")
+                module.exit_json(changed=False, vmid=vmid,
+                                 msg="Container to be cloned does not exist")
         except Exception as e:
-            module.fail_json(vmid=vmid, msg="Pre-clone checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
+            module.fail_json(
+                vmid=vmid, msg="Pre-clone checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
 
         try:
-            proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone)
+            proxmox.create_instance(
+                vmid, node, disk, storage, cpus, memory, swap, timeout, clone)
 
-            module.exit_json(changed=True, vmid=vmid, msg="Cloned VM %s from %s" % (vmid, clone))
+            module.exit_json(changed=True, vmid=vmid,
+                             msg="Cloned VM %s from %s" % (vmid, clone))
         except Exception as e:
-            module.fail_json(vmid=vmid, msg="Cloning %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
+            module.fail_json(
+                vmid=vmid, msg="Cloning %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
 
     elif state == 'started':
         try:
             vm = proxmox.get_vm(vmid)
             if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
-                module.exit_json(changed=False, vmid=vmid, msg="VM %s is already running" % vmid)
+                module.exit_json(changed=False, vmid=vmid,
+                                 msg="VM %s is already running" % vmid)
 
             if proxmox.start_instance(vm, vmid, timeout):
-                module.exit_json(changed=True, vmid=vmid, msg="VM %s started" % vmid)
+                module.exit_json(changed=True, vmid=vmid,
+                                 msg="VM %s started" % vmid)
         except Exception as e:
-            module.fail_json(vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e))
+            module.fail_json(
+                vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e))
 
     elif state == 'stopped':
         try:
@@ -792,58 +824,73 @@ def main():
             if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
                 if module.params['force']:
                     if proxmox.umount_instance(vm, vmid, timeout):
-                        module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid)
+                        module.exit_json(changed=True, vmid=vmid,
+                                         msg="VM %s is shutting down" % vmid)
                 else:
                     module.exit_json(changed=False, vmid=vmid,
                                      msg=("VM %s is already shutdown, but mounted. You can use force option to umount it.") % vmid)
 
             if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
-                module.exit_json(changed=False, vmid=vmid, msg="VM %s is already shutdown" % vmid)
+                module.exit_json(changed=False, vmid=vmid,
+                                 msg="VM %s is already shutdown" % vmid)
 
             if proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']):
-                module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid)
+                module.exit_json(changed=True, vmid=vmid,
+                                 msg="VM %s is shutting down" % vmid)
         except Exception as e:
-            module.fail_json(vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e))
+            module.fail_json(
+                vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e))
 
     elif state == 'restarted':
         try:
             vm = proxmox.get_vm(vmid)
-            vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status']
+            vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(
+                vmid).status.current.get()['status']
             if vm_status in ['stopped', 'mounted']:
-                module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid)
+                module.exit_json(changed=False, vmid=vmid,
+                                 msg="VM %s is not running" % vmid)
 
             if (proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']) and
                     proxmox.start_instance(vm, vmid, timeout)):
-                module.exit_json(changed=True, vmid=vmid, msg="VM %s is restarted" % vmid)
+                module.exit_json(changed=True, vmid=vmid,
+                                 msg="VM %s is restarted" % vmid)
         except Exception as e:
-            module.fail_json(vmid=vmid, msg="restarting of VM %s failed with exception: %s" % (vmid, e))
+            module.fail_json(
+                vmid=vmid, msg="restarting of VM %s failed with exception: %s" % (vmid, e))
 
     elif state == 'absent':
         if not vmid:
-            module.exit_json(changed=False, vmid=vmid, msg='VM with hostname = %s is already absent' % hostname)
+            module.exit_json(changed=False, vmid=vmid,
+                             msg='VM with hostname = %s is already absent' % hostname)
         try:
             vm = proxmox.get_vm(vmid, ignore_missing=True)
             if not vm:
-                module.exit_json(changed=False, vmid=vmid, msg="VM %s does not exist" % vmid)
+                module.exit_json(changed=False, vmid=vmid,
+                                 msg="VM %s does not exist" % vmid)
 
-            vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status']
+            vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(
+                vmid).status.current.get()['status']
             if vm_status == 'running':
-                module.exit_json(changed=False, vmid=vmid, msg="VM %s is running. Stop it before deletion." % vmid)
+                module.exit_json(
+                    changed=False, vmid=vmid, msg="VM %s is running. Stop it before deletion." % vmid)
 
             if vm_status == 'mounted':
-                module.exit_json(changed=False, vmid=vmid, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
+                module.exit_json(
+                    changed=False, vmid=vmid, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
 
             delete_params = {}
 
             if module.params['purge']:
                 delete_params['purge'] = 1
 
-            taskid = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE).delete(vmid, **delete_params)
+            taskid = getattr(proxmox.proxmox_api.nodes(
+                vm['node']), VZ_TYPE).delete(vmid, **delete_params)
 
             while timeout:
                 if proxmox.api_task_ok(vm['node'], taskid):
-                    module.exit_json(changed=True, vmid=vmid, taskid=taskid, msg="VM %s removed" % vmid)
+                    module.exit_json(changed=True, vmid=vmid,
+                                     taskid=taskid, msg="VM %s removed" % vmid)
                 timeout -= 1
                 if timeout == 0:
                     module.fail_json(vmid=vmid, taskid=taskid, msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
@@ -851,7 +898,8 @@ def main():
 
                 time.sleep(1)
         except Exception as e:
-            module.fail_json(vmid=vmid, msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e)))
+            module.fail_json(vmid=vmid, msg="deletion of VM %s failed with exception: %s" % (
+                vmid, to_native(e)))
 
 
 if __name__ == '__main__':

From 4455733bb0d4f1f97595f08f929585732fafb947 Mon Sep 17 00:00:00 2001
From: blacknon
Date: Fri, 3 Nov 2023 09:08:58 +0900
Subject: [PATCH 6/8] update. commit undo formatted by editor auto-update

---
 plugins/modules/proxmox.py | 169 +++++++++++++------------------------
 1 file changed, 61 insertions(+), 108 deletions(-)

diff --git a/plugins/modules/proxmox.py b/plugins/modules/proxmox.py
index a4d126509fc..c675abfd66e 100644
--- a/plugins/modules/proxmox.py
+++ b/plugins/modules/proxmox.py
@@ -6,13 +6,6 @@
 # SPDX-License-Identifier: GPL-3.0-or-later
 
 from __future__ import absolute_import, division, print_function
-from ansible_collections.community.general.plugins.module_utils.proxmox import (
-    ansible_to_proxmox_bool, proxmox_auth_argument_spec, ProxmoxAnsible)
-from ansible.module_utils.common.text.converters import to_native
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-import time
-import re
 __metaclass__ = type
 
 DOCUMENTATION = '''
@@ -443,6 +436,16 @@
       state: absent
 '''
 
+import re
+import time
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.proxmox import (
+    ansible_to_proxmox_bool, proxmox_auth_argument_spec, ProxmoxAnsible)
 
 VZ_TYPE = None
 
@@ -507,8 +510,7 @@ def create_instance(self, vmid, node, disk, storage, cpus, memory, swap, timeout
 
         if clone is not None:
             if VZ_TYPE != 'lxc':
-                self.module.fail_json(
-                    changed=False, msg="Clone operator is only supported for LXC enabled proxmox clusters.")
+                self.module.fail_json(changed=False, msg="Clone operator is only supported for LXC enabled proxmox clusters.")
 
             clone_is_template = self.is_template_container(node, clone)
 
@@ -522,13 +524,11 @@ def create_instance(self, vmid, node, disk, storage, cpus, memory, swap, timeout
                 create_full_copy = True
             elif self.module.params['storage'] is None and not clone_is_template:
                 # Not cloning a template, but also no defined storage. This isn't possible.
-                self.module.fail_json(
-                    changed=False, msg="Cloned container is not a template, storage needs to be specified.")
+                self.module.fail_json(changed=False, msg="Cloned container is not a template, storage needs to be specified.")
 
             if self.module.params['clone_type'] == 'linked':
                 if not clone_is_template:
-                    self.module.fail_json(
-                        changed=False, msg="'linked' clone type is specified, but cloned container is not a template container.")
+                    self.module.fail_json(changed=False, msg="'linked' clone type is specified, but cloned container is not a template container.")
                 # Don't need to do more, by default create_full_copy is set to false already
             elif self.module.params['clone_type'] == 'opportunistic':
                 if not clone_is_template:
@@ -548,11 +548,9 @@ def create_instance(self, vmid, node, disk, storage, cpus, memory, swap, timeout
                 if self.module.params[param] is not None:
                     clone_parameters[param] = self.module.params[param]
 
-            taskid = getattr(proxmox_node, VZ_TYPE)(
-                clone).clone.post(newid=vmid, **clone_parameters)
+            taskid = getattr(proxmox_node, VZ_TYPE)(clone).clone.post(newid=vmid, **clone_parameters)
         else:
-            taskid = getattr(proxmox_node, VZ_TYPE).create(
-                vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
+            taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
 
         while timeout:
             if self.api_task_ok(node, taskid):
@@ -566,8 +564,7 @@ def create_instance(self, vmid, node, disk, storage, cpus, memory, swap, timeout
         return False
 
     def start_instance(self, vm, vmid, timeout):
-        taskid = getattr(self.proxmox_api.nodes(
-            vm['node']), VZ_TYPE)(vmid).status.start.post()
+        taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.start.post()
         while timeout:
             if self.api_task_ok(vm['node'], taskid):
                 return True
@@ -581,11 +578,9 @@ def start_instance(self, vm, vmid, timeout):
 
     def stop_instance(self, vm, vmid, timeout, force):
         if force:
-            taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(
-                vmid).status.shutdown.post(forceStop=1)
+            taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1)
         else:
-            taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(
-                vmid).status.shutdown.post()
+            taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.shutdown.post()
         while timeout:
             if self.api_task_ok(vm['node'], taskid):
                 return True
@@ -598,8 +593,7 @@ def stop_instance(self, vm, vmid, timeout, force):
         return False
 
     def umount_instance(self, vm, vmid, timeout):
-        taskid = getattr(self.proxmox_api.nodes(
-            vm['node']), VZ_TYPE)(vmid).status.umount.post()
+        taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.umount.post()
         while timeout:
             if self.api_task_ok(vm['node'], taskid):
                 return True
@@ -641,8 +635,7 @@ def main():
         timeout=dict(type='int', default=30),
         force=dict(type='bool', default=False),
         purge=dict(type='bool', default=False),
-        state=dict(default='present', choices=[
-            'present', 'absent', 'stopped', 'started', 'restarted']),
+        state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']),
         pubkey=dict(type='str'),
         unprivileged=dict(type='bool', default=True),
         description=dict(type='str'),
@@ -651,8 +644,7 @@ def main():
         proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults'],
                                       removed_in_version='9.0.0', removed_from_collection='community.general'),
         clone=dict(type='int'),
-        clone_type=dict(default='opportunistic', choices=[
-            'full', 'linked', 'opportunistic']),
+        clone_type=dict(default='opportunistic', choices=['full', 'linked', 'opportunistic']),
         tags=dict(type='list', elements='str')
     )
     module_args.update(proxmox_args)
@@ -661,16 +653,14 @@ def main():
         argument_spec=module_args,
         required_if=[
             ('state', 'present', ['node', 'hostname']),
-            # Require one of clone and ostemplate. Together with mutually_exclusive this ensures that we
-            ('state', 'present', ('clone', 'ostemplate'), True),
-            # either clone a container or create a new one from a template file.
+            ('state', 'present', ('clone', 'ostemplate'), True),  # Require one of clone and ostemplate. Together with mutually_exclusive this ensures that we
+            # either clone a container or create a new one from a template file.
         ],
         required_together=[
             ('api_token_id', 'api_token_secret')
         ],
         required_one_of=[('api_password', 'api_token_id')],
-        # Creating a new container is done either by cloning an existing one, or based on a template.
-        mutually_exclusive=[('clone', 'ostemplate')],
+        mutually_exclusive=[('clone', 'ostemplate')],  # Creating a new container is done either by cloning an existing one, or based on a template.
     )
 
     proxmox = ProxmoxLxcAnsible(module)
@@ -713,31 +703,26 @@ def main():
     elif not vmid and hostname:
         vmid = proxmox.get_vmid(hostname)
     elif not vmid:
-        module.exit_json(
-            changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
+        module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
 
     # Create a new container
     if state == 'present' and clone is None:
         try:
             if proxmox.get_vm(vmid, ignore_missing=True) and not module.params['force']:
-                module.exit_json(changed=False, vmid=vmid,
-                                 msg="VM with vmid = %s is already exists" % vmid)
+                module.exit_json(changed=False, vmid=vmid, msg="VM with vmid = %s is already exists" % vmid)
             # If no vmid was passed, there cannot be another VM named 'hostname'
             if (not module.params['vmid'] and proxmox.get_vmid(hostname, ignore_missing=True) and
                     not module.params['force']):
                 vmid = proxmox.get_vmid(hostname)
-                module.exit_json(
-                    changed=False, vmid=vmid, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
+                module.exit_json(changed=False, vmid=vmid, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
             elif not proxmox.get_node(node):
-                module.fail_json(
-                    vmid=vmid, msg="node '%s' not exists in cluster" % node)
+                module.fail_json(vmid=vmid, msg="node '%s' not exists in cluster" % node)
             elif not proxmox.content_check(node, module.params['ostemplate'], template_store):
                 module.fail_json(vmid=vmid, msg="ostemplate '%s' not exists on node %s and storage %s"
                                  % (module.params['ostemplate'], node, template_store))
         except Exception as e:
-            module.fail_json(
-                vmid=vmid, msg="Pre-creation checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
+            module.fail_json(vmid=vmid, msg="Pre-creation checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
 
         try:
             proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone,
@@ -750,72 +735,56 @@ def main():
                                     ostemplate=module.params['ostemplate'],
                                     netif=module.params['netif'],
                                     mounts=module.params['mounts'],
                                     ostype=module.params['ostype'],
                                     ip_address=module.params['ip_address'],
-                                    onboot=ansible_to_proxmox_bool(
-                                        module.params['onboot']),
+                                    onboot=ansible_to_proxmox_bool(module.params['onboot']),
                                     cpuunits=module.params['cpuunits'],
                                     nameserver=module.params['nameserver'],
                                     searchdomain=module.params['searchdomain'],
-                                    force=ansible_to_proxmox_bool(
-                                        module.params['force']),
+                                    force=ansible_to_proxmox_bool(module.params['force']),
                                     pubkey=module.params['pubkey'],
-                                    features=",".join(
-                                        module.params['features']) if module.params['features'] is not None else None,
-                                    unprivileged=ansible_to_proxmox_bool(
-                                        module.params['unprivileged']),
+                                    features=",".join(module.params['features']) if module.params['features'] is not None else None,
+                                    unprivileged=ansible_to_proxmox_bool(module.params['unprivileged']),
                                     description=module.params['description'],
                                     hookscript=module.params['hookscript'],
                                     timezone=module.params['timezone'],
                                     tags=module.params['tags'])
-            module.exit_json(changed=True, vmid=vmid, msg="Deployed VM %s from template %s" % (
-                vmid, module.params['ostemplate']))
+            module.exit_json(changed=True, vmid=vmid, msg="Deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
         except Exception as e:
-            module.fail_json(
-                vmid=vmid, msg="Creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
+            module.fail_json(vmid=vmid, msg="Creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
 
     # Clone a container
     elif state == 'present' and clone is not None:
         try:
             if proxmox.get_vm(vmid, ignore_missing=True) and not module.params['force']:
-                module.exit_json(changed=False, vmid=vmid,
-                                 msg="VM with vmid = %s is already exists" % vmid)
+                module.exit_json(changed=False, vmid=vmid, msg="VM with vmid = %s is already exists" % vmid)
             # If no vmid was passed, there cannot be another VM named 'hostname'
             if (not module.params['vmid'] and proxmox.get_vmid(hostname, ignore_missing=True) and
                     not module.params['force']):
                 vmid = proxmox.get_vmid(hostname)
-                module.exit_json(
-                    changed=False, vmid=vmid, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
+                module.exit_json(changed=False, vmid=vmid, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
             if not proxmox.get_vm(clone, ignore_missing=True):
-                module.exit_json(changed=False, vmid=vmid,
-                                 msg="Container to be cloned does not exist")
+                module.exit_json(changed=False, vmid=vmid, msg="Container to be cloned does not exist")
         except Exception as e:
-            module.fail_json(
-                vmid=vmid, msg="Pre-clone checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
+            module.fail_json(vmid=vmid, msg="Pre-clone checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
 
         try:
-            proxmox.create_instance(
-                vmid, node, disk, storage, cpus, memory, swap, timeout, clone)
+            proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone)
 
-            module.exit_json(changed=True, vmid=vmid,
-                             msg="Cloned VM %s from %s" % (vmid, clone))
+            module.exit_json(changed=True, vmid=vmid, msg="Cloned VM %s from %s" % (vmid, clone))
        except Exception as e:
-            module.fail_json(
-                vmid=vmid, msg="Cloning %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
+            module.fail_json(vmid=vmid, msg="Cloning %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
 
     elif state == 'started':
         try:
             vm = proxmox.get_vm(vmid)
             if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
-                module.exit_json(changed=False, vmid=vmid,
-                                 msg="VM %s is already running" % vmid)
+                module.exit_json(changed=False, vmid=vmid, msg="VM %s is already running" % vmid)
 
             if proxmox.start_instance(vm, vmid, timeout):
-                module.exit_json(changed=True, vmid=vmid,
-                                 msg="VM %s started" % vmid)
+                module.exit_json(changed=True, vmid=vmid, msg="VM %s started" % vmid)
         except Exception as e:
-            module.fail_json(
-                vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e))
+            module.fail_json(vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e))
 
     elif state == 'stopped':
         try:
@@ -824,73 +793,58 @@ def main():
             if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
                 if module.params['force']:
                     if proxmox.umount_instance(vm, vmid, timeout):
-                        module.exit_json(changed=True, vmid=vmid,
-                                         msg="VM %s is shutting down" % vmid)
+                        module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid)
                 else:
                     module.exit_json(changed=False, vmid=vmid,
                                      msg=("VM %s is already shutdown, but mounted. You can use force option to umount it.") % vmid)
 
             if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
-                module.exit_json(changed=False, vmid=vmid,
-                                 msg="VM %s is already shutdown" % vmid)
+                module.exit_json(changed=False, vmid=vmid, msg="VM %s is already shutdown" % vmid)
 
             if proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']):
-                module.exit_json(changed=True, vmid=vmid,
-                                 msg="VM %s is shutting down" % vmid)
         except Exception as e:
-            module.fail_json(
-                vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e))
+                module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid)
+        except Exception as e:
+            module.fail_json(vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e))
 
     elif state == 'restarted':
         try:
             vm = proxmox.get_vm(vmid)
-            vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(
-                vmid).status.current.get()['status']
+            vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status']
             if vm_status in ['stopped', 'mounted']:
-                module.exit_json(changed=False, vmid=vmid,
-                                 msg="VM %s is not running" % vmid)
+                module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid)
 
             if (proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']) and
                     proxmox.start_instance(vm, vmid, timeout)):
-                module.exit_json(changed=True, vmid=vmid,
-                                 msg="VM %s is restarted" % vmid)
+                module.exit_json(changed=True, vmid=vmid, msg="VM %s is restarted" % vmid)
         except Exception as e:
-            module.fail_json(
-                vmid=vmid, msg="restarting of VM %s failed with exception: %s" % (vmid, e))
+            module.fail_json(vmid=vmid, msg="restarting of VM %s failed with exception: %s" % (vmid, e))
 
     elif state == 'absent':
         if not vmid:
-            module.exit_json(changed=False, vmid=vmid,
-                             msg='VM with hostname = %s is already absent' % hostname)
+            module.exit_json(changed=False, vmid=vmid, msg='VM with hostname = %s is already absent' % hostname)
         try:
             vm = proxmox.get_vm(vmid, ignore_missing=True)
             if not vm:
-                module.exit_json(changed=False, vmid=vmid,
-                                 msg="VM %s does not exist" % vmid)
+                module.exit_json(changed=False, vmid=vmid, msg="VM %s does not exist" % vmid)
 
-            vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(
-                vmid).status.current.get()['status']
+            vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status']
             if vm_status == 'running':
-                module.exit_json(
-                    changed=False, vmid=vmid, msg="VM %s is running. Stop it before deletion." % vmid)
+                module.exit_json(changed=False, vmid=vmid, msg="VM %s is running. Stop it before deletion." % vmid)
 
             if vm_status == 'mounted':
-                module.exit_json(
-                    changed=False, vmid=vmid, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
+                module.exit_json(changed=False, vmid=vmid, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
 
             delete_params = {}
 
             if module.params['purge']:
                 delete_params['purge'] = 1
 
-            taskid = getattr(proxmox.proxmox_api.nodes(
-                vm['node']), VZ_TYPE).delete(vmid, **delete_params)
+            taskid = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE).delete(vmid, **delete_params)
 
             while timeout:
                 if proxmox.api_task_ok(vm['node'], taskid):
-                    module.exit_json(changed=True, vmid=vmid,
-                                     taskid=taskid, msg="VM %s removed" % vmid)
+                    module.exit_json(changed=True, vmid=vmid, taskid=taskid, msg="VM %s removed" % vmid)
                 timeout -= 1
                 if timeout == 0:
                     module.fail_json(vmid=vmid, taskid=taskid, msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
@@ -898,8 +852,7 @@ def main():
 
                 time.sleep(1)
         except Exception as e:
-            module.fail_json(vmid=vmid, msg="deletion of VM %s failed with exception: %s" % (
-                vmid, to_native(e)))
+            module.fail_json(vmid=vmid, msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e)))
 
 
 if __name__ == '__main__':

From 354c6268d5d756be73d2abaf42f069966945f13c Mon Sep 17 00:00:00 2001
From: blacknon
Date: Fri, 3 Nov 2023 09:17:28 +0900
Subject: [PATCH 7/8] update. add change fragment (pullrequestreview-1711205075)

---
 ...dd-ostype-parameter-in-LXC-container-clone-of-ProxmoxVE.yaml | 2 ++
 1 file changed, 2 insertions(+)
 create mode 100644 changelogs/fragments/7462-Add-ostype-parameter-in-LXC-container-clone-of-ProxmoxVE.yaml

diff --git a/changelogs/fragments/7462-Add-ostype-parameter-in-LXC-container-clone-of-ProxmoxVE.yaml b/changelogs/fragments/7462-Add-ostype-parameter-in-LXC-container-clone-of-ProxmoxVE.yaml
new file mode 100644
index 00000000000..fe210e1ffd4
--- /dev/null
+++ b/changelogs/fragments/7462-Add-ostype-parameter-in-LXC-container-clone-of-ProxmoxVE.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+  - proxmox_ostype - now possible to specify the ostype when creating an lxc container. (https://github.com/ansible-collections/community.general/pull/7462).

From a35d491b180e50657baf6f8500ac442a23025494 Mon Sep 17 00:00:00 2001
From: blacknon
Date: Fri, 3 Nov 2023 16:27:44 +0900
Subject: [PATCH 8/8] update. pullrequestreview-1711911827

---
 ...dd-ostype-parameter-in-LXC-container-clone-of-ProxmoxVE.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/changelogs/fragments/7462-Add-ostype-parameter-in-LXC-container-clone-of-ProxmoxVE.yaml b/changelogs/fragments/7462-Add-ostype-parameter-in-LXC-container-clone-of-ProxmoxVE.yaml
index fe210e1ffd4..20a9b1d144d 100644
--- a/changelogs/fragments/7462-Add-ostype-parameter-in-LXC-container-clone-of-ProxmoxVE.yaml
+++ b/changelogs/fragments/7462-Add-ostype-parameter-in-LXC-container-clone-of-ProxmoxVE.yaml
@@ -1,2 +1,2 @@
 minor_changes:
-  - proxmox_ostype - now possible to specify the ostype when creating an lxc container. (https://github.com/ansible-collections/community.general/pull/7462).
+  - proxmox_ostype - it is now possible to specify the ``ostype`` when creating an LXC container (https://github.com/ansible-collections/community.general/pull/7462).
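
For reference, a playbook task using the option added by this series might look like the sketch below. It is only illustrative: the connection values, node name, hostname, and template path are placeholders, while the parameter names themselves (ostype with its choices and the default of auto, which keeps the previous behaviour of letting Proxmox detect the OS type from the template) come from the argument spec in the patches above.

  - name: Create a new LXC container with an explicit ostype (illustrative values)
    community.general.proxmox:
      api_user: root@pam
      api_password: secret
      api_host: proxmoxhost
      node: pve01
      hostname: example.org
      ostemplate: 'local:vztmpl/debian-12-standard_12.2-1_amd64.tar.zst'
      ostype: debian
      state: present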