From e832c5f274d04d2e21559be4debc27dbe183fd8e Mon Sep 17 00:00:00 2001
From: Dougal Seeley
Date: Sat, 31 Dec 2022 17:20:32 +0000
Subject: [PATCH] Ansible 7 compatibility fixes (#15)

+ Move from `ec2` to `amazon.aws.ec2_instance` module.
+ `volumes` structure changed; maintain old syntax in `cluster_defs`.
+ Spot instances no longer supported.
+ `ec2_instance` returns success while instances are still in the `pending` state, so check for `pending` too.
+ Fix for route53 rescue.
+ Use the older `instance_role` option name instead of `iam_instance_profile` for `ec2_instance`, as it is backwards-compatible.
+ Update minimum Ansible version to 5.6.0; update assertions.
+ Enable selecting the Ansible version in Jenkinsfile_testsuite and Jenkinsfile_ops
+ Jenkinsfile_testsuite: remove `findAll()`, which can't escape the sandbox
+ Revert to community versions of `libvirt` where possible
+ Add retries to libvirt pool refresh (in case of concurrent background operations)
---
 EXAMPLE/Pipfile | 4 +-
 EXAMPLE/README.md | 6 +-
 .../eu-west-1/dev/cluster_vars__buildenv.yml | 4 +-
 .../azure/cluster_vars__cloud.yml | 4 +-
 EXAMPLE/jenkinsfiles/Jenkinsfile_ops | 19 +++--
 Pipfile | 4 +-
 README.md | 10 +--
 _dependencies/tasks/main.yml | 16 ++--
 clean/tasks/aws.yml | 6 +-
 clean/tasks/libvirt.yml | 2 +-
 .../tasks/get_cluster_hosts_state_aws.yml | 2 +-
 .../get_cluster_hosts_target_libvirt.yml | 4 +-
 config/tasks/create_dns_a.yml | 8 +-
 create/tasks/create_aws.yml | 82 ++++++++++---------
 create/tasks/create_libvirt.yml | 9 +-
 jenkinsfiles/Jenkinsfile_testsuite | 30 +++----
 .../tasks/powerchange_vms_libvirt.yml | 2 +-
 .../tasks/preflight.yml | 2 +-
 18 files changed, 110 insertions(+), 104 deletions(-)

diff --git a/EXAMPLE/Pipfile b/EXAMPLE/Pipfile
index b73029db..c5a6f180 100644
--- a/EXAMPLE/Pipfile
+++ b/EXAMPLE/Pipfile
@@ -9,7 +9,7 @@ boto3 = "*"
 boto = "*"
 botocore = "*"
 requests = "*"
-ansible = ">=2.9"
+ansible = ">=5.6"
 jmespath = "*"
 dnspython = "*"
 google-auth = "*"
@@ -24,4 +24,4 @@ PyVmomi = "*"
 [dev-packages]
 [requires]
-python_version = "3"
+python_version = "3.10"
diff --git a/EXAMPLE/README.md b/EXAMPLE/README.md
index 3c383dcf..c9814743 100644
--- a/EXAMPLE/README.md
+++ b/EXAMPLE/README.md
@@ -7,8 +7,8 @@ _**Please refer to the full [README.md](https://github.com/dseeley/clusterverse/
 Contributions are welcome and encouraged. Please see [CONTRIBUTING.md](https://github.com/dseeley/clusterverse/blob/master/CONTRIBUTING.md) for details.
 ## Requirements
-+ Ansible >= 2.9
-+ Python >= 2.7
++ Ansible >= 5.6.0
++ Python >= 3.8
 ---
@@ -93,9 +93,9 @@ ansible-playbook redeploy.yml -e buildenv=sandbox -e cloud_type=azure -e region=
 ### Mandatory command-line variables:
 + `-e buildenv=` - The environment (dev, stage, etc), which must be an attribute of `cluster_vars` defined in `group_vars//cluster_vars.yml`
 + `-e canary=['start', 'finish', 'filter', 'none', 'tidy']` - Specify whether to start, finish or filter a canary redeploy (or 'none', to redeploy the whole cluster in one command). See below (`-e canary_filter_regex`) for `canary=filter`.
++ `-e redeploy_scheme=` - The scheme corresponds to one defined in `roles/clusterverse/redeploy`
 ### Extra variables:
-+ `-e redeploy_scheme=` - The scheme corresponds to one defined in `roles/clusterverse/redeploy`
 + `-e canary_tidy_on_success=[true|false]` - Whether to run the tidy (remove the replaced VMs and DNS) on successful redeploy
 + `-e canary_filter_regex='^.*-test-sysdisks.*$'` - Sets the regex pattern used to filter the target hosts by their hostnames - mandatory when using `canary=filter`
 + `-e myhosttypes="master,slave"` - During a redeploy, you can define which host types you would like to redeploy. If not defined, all host types will be redeployed.
diff --git a/EXAMPLE/cluster_defs/aws/eu-west-1/dev/cluster_vars__buildenv.yml b/EXAMPLE/cluster_defs/aws/eu-west-1/dev/cluster_vars__buildenv.yml
index d47c6394..e84374b4 100644
--- a/EXAMPLE/cluster_defs/aws/eu-west-1/dev/cluster_vars__buildenv.yml
+++ b/EXAMPLE/cluster_defs/aws/eu-west-1/dev/cluster_vars__buildenv.yml
@@ -44,7 +44,7 @@ cluster_vars:
 auto_volumes:
 - { device_name: "/dev/sda1", mountpoint: "/", fstype: "ext4", volume_type: "gp3", volume_size: 8, encrypted: True, delete_on_termination: true }
 - { device_name: "/dev/sdf", mountpoint: "/media/mysvc", fstype: "ext4", volume_type: "gp3", volume_size: 1, encrypted: True, delete_on_termination: true }
- - { device_name: "/dev/sdg", mountpoint: "/media/mysvc", fstype: "ext4", volume_type: "gp3", volume_size: 1, iops: 100, encrypted: True, delete_on_termination: true }
+ - { device_name: "/dev/sdg", mountpoint: "/media/mysvc", fstype: "ext4", volume_type: "gp3", volume_size: 1, iops: 3000, encrypted: True, delete_on_termination: true }
 lvmparams: { vg_name: "vg0", lv_name: "lv0", lv_size: "100%VG" }
 flavor: t4g.nano
 # image: "ami-08ff82115239305ce" # eu-west-1 22.04 arm64 hvm-ssd 20220616. Ubuntu images can be located at https://cloud-images.ubuntu.com/locator/
@@ -62,7 +62,7 @@ cluster_vars:
 auto_volumes:
 - { device_name: "/dev/sda1", mountpoint: "/", fstype: "ext4", volume_type: "gp3", volume_size: 8, encrypted: True, delete_on_termination: true }
 - { device_name: "/dev/sdf", mountpoint: "/media/mysvc", fstype: "ext4", volume_type: "gp3", volume_size: 1, encrypted: True, delete_on_termination: true, perms: { owner: "root", group: "root", mode: "775" } }
- - { device_name: "/dev/sdg", mountpoint: "/media/mysvc2", fstype: "ext4", volume_type: "gp3", volume_size: 1, iops: 100, encrypted: True, delete_on_termination: true }
+ - { device_name: "/dev/sdg", mountpoint: "/media/mysvc2", fstype: "ext4", volume_type: "gp3", volume_size: 1, iops: 3000, encrypted: True, delete_on_termination: true }
 flavor: t3a.nano
 version: "{{sysdisks_version | default('')}}"
 vms_by_az: { a: 1, b: 1, c: 0 }
diff --git a/EXAMPLE/cluster_defs/azure/cluster_vars__cloud.yml b/EXAMPLE/cluster_defs/azure/cluster_vars__cloud.yml
index eae7c634..c22a3bcf 100644
--- a/EXAMPLE/cluster_defs/azure/cluster_vars__cloud.yml
+++ b/EXAMPLE/cluster_defs/azure/cluster_vars__cloud.yml
@@ -6,8 +6,8 @@ ssh_whitelist: ['10.0.0.0/8']
 redeploy_schemes_supported: ['_scheme_addallnew_rmdisk_rollback', '_scheme_addnewvm_rmdisk_rollback', '_scheme_rmvm_rmdisk_only'] # TODO: support _scheme_rmvm_keepdisk_rollback
 ## Source images from which to clone. Set these as variables so they can be selected on the command line (for automated testing).
-_ubuntu2204image: { "publisher": "canonical", "offer": "0001-com-ubuntu-server-jammy", "sku": "22_04-lts-gen2", "version": "latest" }
-_ubuntu2004image: { "publisher": "canonical", "offer": "0001-com-ubuntu-server-focal", "sku": "20_04-lts-gen2", "version": "latest" } # or specific: "version": "20.04.202107200"
+_ubuntu2204image: { "publisher": "canonical", "offer": "0001-com-ubuntu-server-jammy", "sku": "22_04-lts-gen2", "version": "latest" } # or specific version: "version": "22.04.202206220"
+_ubuntu2004image: { "publisher": "canonical", "offer": "0001-com-ubuntu-server-focal", "sku": "20_04-lts-gen2", "version": "latest" }
 _ubuntu1804image: { "publisher": "canonical", "offer": "UbuntuServer", "sku": "18_04-lts-gen2", "version": "latest" }
 _centos7image: { "publisher": "eurolinuxspzoo1620639373013", "offer": "centos-7-9-free", "sku": "centos-7-9-free", "version": "latest" }
 _alma8image: { "publisher": "almalinux", "offer": "almalinux", "sku": "8_5-gen2", "version": "latest" }
diff --git a/EXAMPLE/jenkinsfiles/Jenkinsfile_ops b/EXAMPLE/jenkinsfiles/Jenkinsfile_ops
index be4cc3df..3b1fe315 100644
--- a/EXAMPLE/jenkinsfiles/Jenkinsfile_ops
+++ b/EXAMPLE/jenkinsfiles/Jenkinsfile_ops
@@ -8,7 +8,7 @@ def DEFAULT_CLUSTERVERSE_TESTSUITE_URL = "https://github.com/dseeley/clustervers
 def DEFAULT_CLUSTERVERSE_TESTSUITE_BRANCH = "master"
 //This allows us to create our own Docker image for this specific use-case. Once it is built, it will not be rebuilt, so it only adds delay the first time we use it.
-def create_custom_image(image_name, params = "") {
+def create_custom_image(image_name, build_opts = "") {
 // Create a lock to prevent building the same image in parallel
 lock('IMAGEBUILDLOCK__' + image_name + '__' + env.NODE_NAME) {
 def jenkins_username = sh(script: 'whoami', returnStdout: true).trim()
@@ -22,18 +22,20 @@ def create_custom_image(image_name, params = "") {
 ENV HOME=${env.JENKINS_HOME}
 ENV PIPENV_VENV_IN_PROJECT=true
 ENV TZ=Europe/London
+ SHELL ["/bin/bash", "-c"]
+
 RUN groupadd -g ${jenkins_gid} ${jenkins_username} && useradd -m -u ${jenkins_uid} -g ${jenkins_gid} -s /bin/bash ${jenkins_username}
 ### Note: use pip to install pipenv (not apt) to avoid pypa/pipenv#2196 (when using PIPENV_VENV_IN_PROJECT)
 RUN apt-get update \
- && apt-get install -y git iproute2 \
- python3-boto python3-boto3 python3-dev python3-distutils python3-docker python3-dnspython python3-google-auth python3-googleapi python3-jinja2 python3-jmespath python3-libcloud python3-libvirt python3-lxml python3-netaddr python3-paramiko python3-passlib python3-pip python3-pyvmomi python3-ruamel.yaml python3-setuptools python3-wheel python3-xmltodict \
- && pip3 install pycdlib pipenv ansible==5.9.0 \
- `## uncomment if ansible=6.0.0 # && ansible-galaxy collection install azure.azcollection -p \$(pip3 show ansible | grep ^Location | sed 's/Location: \\(.*\\)/\\1/') --force` \
- && pip3 install -r \$(pip3 show ansible | grep ^Location | sed 's/Location: \\(.*\\)/\\1/')/ansible_collections/azure/azcollection/requirements-azure.txt
+ && apt-get install -y git iproute2 python3-boto python3-boto3 python3-dev python3-distutils python3-docker python3-dnspython python3-google-auth python3-googleapi python3-jinja2 python3-jmespath python3-libcloud python3-libvirt python3-lxml python3-netaddr python3-paramiko python3-passlib python3-pip python3-pyvmomi python3-ruamel.yaml python3-setuptools python3-wheel python3-xmltodict \
+ && pip3 install pycdlib pipenv ansible==${params.ANSIBLE_VERSION}
+ RUN if [ \$(echo -e "\$(pip3 show ansible | 
grep ^Version | sed -r 's/^Version: (.*)/\\1/')\\n6.4.0"|sort|head -1) != "6.4.0" ]; then ansible-galaxy collection install community.libvirt:==1.2.0 -p \$(pip3 show ansible | grep ^Location | sed 's/Location: \\(.*\\)/\\1/') --force; fi \
+ && if [ \$(echo -e "\$(pip3 show ansible | grep ^Version | sed -r 's/^Version: (.*)/\\1/')\\n6.1.0"|sort|head -1) != "6.1.0" ]; then ansible-galaxy collection install azure.azcollection:==1.13.0 -p \$(pip3 show ansible | grep ^Location | sed 's/Location: \\(.*\\)/\\1/') --force; fi \
+ && pip3 install -r \$(pip3 show ansible | grep ^Location | sed -r 's/^Location: (.*)/\\1/')/ansible_collections/azure/azcollection/requirements-azure.txt
 """.stripIndent()
 writeFile(file: "Dockerfile", text: dockerfile, encoding: "UTF-8")
- custom_build = docker.build(image_name, params + "--network host .")
+ custom_build = docker.build(image_name, build_opts + "--network host .")
 return (custom_build)
 }
@@ -65,6 +67,7 @@ properties([
 string(name: 'CV_GIT_BRANCH', defaultValue: DEFAULT_CLUSTERVERSE_BRANCH, description: "The clusterverse branch to test."),
 credentials(name: 'CV_GIT_CREDS', credentialType: 'com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl', defaultValue: 'GITHUB_SVC_USER', description: 'Jenkins username/password credentials for GitHub', required: false),
 string(name: 'USER_CMDLINE_VARS', defaultValue: '', description: "Any user-defined command-line parameters."),
+ string(name: 'ANSIBLE_VERSION', defaultValue: '7.1.0', description: "Ansible version."),
 ])
 ])
@@ -101,7 +104,7 @@ node {
 // docker.build("cvops", "--build-arg JENKINS_USERNAME=${jenkins_username} --build-arg JENKINS_UID=${jenkins_uid} --build-arg JENKINS_GID=${jenkins_gid} ./jenkinsfiles").inside("${docker_parent_net_str} -e JENKINS_HOME=${env.JENKINS_HOME}") {
 /*** Create a custom docker image within this Jenkinsfile ***/
- create_custom_image("ubuntu_cvtest", "").inside("--init ${docker_parent_net_str}") {
+ create_custom_image("ubuntu_cvtest_${params.ANSIBLE_VERSION}", "").inside("--init ${docker_parent_net_str}") {
 stage('Setup Environment') {
 sh 'printenv | sort'
 println("common_deploy_vars params:" + params)
diff --git a/Pipfile b/Pipfile
index 14fb588a..80434b1e 100644
--- a/Pipfile
+++ b/Pipfile
@@ -9,7 +9,7 @@ boto3 = "*"
 boto = "*"
 botocore = "*"
 requests = "*"
-ansible = ">=2.9"
+ansible = ">=5.6"
 jmespath = "*"
 dnspython = "*"
 google-auth = "*"
@@ -22,4 +22,4 @@ google-api-python-client = "*"
 [dev-packages]
 [requires]
-python_version = "3.7"
+python_version = "3.10"
diff --git a/README.md b/README.md
index 7c718c59..b3b5fb86 100644
--- a/README.md
+++ b/README.md
@@ -41,20 +41,14 @@ To activate the pipenv:
 ### libvirt (Qemu)
 + It is non-trivial to set up username/password access to a remote libvirt host, so we use an ssh key instead.
 + Your ssh user should be a member of the `libvirt` and `kvm` groups.
-+ Store the config in
-  ```yaml
-  cluster_vars:
-    libvirt_ip:
-    username:
-    private_key:
-    storage_pool:
-  ```
++ Store the config in `cluster_vars.libvirt`
 ### ESXi (free)
 + Username & password for a privileged user on an ESXi host
 + SSH must be enabled on the host
 + Set the `Config.HostAgent.vmacore.soap.maxSessionCount` variable to 0 to allow many concurrent tests to run.
 + Set the `Security.SshSessionLimit` variable to max (100) to allow as many ssh sessions as possible.
++ Store the config in `cluster_vars.esxi`
 ### Azure
 + Create an Azure account.
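The README above now just points at `cluster_vars.libvirt` without showing its shape. A minimal sketch of what that block might contain, based on this patch: `hypervisor` and `username` are the keys used in the `qemu+ssh://` connection URIs throughout, and `private_key`/`storage_pool` come from the inline block removed from the README; the placeholder values (and any keys beyond these) are assumptions:

```yaml
cluster_vars:
  libvirt:
    hypervisor: "svr01.example.com"   # libvirt host, used in the qemu+ssh:// connection URI (assumed value)
    username: "libvirt_svc"           # ssh user; must be in the `libvirt` and `kvm` groups (assumed value)
    private_key: "{{ lookup('file', '~/.ssh/id_rsa__libvirt_svc') }}"   # ssh key for that user (assumed value)
    storage_pool: "default"           # pool in which VM disks are created (assumed value)
```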
diff --git a/_dependencies/tasks/main.yml b/_dependencies/tasks/main.yml
index ccb598aa..7f43aea1 100644
--- a/_dependencies/tasks/main.yml
+++ b/_dependencies/tasks/main.yml
@@ -36,20 +36,16 @@
 - name: Preflight check
 block:
+ - name: Ansible requirement
+ assert:
+ that: "ansible_version.full is version('2.12.4', '>=')"
+ fail_msg: "ansible-core >= 2.12.4 (pypi ansible >= 5.6.0) is required."
+
 - name: assertions based on required collections
 block:
- - assert:
- that: "(ansible_version.full is version('2.9.6', '>=') and ansible_version.full is version('2.10.6', '<=')) or ('community.aws' in galaxy_collections and galaxy_collections['community.aws'].version is version('1.5.0', '>='))"
- fail_msg: "If Ansible > 2.9.6 then community.aws > 1.5.0 is required for valid community.aws.route53 support (by default in Ansible v4)."
-
- - name: azure collection requirements
- block:
- - assert: { that: "ansible_version.full is version_compare('2.10', '>=')", fail_msg: "ansible-core > 2.10 required for Azure support." }
- - assert: { that: "'azure.azcollection' in galaxy_collections", fail_msg: "Please ensure the azure.azcollection collection is installed: ansible-galaxy collection install azure.azcollection (or ansible-galaxy collection install --ignore-errors -fr requirements.yml)" }
- when: cluster_vars.type == "azure"
-
 - name: libvirt collection requirements
 block:
+ - assert: { that: "galaxy_collections['community.libvirt'].version is version('1.2.0', '>=')", fail_msg: "community.libvirt >= 1.2.0 required for libvirt support (default in Ansible >= 6.3.0)." }
 - assert: { that: "'dseeley.libvirt' in galaxy_collections", fail_msg: "Please ensure the dseeley.libvirt collection is installed: ansible-galaxy collection install git+https://github.com/dseeley/libvirt.git (or ansible-galaxy collection install --ignore-errors -fr requirements.yml)" }
 - assert: { that: "'dseeley.inventory_lookup' in galaxy_collections", fail_msg: "Please ensure the dseeley.inventory_lookup collection is installed: ansible-galaxy collection install dseeley.inventory_lookup (or ansible-galaxy collection install --ignore-errors -fr requirements.yml)" }
 when: cluster_vars.type == "libvirt"
diff --git a/clean/tasks/aws.yml b/clean/tasks/aws.yml
index c4cb10ca..ba1359fb 100644
--- a/clean/tasks/aws.yml
+++ b/clean/tasks/aws.yml
@@ -3,17 +3,17 @@
 - name: clean/aws | clean vms
 block:
 - name: clean/aws | Remove instances termination protection
- ec2:
+ amazon.aws.ec2_instance:
 aws_access_key: "{{cluster_vars[buildenv].aws_access_key}}"
 aws_secret_key: "{{cluster_vars[buildenv].aws_secret_key}}"
 region: "{{ cluster_vars.region }}"
 state: "{{ item.instance_state }}"
- termination_protection: "no"
+ termination_protection: false
 instance_ids: ["{{ item.instance_id }}"]
 with_items: "{{ hosts_to_clean | json_query(\"[].{instance_id:instance_id, instance_state: instance_state}\") | default([]) }}"
 - name: clean/aws | Delete VMs
- ec2:
+ amazon.aws.ec2_instance:
 aws_access_key: "{{cluster_vars[buildenv].aws_access_key}}"
 aws_secret_key: "{{cluster_vars[buildenv].aws_secret_key}}"
 region: "{{ cluster_vars.region }}"
diff --git a/clean/tasks/libvirt.yml b/clean/tasks/libvirt.yml
index 0ff90bb4..7e635108 100644
--- a/clean/tasks/libvirt.yml
+++ b/clean/tasks/libvirt.yml
@@ -3,7 +3,7 @@
 - name: clean/libvirt
 block:
 - name: clean/libvirt | 'destroy' (forcible shutdown) VM
- dseeley.libvirt.virt:
+ community.libvirt.virt:
 uri: 'qemu+ssh://{{ cluster_vars.libvirt.username }}@{{ cluster_vars.libvirt.hypervisor 
}}/system?keyfile=id_rsa__libvirt_svc&no_verify=1' name: "{{item.name}}" state: destroyed diff --git a/cluster_hosts/tasks/get_cluster_hosts_state_aws.yml b/cluster_hosts/tasks/get_cluster_hosts_state_aws.yml index 45e6ed97..64c729c3 100644 --- a/cluster_hosts/tasks/get_cluster_hosts_state_aws.yml +++ b/cluster_hosts/tasks/get_cluster_hosts_state_aws.yml @@ -4,7 +4,7 @@ ec2_instance_info: filters: "tag:cluster_name": "{{cluster_name}}" - "instance-state-name": ["running", "stopped"] + "instance-state-name": ["running", "pending", "stopped"] aws_access_key: "{{cluster_vars[buildenv].aws_access_key}}" aws_secret_key: "{{cluster_vars[buildenv].aws_secret_key}}" region: "{{cluster_vars.region}}" diff --git a/cluster_hosts/tasks/get_cluster_hosts_target_libvirt.yml b/cluster_hosts/tasks/get_cluster_hosts_target_libvirt.yml index 0a18c560..93bb5abe 100644 --- a/cluster_hosts/tasks/get_cluster_hosts_target_libvirt.yml +++ b/cluster_hosts/tasks/get_cluster_hosts_target_libvirt.yml @@ -1,7 +1,7 @@ --- - name: get_cluster_hosts_target/libvirt | Get basic instance info of all vms - to get filtered images - dseeley.libvirt.virt: + community.libvirt.virt: uri: 'qemu+ssh://{{ cluster_vars.libvirt.username }}@{{ cluster_vars.libvirt.hypervisor }}/system?keyfile=id_rsa__libvirt_svc&no_verify=1' command: list_vms delegate_to: localhost @@ -13,7 +13,7 @@ debug: msg={{ latest_machine }} - name: get_cluster_hosts_target/libvirt | get_xml of the latest image that matches cluster_vars.image - dseeley.libvirt.virt: + community.libvirt.virt: uri: 'qemu+ssh://{{ cluster_vars.libvirt.username }}@{{ cluster_vars.libvirt.hypervisor }}/system?keyfile=id_rsa__libvirt_svc&no_verify=1' command: get_xml name: "{{ latest_machine }}" diff --git a/config/tasks/create_dns_a.yml b/config/tasks/create_dns_a.yml index d3441660..719af20c 100644 --- a/config/tasks/create_dns_a.yml +++ b/config/tasks/create_dns_a.yml @@ -83,12 +83,12 @@ aws_access_key: "{{cluster_vars[buildenv].aws_access_key}}" aws_secret_key: "{{cluster_vars[buildenv].aws_secret_key}}" state: present - zone: "{{item.invocation.module_args.zone}}" - record: "{{item.invocation.module_args.record}}" + zone: "{{cluster_vars.dns_nameserver_zone}}" + record: "{{item.item.item.hostname}}.{{cluster_vars.dns_user_domain}}" type: A ttl: 60 - value: "{{item.invocation.module_args.value}}" - private_zone: "{{item.invocation.module_args.private_zone}}" + value: "{{item.item.item.ipv4}}" + private_zone: "{{item.item.item.private_zone}}" overwrite: true wait: yes become: false diff --git a/create/tasks/create_aws.yml b/create/tasks/create_aws.yml index 0352c88b..103bbf01 100644 --- a/create/tasks/create_aws.yml +++ b/create/tasks/create_aws.yml @@ -18,7 +18,7 @@ rules_egress: - proto: all cidr_ip: 0.0.0.0/0 - register: r__ec2_group + register: r__ec2_instance_group when: cluster_vars.secgroup_new | length > 0 - name: create/aws | Create EC2 VMs asynchronously and wait for completion @@ -33,28 +33,45 @@ loop: "{{ cluster_hosts_target_denormalised_by_volume | selectattr('auto_volume.src', 'defined') | list }}" - name: create/aws | Create EC2 VMs asynchronously - ec2: + amazon.aws.ec2_instance: aws_access_key: "{{cluster_vars[buildenv].aws_access_key}}" aws_secret_key: "{{cluster_vars[buildenv].aws_secret_key}}" region: "{{cluster_vars.region}}" key_name: "{{cluster_vars[buildenv].key_name}}" instance_type: "{{item.flavor}}" - instance_profile_name: "{{cluster_vars[buildenv].hosttype_vars[item.hosttype].instance_profile_name | default(cluster_vars.instance_profile_name | 
default(omit))}}"
+ instance_role: "{{cluster_vars[buildenv].hosttype_vars[item.hosttype].instance_profile_name | default(cluster_vars.instance_profile_name | default(omit))}}"
 instance_initiated_shutdown_behavior: "{{cluster_vars[buildenv].hosttype_vars[item.hosttype].instance_initiated_shutdown_behavior | default(omit)}}"
- spot_price: "{{cluster_vars[buildenv].hosttype_vars[item.hosttype].spot.spot_price | default(omit)}}"
- spot_wait_timeout: "{{cluster_vars[buildenv].hosttype_vars[item.hosttype].spot.spot_wait_timeout | default(10800)}}" #3 hours
- spot_launch_group: "{{cluster_vars[buildenv].hosttype_vars[item.hosttype].spot.spot_launch_group | default(omit)}}"
- spot_type: "{{cluster_vars[buildenv].hosttype_vars[item.hosttype].spot.spot_type | default('persistent')}}"
- image: "{{ item.image }}"
+# spot_price: "{{cluster_vars[buildenv].hosttype_vars[item.hosttype].spot.spot_price | default(omit)}}"
+# spot_wait_timeout: "{{cluster_vars[buildenv].hosttype_vars[item.hosttype].spot.spot_wait_timeout | default(10800)}}" #3 hours
+# spot_launch_group: "{{cluster_vars[buildenv].hosttype_vars[item.hosttype].spot.spot_launch_group | default(omit)}}"
+# spot_type: "{{cluster_vars[buildenv].hosttype_vars[item.hosttype].spot.spot_type | default('persistent')}}"
+ image_id: "{{ item.image }}"
 vpc_subnet_id: "{{item.vpc_subnet_id}}"
- assign_public_ip: "{{ true if ('assign_public_ip' in cluster_vars and cluster_vars.assign_public_ip in ['dynamic']) else false }}"
- group: "{{ cluster_vars.secgroups_existing | default([]) + ([r__ec2_group.group_name] if r__ec2_group.group_name is defined else []) }}"
+ network:
+ assign_public_ip: "{{ true if ('assign_public_ip' in cluster_vars and cluster_vars.assign_public_ip in ['dynamic']) else false }}"
+ security_groups: "{{ cluster_vars.secgroups_existing | default([]) + ([r__ec2_instance_group.group_name] if r__ec2_instance_group.group_name is defined else []) }}"
 wait: yes
- instance_tags: "{{ _instance_tags | combine(cluster_vars.custom_tagslabels | default({})) }}"
+ state: running
+ tags: "{{ _instance_tags | combine(cluster_vars.custom_tagslabels | default({})) }}"
 termination_protection: "{{cluster_vars[buildenv].termination_protection}}"
 user_data: "{{ cluster_vars.user_data | default(omit) }}"
- volumes: "{{ item.auto_volumes | selectattr('src', 'undefined') | list | default([]) }}"
- count_tag: { Name: "{{item.hostname}}" }
+ volumes: |
+ {%- set res = [] -%}
+ {%- for vol in item.auto_volumes -%}
+ {%- if 'src' not in vol -%}
+ {%- if 'volume_type' in vol and vol.volume_type == 'ephemeral' -%}
+ {%- set _dummy = res.append({ 'device_name': vol.device_name, 'virtual_name': vol.ephemeral }) -%}
+ {%- else -%}
+ {%- set _dummy = res.append({ 'device_name': vol.device_name, 'ebs': {'volume_type': vol.volume_type, 'volume_size': vol.volume_size } }) -%}
+ {%- if 'iops' in vol -%} {%- set _dummy = res[res|length-1].ebs.update({'iops': vol.iops}) -%} {%- endif -%}
+ {%- if 'snapshot' in vol -%} {%- set _dummy = res[res|length-1].ebs.update({'snapshot_id': vol.snapshot}) -%} {%- endif -%}
+ {%- if 'encrypted' in vol -%} {%- set _dummy = res[res|length-1].ebs.update({'encrypted': vol.encrypted}) -%} {%- endif -%}
+ {%- if 'delete_on_termination' in vol -%} {%- set _dummy = res[res|length-1].ebs.update({'delete_on_termination': vol.delete_on_termination}) -%} {%- endif -%}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor %}
+ {{ res }}
+ filters: { "tag:Name": "{{item.hostname}}", "instance-state-name": ["running", "pending"] }
 exact_count: 1
 vars: 
_instance_tags:
@@ -71,7 +88,7 @@
 loop: "{{ cluster_hosts_target }}"
 async: 7200
 poll: 0
- register: r__ec2
+ register: r__ec2_instance
 - name: create/aws | Wait for aws instance creation to complete
 async_status: { jid: "{{ item.ansible_job_id }}" }
@@ -79,19 +96,24 @@
 until: r__async_status__ec2.finished
 delay: 3
 retries: 300
- with_items: "{{r__ec2.results}}"
+ with_items: "{{r__ec2_instance.results}}"
 - name: create/aws | r__async_status__ec2.results
 debug: msg={{r__async_status__ec2.results}}
+ - name: create/aws | Acquire cluster_hosts_state after creating (or confirming already-created) VMs, as ec2_instance does not return instance info if the VM already existed.
+ include_role:
+ name: clusterverse/cluster_hosts
+ tasks_from: "get_cluster_hosts_state_{{cluster_vars.type}}.yml"
+
 - name: create/aws | If (cluster_vars.assign_public_ip in [true, 'static']) then associate a new elastic IP with each instance
 ec2_eip:
 aws_access_key: "{{cluster_vars[buildenv].aws_access_key}}"
 aws_secret_key: "{{cluster_vars[buildenv].aws_secret_key}}"
 region: "{{cluster_vars.region}}"
- device_id: "{{ item }}"
+ device_id: "{{ item.instance_id }}"
 release_on_disassociation: yes
- loop: "{{ r__async_status__ec2.results | json_query(\"[].tagged_instances[].id\") }}"
+ loop: "{{ cluster_hosts_state }}"
 when: "'assign_public_ip' in cluster_vars and cluster_vars.assign_public_ip in [true, 'static']"
 - name: create/aws | Set a fact containing the newly-created hosts
@@ -103,7 +125,7 @@
 aws_access_key: "{{cluster_vars[buildenv].aws_access_key}}"
 aws_secret_key: "{{cluster_vars[buildenv].aws_secret_key}}"
 region: "{{cluster_vars.region}}"
- instance: "{{ r__async_status__ec2.results | json_query(\"[].tagged_instances[?tags.Name==`\" + item.hostname + \"`].id[] | [0]\") | default(omit) }}"
+ instance: "{{ cluster_hosts_state | json_query(\"[?tagslabels.Name==`\" + item.hostname + \"`].instance_id[] | [0]\") | default(omit) }}"
 id: "{{item.auto_volume.src.volume_id | default(omit)}}"
 snapshot: "{{item.auto_volume.snapshot | default(omit)}}"
 device_name: "{{item.auto_volume.device_name}}"
@@ -115,7 +137,7 @@
 loop: "{{ cluster_hosts_target_denormalised_by_volume | selectattr('auto_volume.src', 'defined') | list }}"
 async: 7200
 poll: 0
- register: r__ec2_vol
+ register: r__ec2_instance_vol
 - name: create/aws | Wait for volume creation/ attachment to complete
 async_status: { jid: "{{ item.ansible_job_id }}" }
@@ -123,25 +145,11 @@
 until: r__async_status__ec2_vol.finished
 delay: 3
 retries: 300
- with_items: "{{r__ec2_vol.results}}"
+ with_items: "{{r__ec2_instance_vol.results}}"
# - name: create/aws | r__async_status__ec2_vol
# debug: msg={{r__async_status__ec2_vol}}
-
-- name: create/aws | Tag the EBS volumes
- block:
- - name: create/aws | Get the ec2_instance_info for EBS tagging
- ec2_instance_info:
- filters:
- "instance-state-name": ["running", "stopped"]
- "tag:cluster_name": "{{cluster_name}}"
- "tag:lifecycle_state": "current"
- aws_access_key: "{{cluster_vars[buildenv].aws_access_key}}"
- aws_secret_key: "{{cluster_vars[buildenv].aws_secret_key}}"
- region: "{{cluster_vars.region}}"
- register: r__ec2_instance_info
-
 - name: create/aws | Set the ec2 volume name tag
 ec2_tag:
 aws_access_key: "{{cluster_vars[buildenv].aws_access_key}}"
@@ -153,9 +161,9 @@
 vars:
 _ec2_vols_denormalised_by_device: |
 {% set res = [] -%}
- {%- for host_instance in r__ec2_instance_info.instances -%}
- {%- for block_device in host_instance.block_device_mappings -%}
- {% set _ = res.append({'hostname': host_instance.tags.Name, 'hosttype': 
host_instance.tags.hosttype, 'device_name': block_device.device_name, 'volume_id': block_device.ebs.volume_id}) -%} + {%- for cluster_host in cluster_hosts_state -%} + {%- for block_device in cluster_host.disk_info_cloud -%} + {% set _ = res.append({'hostname': cluster_host.tagslabels.Name, 'hosttype': cluster_host.tagslabels.hosttype, 'device_name': block_device.device_name, 'volume_id': block_device.ebs.volume_id}) -%} {%- endfor %} {%- endfor %} {{ res }} diff --git a/create/tasks/create_libvirt.yml b/create/tasks/create_libvirt.yml index 5dc106f8..7c67762c 100644 --- a/create/tasks/create_libvirt.yml +++ b/create/tasks/create_libvirt.yml @@ -57,6 +57,9 @@ uri: 'qemu+ssh://{{ cluster_vars.libvirt.username }}@{{ cluster_vars.libvirt.hypervisor }}/system?keyfile=id_rsa__libvirt_svc&no_verify=1' name: default command: refresh + register: r__virt_pool + until: r__virt_pool is success + retries: 10 when: "redeploy_scheme is defined and redeploy_scheme == '_scheme_rmvm_keepdisk_rollback'" @@ -125,7 +128,7 @@ - name: create/libvirt | Create the VMs asynchronously - dseeley.libvirt.virt: + community.libvirt.virt: uri: 'qemu+ssh://{{ cluster_vars.libvirt.username }}@{{ cluster_vars.libvirt.hypervisor }}/system?keyfile=id_rsa__libvirt_svc&no_verify=1' command: define xml: | @@ -221,7 +224,7 @@ debug: msg={{cluster_hosts_created}} - name: create/libvirt | Set autostart if required - dseeley.libvirt.virt: + community.libvirt.virt: uri: 'qemu+ssh://{{ cluster_vars.libvirt.username }}@{{ cluster_vars.libvirt.hypervisor }}/system?keyfile=id_rsa__libvirt_svc&no_verify=1' name: "{{item.item.hostname}}" autostart: yes @@ -229,7 +232,7 @@ when: "'autostart' in item.item.flavor and item.item.flavor.autostart|bool" - name: create/libvirt | Start the VMs - dseeley.libvirt.virt: + community.libvirt.virt: uri: 'qemu+ssh://{{ cluster_vars.libvirt.username }}@{{ cluster_vars.libvirt.hypervisor }}/system?keyfile=id_rsa__libvirt_svc&no_verify=1' name: "{{item.item.hostname}}" state: running diff --git a/jenkinsfiles/Jenkinsfile_testsuite b/jenkinsfiles/Jenkinsfile_testsuite index a5e6f99d..2cc75d95 100644 --- a/jenkinsfiles/Jenkinsfile_testsuite +++ b/jenkinsfiles/Jenkinsfile_testsuite @@ -93,7 +93,7 @@ properties([ extendedChoice(name: 'CLOUD_REGION', type: 'PT_MULTI_SELECT', value: 'libvirt/dougalab,esxifree/dougalab,aws/eu-west-1,gcp/europe-west4,azure/westeurope', description: 'Specify which cloud/region(s) to test', visibleItemCount: 5), choice(name: 'BUILDENV', choices: ['', 'dev'], description: "The environment in which to run the tests"), string(name: 'CLUSTER_ID', defaultValue: 'testsuite', trim: true), - [name: 'DNS_FORCE_DISABLE', $class: 'ChoiceParameter', choiceType: 'PT_RADIO', description: '', randomName: 'choice-parameter-31196915540455', script: [$class: 'GroovyScript', fallbackScript: [classpath: [], sandbox: true, script: ''], script: [classpath: [], sandbox: true, script: 'return [\'false:selected\',\'true\',\'true,false\']']]], + [name: 'DNS_FORCE_DISABLE', $class: 'ChoiceParameter', choiceType: 'PT_RADIO', description: 'Disable managing DNS (much faster).', randomName: 'choice-parameter-31196915540455', script: [$class: 'GroovyScript', fallbackScript: [classpath: [], sandbox: true, script: ''], script: [classpath: [], sandbox: true, script: 'return [\'false:selected\',\'true\',\'true,false\']']]], extendedChoice(name: 'REDEPLOY_SCHEME', type: 'PT_CHECKBOX', value: '_scheme_addallnew_rmdisk_rollback,_scheme_addnewvm_rmdisk_rollback,_scheme_rmvm_rmdisk_only,_scheme_rmvm_keepdisk_rollback', 
defaultValue: '_scheme_addallnew_rmdisk_rollback,_scheme_addnewvm_rmdisk_rollback,_scheme_rmvm_rmdisk_only,_scheme_rmvm_keepdisk_rollback', description: 'Specify which redeploy scheme(s) to test', visibleItemCount: 5), choice(name: 'CLEAN_ON_FAILURE', choices: [true, false], description: "Run a clusterverse clean in the event of a failure."), extendedChoice(name: 'MYHOSTTYPES_TEST', type: 'PT_MULTI_SELECT', value: 'nomyhosttypes,myhosttypes', defaultValue: 'nomyhosttypes', descriptionPropertyValue: 'Without myhosttypes, With myhosttypes', description: 'Whether to run tests on pre-configured hosttypes.', visibleItemCount: 3), @@ -101,6 +101,8 @@ properties([ [name: 'MYHOSTTYPES_SERIAL_PARALLEL', $class: 'CascadeChoiceParameter', choiceType: 'PT_RADIO', description: 'Run the myhosttype test in serial or parallel', randomName: 'choice-parameter-424489601389882', referencedParameters: 'MYHOSTTYPES_TEST', script: [$class: 'GroovyScript', fallbackScript: [classpath: [], sandbox: true, script: 'return([])'], script: [classpath: [], sandbox: true, script: 'if (MYHOSTTYPES_TEST==\'nomyhosttypes,myhosttypes\') { return([\'serial:selected\',\'parallel\']) }']]], extendedChoice(name: 'SCALEUPDOWN', type: 'PT_MULTI_SELECT', value: 'noscale,scaleup,scaledown', defaultValue: 'noscale', description: 'Specify whether to test scaling up and/or down.', visibleItemCount: 3), extendedChoice(name: 'IMAGE_TESTED', type: 'PT_MULTI_SELECT', value: '_ubuntu2204image,_ubuntu2004image,_ubuntu1804image,_centos7image,_alma8image', defaultValue: '_ubuntu2204image', descriptionPropertyValue: 'Ubuntu 22.04, Ubuntu 20.04, Ubuntu 18.04, CentOS 7, AlmaLinux 8', description: 'Specify which image(s) to test', visibleItemCount: 4), +// extendedChoice(name: 'ANSIBLE_VERSION_X', type: 'PT_MULTI_SELECT', bindings: '', description: 'Version of Ansible to use for testing', groovyClasspath: '', groovyScript: '''import groovy.json.JsonSlurper; def semversort(def versions) { versions.collectEntries { [it, it.split(/\./).collect { (it =~ /([0-9]+).*/)[0][1] }*.toInteger()] }.sort { a, b -> [a.value, b.value].transpose().findResult { x, y -> x <=> y ?: null } ?: a.value.size() <=> b.value.size() ?: a.key <=> b.key }.keySet().collect()}; def ansibleReleases= ["curl", "-s", "-H", "Accept: application/json", "-H", "Content-type: application/json", "GET", "https://pypi.org/pypi/ansible/json"].execute().text; def list = new JsonSlurper().parseText(ansibleReleases); return semversort(list.releases.keySet().collect().findAll { it ==~ /^(?![1-4]\.|5\.[0,2-5]|5\.[1]\.).*?\.(?:\d+)$/ }).reverse()''', multiSelectDelimiter: ',', quoteValue: false, saveJSONParameterToFile: false, visibleItemCount: 5), + [name: 'ANSIBLE_VERSION', $class: 'ChoiceParameter', choiceType: 'PT_MULTI_SELECT', description: 'Version of Ansible to use for testing', filterLength: 1, filterable: false, randomName: 'choice-parameter-2731500853867', script: [$class: 'GroovyScript', fallbackScript: [classpath: [], sandbox: true, script: 'return([])'], script: [classpath: [], sandbox: true, script: 'import groovy.json.JsonSlurper; def semversort(def versions) { versions.collectEntries { [it, it.split(/\\./).collect { (it =~ /([0-9]+).*/)[0][1] }*.toInteger()] }.sort { a, b -> [a.value, b.value].transpose().findResult { x, y -> x <=> y ?: null } ?: a.value.size() <=> b.value.size() ?: a.key <=> b.key }.keySet().collect()}; def ansibleReleases= ["curl", "-s", "-H", "Accept: application/json", "-H", "Content-type: application/json", "GET", 
"https://pypi.org/pypi/ansible/json"].execute().text; def list = new JsonSlurper().parseText(ansibleReleases); return semversort(list.releases.keySet().collect().findAll { it ==~ /^(?![1-4]\\.|5\\.[0,2-5]|5\\.[1]\\.).*?\\.(?:\\d+)$/ }).reverse().withIndex().collect {elem, idx -> idx==0 ? elem + ":selected" : elem}']]], ]) ]) @@ -223,7 +225,7 @@ CVTEST_NOMYHOSTTYPES = new MatrixBuilder([ stageBuild.extraVars.put("skip_release_version_check", "true") stageBuild.extraVars.put("release_version", "1_0_0") stage_cvops('deploy', stageBuild, { - build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'deploy'), string(name: 'REDEPLOY_SCHEME', value: ''), string(name: 'CANARY', value: 'none'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: true), string(name: 'MYHOSTTYPES', value: ''), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString())] + build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'deploy'), string(name: 'REDEPLOY_SCHEME', value: ''), string(name: 'CANARY', value: 'none'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: true), string(name: 'MYHOSTTYPES', value: ''), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString()), string(name: 'ANSIBLE_VERSION', value: ANSIBLE_VERSION)] }) // Update the clustervars with new scaled cluster size @@ -240,15 +242,15 @@ CVTEST_NOMYHOSTTYPES = new MatrixBuilder([ if (env.REDEPLOY_SCHEME) { stageBuild.extraVars.put("release_version", "2_0_0") stage_cvops('redeploy canary=start', stageBuild, { - build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${env.BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'redeploy'), string(name: 'REDEPLOY_SCHEME', value: (env.REDEPLOY_SCHEME ? 
env.REDEPLOY_SCHEME : '')), string(name: 'CANARY', value: 'start'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: false), string(name: 'MYHOSTTYPES', value: ''), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString())] + build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${env.BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'redeploy'), string(name: 'REDEPLOY_SCHEME', value: (env.REDEPLOY_SCHEME ? env.REDEPLOY_SCHEME : '')), string(name: 'CANARY', value: 'start'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: false), string(name: 'MYHOSTTYPES', value: ''), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString()), string(name: 'ANSIBLE_VERSION', value: ANSIBLE_VERSION)] }) stage_cvops('redeploy canary=finish', stageBuild, { - build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${env.BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'redeploy'), string(name: 'REDEPLOY_SCHEME', value: (env.REDEPLOY_SCHEME ? env.REDEPLOY_SCHEME : '')), string(name: 'CANARY', value: 'finish'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: false), string(name: 'MYHOSTTYPES', value: ''), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString())] + build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${env.BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'redeploy'), string(name: 'REDEPLOY_SCHEME', value: (env.REDEPLOY_SCHEME ? env.REDEPLOY_SCHEME : '')), string(name: 'CANARY', value: 'finish'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: false), string(name: 'MYHOSTTYPES', value: ''), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString()), string(name: 'ANSIBLE_VERSION', value: ANSIBLE_VERSION)] }) stage_cvops('redeploy canary=tidy', stageBuild, { - build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${env.BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'redeploy'), string(name: 'REDEPLOY_SCHEME', value: (env.REDEPLOY_SCHEME ? 
env.REDEPLOY_SCHEME : '')), string(name: 'CANARY', value: 'tidy'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: false), string(name: 'MYHOSTTYPES', value: ''), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString())] + build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${env.BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'redeploy'), string(name: 'REDEPLOY_SCHEME', value: (env.REDEPLOY_SCHEME ? env.REDEPLOY_SCHEME : '')), string(name: 'CANARY', value: 'tidy'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: false), string(name: 'MYHOSTTYPES', value: ''), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString()), string(name: 'ANSIBLE_VERSION', value: ANSIBLE_VERSION)] }) //Need to redeploy original cluster (without scaling), to test that scaling works with next redeploy test (canary=none) @@ -272,7 +274,7 @@ CVTEST_NOMYHOSTTYPES = new MatrixBuilder([ // Run the canary=none redeploy stageBuild.extraVars.put("release_version", "3_0_0") stage_cvops('redeploy canary=none (tidy_on_success)', stageBuild, { - build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${env.BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'redeploy'), string(name: 'REDEPLOY_SCHEME', value: (env.REDEPLOY_SCHEME ? env.REDEPLOY_SCHEME : '')), string(name: 'CANARY', value: 'none'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: true), string(name: 'MYHOSTTYPES', value: ''), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString())] + build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${env.BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'redeploy'), string(name: 'REDEPLOY_SCHEME', value: (env.REDEPLOY_SCHEME ? 
env.REDEPLOY_SCHEME : '')), string(name: 'CANARY', value: 'none'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: true), string(name: 'MYHOSTTYPES', value: ''), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString()), string(name: 'ANSIBLE_VERSION', value: ANSIBLE_VERSION)] }) } else { stage_cvops('Redeploy not requested', stageBuild, { @@ -281,7 +283,7 @@ CVTEST_NOMYHOSTTYPES = new MatrixBuilder([ } stage_cvops('deploy on top', stageBuild, { - build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${env.BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'deploy'), string(name: 'REDEPLOY_SCHEME', value: ''), string(name: 'CANARY', value: 'none'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: true), string(name: 'MYHOSTTYPES', value: ''), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString())] + build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${env.BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'deploy'), string(name: 'REDEPLOY_SCHEME', value: ''), string(name: 'CANARY', value: 'none'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: true), string(name: 'MYHOSTTYPES', value: ''), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString()), string(name: 'ANSIBLE_VERSION', value: ANSIBLE_VERSION)] }) if (stageBuild.result == 'SUCCESS' || params.CLEAN_ON_FAILURE == 'true') { @@ -290,7 +292,7 @@ CVTEST_NOMYHOSTTYPES = new MatrixBuilder([ echo "Stage failure: Running clean-up on cluster..." 
} catchError { - build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${env.BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'clean'), string(name: 'REDEPLOY_SCHEME', value: ''), string(name: 'CANARY', value: 'none'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: true), string(name: 'MYHOSTTYPES', value: ''), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString())] + build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${env.BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'clean'), string(name: 'REDEPLOY_SCHEME', value: ''), string(name: 'CANARY', value: 'none'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: true), string(name: 'MYHOSTTYPES', value: ''), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString()), string(name: 'ANSIBLE_VERSION', value: ANSIBLE_VERSION)] } } } @@ -343,7 +345,7 @@ CVTEST_MYHOSTTYPES = new MatrixBuilder([ stageBuild.extraVars.put("skip_release_version_check", "true") stageBuild.extraVars.put("release_version", "1_0_0") stage_cvops('deploy', stageBuild, { - build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'deploy'), string(name: 'REDEPLOY_SCHEME', value: ''), string(name: 'CANARY', value: 'none'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: true), string(name: 'MYHOSTTYPES', value: ''), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString())] + build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'deploy'), string(name: 'REDEPLOY_SCHEME', value: ''), string(name: 'CANARY', value: 'none'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: true), string(name: 'MYHOSTTYPES', value: ''), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString()), string(name: 'ANSIBLE_VERSION', value: ANSIBLE_VERSION)] }) // Update the clustervars with new scaled cluster size @@ -361,15 +363,15 @@ CVTEST_MYHOSTTYPES = new 
MatrixBuilder([ stageBuild.extraVars.put("release_version", "2_0_0") params.MYHOSTTYPES_LIST.split(',').each({ my_host_type -> stage_cvops("redeploy canary=start ($my_host_type)", stageBuild, { - build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${env.BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'redeploy'), string(name: 'REDEPLOY_SCHEME', value: (env.REDEPLOY_SCHEME ? env.REDEPLOY_SCHEME : '')), string(name: 'CANARY', value: 'start'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: false), string(name: 'MYHOSTTYPES', value: my_host_type), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString())] + build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${env.BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'redeploy'), string(name: 'REDEPLOY_SCHEME', value: (env.REDEPLOY_SCHEME ? env.REDEPLOY_SCHEME : '')), string(name: 'CANARY', value: 'start'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: false), string(name: 'MYHOSTTYPES', value: my_host_type), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString()), string(name: 'ANSIBLE_VERSION', value: ANSIBLE_VERSION)] }) stage_cvops("redeploy canary=finish ($my_host_type)", stageBuild, { - build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${env.BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'redeploy'), string(name: 'REDEPLOY_SCHEME', value: (env.REDEPLOY_SCHEME ? env.REDEPLOY_SCHEME : '')), string(name: 'CANARY', value: 'finish'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: false), string(name: 'MYHOSTTYPES', value: my_host_type), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString())] + build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${env.BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'redeploy'), string(name: 'REDEPLOY_SCHEME', value: (env.REDEPLOY_SCHEME ? 
env.REDEPLOY_SCHEME : '')), string(name: 'CANARY', value: 'finish'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: false), string(name: 'MYHOSTTYPES', value: my_host_type), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString()), string(name: 'ANSIBLE_VERSION', value: ANSIBLE_VERSION)] }) stage_cvops("redeploy canary=tidy ($my_host_type)", stageBuild, { - build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${env.BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'redeploy'), string(name: 'REDEPLOY_SCHEME', value: (env.REDEPLOY_SCHEME ? env.REDEPLOY_SCHEME : '')), string(name: 'CANARY', value: 'tidy'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: false), string(name: 'MYHOSTTYPES', value: my_host_type), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString())] + build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${env.BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'redeploy'), string(name: 'REDEPLOY_SCHEME', value: (env.REDEPLOY_SCHEME ? env.REDEPLOY_SCHEME : '')), string(name: 'CANARY', value: 'tidy'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: false), string(name: 'MYHOSTTYPES', value: my_host_type), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString()), string(name: 'ANSIBLE_VERSION', value: ANSIBLE_VERSION)] }) }) @@ -395,7 +397,7 @@ CVTEST_MYHOSTTYPES = new MatrixBuilder([ stageBuild.extraVars.put("release_version", "3_0_0") params.MYHOSTTYPES_LIST.split(',').each({ my_host_type -> stage_cvops("redeploy canary=none ($my_host_type) (tidy_on_success)", stageBuild, { - build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${env.BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'redeploy'), string(name: 'REDEPLOY_SCHEME', value: (env.REDEPLOY_SCHEME ? 
env.REDEPLOY_SCHEME : '')), string(name: 'CANARY', value: 'none'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: true), string(name: 'MYHOSTTYPES', value: my_host_type), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString())] + build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${env.BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'redeploy'), string(name: 'REDEPLOY_SCHEME', value: (env.REDEPLOY_SCHEME ? env.REDEPLOY_SCHEME : '')), string(name: 'CANARY', value: 'none'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: true), string(name: 'MYHOSTTYPES', value: my_host_type), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString()), string(name: 'ANSIBLE_VERSION', value: ANSIBLE_VERSION)] }) }) @@ -405,7 +407,7 @@ CVTEST_MYHOSTTYPES = new MatrixBuilder([ echo "Stage failure: Running clean-up on cluster..." } catchError { - build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${env.BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'clean'), string(name: 'REDEPLOY_SCHEME', value: ''), string(name: 'CANARY', value: 'none'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: true), string(name: 'MYHOSTTYPES', value: ''), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString())] + build job: 'clusterverse/clusterverse-ops', parameters: [string(name: 'APP_NAME', value: "cvtest-${env.BUILD_NUMBER}-${env.BUILD_HASH}"), string(name: 'CLOUD_REGION', value: env.CLOUD_REGION), string(name: 'BUILDENV', value: env.BUILDENV), string(name: 'CLUSTER_ID', value: env.CLUSTER_ID), booleanParam(name: 'DNS_FORCE_DISABLE', value: env.DNS_FORCE_DISABLE), string(name: 'DEPLOY_TYPE', value: 'clean'), string(name: 'REDEPLOY_SCHEME', value: ''), string(name: 'CANARY', value: 'none'), booleanParam(name: 'CANARY_TIDY_ON_SUCCESS', value: true), string(name: 'MYHOSTTYPES', value: ''), string(name: 'CV_GIT_URL', value: CV_OPS_GIT_URL), string(name: 'CV_GIT_BRANCH', value: CV_OPS_GIT_BRANCH), string(name: 'USER_CMDLINE_VARS', value: stageBuild.getExtraVarsString()), string(name: 'ANSIBLE_VERSION', value: ANSIBLE_VERSION)] } } } diff --git a/redeploy/__common/tasks/powerchange_vms_libvirt.yml b/redeploy/__common/tasks/powerchange_vms_libvirt.yml index a1b94840..4890b82c 100644 --- a/redeploy/__common/tasks/powerchange_vms_libvirt.yml +++ b/redeploy/__common/tasks/powerchange_vms_libvirt.yml @@ -15,7 +15,7 @@ with_items: "{{ hosts_to_powerchange }}" - name: "powerchange_vms/libvirt | {{powerchange_new_state}} VMs" - dseeley.libvirt.virt: + community.libvirt.virt: uri: 'qemu+ssh://{{ cluster_vars.libvirt.username }}@{{ cluster_vars.libvirt.hypervisor 
}}/system?keyfile=id_rsa__libvirt_svc&no_verify=1' name: "{{item.name}}" state: "{% if powerchange_new_state == 'stop' %}shutdown{% else %}running{% endif %}" diff --git a/redeploy/_scheme_rmvm_keepdisk_rollback/tasks/preflight.yml b/redeploy/_scheme_rmvm_keepdisk_rollback/tasks/preflight.yml index 7effebc6..c5e865cd 100644 --- a/redeploy/_scheme_rmvm_keepdisk_rollback/tasks/preflight.yml +++ b/redeploy/_scheme_rmvm_keepdisk_rollback/tasks/preflight.yml @@ -5,7 +5,7 @@ - block: - name: Preflight check | get ec2_instance_info for current disk information ec2_instance_info: - filters: { "instance-state-name": [ "running", "stopped" ], "tag:cluster_name": "{{cluster_name}}", "tag:lifecycle_state": "current" } + filters: { "instance-state-name": [ "running", "pending", "stopped" ], "tag:cluster_name": "{{cluster_name}}", "tag:lifecycle_state": "current" } aws_access_key: "{{cluster_vars[buildenv].aws_access_key}}" aws_secret_key: "{{cluster_vars[buildenv].aws_secret_key}}" region: "{{cluster_vars.region}}"
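For clarity, a worked example of the `volumes` translation performed by the new Jinja block in `create/tasks/create_aws.yml`, which is what lets the old flat `ec2`-module syntax in `cluster_defs` keep working (values taken from the `/dev/sdg` volume in `cluster_vars__buildenv.yml` above; `mountpoint` and `fstype` are not part of the AWS block-device mapping, so the block skips them and they continue to be used for filesystem/mount configuration):

```yaml
# Old flat `ec2`-style definition, unchanged in cluster_defs:
auto_volumes:
  - { device_name: "/dev/sdg", mountpoint: "/media/mysvc2", fstype: "ext4", volume_type: "gp3", volume_size: 1, iops: 3000, encrypted: True, delete_on_termination: true }

# Equivalent entry the Jinja block emits for amazon.aws.ec2_instance:
volumes:
  - device_name: "/dev/sdg"
    ebs:
      volume_type: "gp3"
      volume_size: 1
      iops: 3000
      encrypted: True
      delete_on_termination: true
```

Ephemeral volumes (`volume_type: ephemeral`) take the other branch of the block, mapping the old `ephemeral` key to a `virtual_name` entry rather than an `ebs` dict.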