From 391ae1cd68a9c1b03d64db091c00df7f25780f23 Mon Sep 17 00:00:00 2001
From: Dougal Seeley
Date: Fri, 30 Jul 2021 10:58:33 +0100
Subject: [PATCH] Fix redeploy regressions (#97)

* Fix lvm disks; support separate root volume.
* Fix 'Create a volume group' when root volume is defined.
* Fix redeploy regressions
  + Tolerate GCP not renaming disks
    + When redeploying and moving the disks (`_scheme_rmvm_keepdisk_rollback`), we cannot rename the disks in GCP to match the new host, so we must make allowances:
      + For non-lvm, we must not add device_name to the .clusterversetest__ test files for GCP.
      + For lvm, we must not attempt to create the volume groups if the device_name is not found in the blockdevmap results (because the names won't match the hosts).
  + Shorten device names to accommodate GCP's 63-character limit.
  + Update blockdevmap.py from upstream to fix missing GCP parameters.
  + Fix the redeploy failing due to undefined lvm variables on previous (redeploying) hosts in `config/tasks/disks_auto_aws_gcp_azure.yml`. Define these with defaults if unset, and add lvm tests.
    + Importantly though, we shouldn't be running the playbook on the previous hosts at all. We considered adding only `cluster_hosts_target` to the inventory, but doing so could break application roles that need the entire inventory when redeploying (the previous hosts will still be part of the cluster for some of the redeploy). Instead, create a new inventory group, `not_target_hosts`, containing the hosts that are _not_ part of `cluster_hosts_target`; this group can then be excluded in the main `cluster.yml` hosts section:
      ```
      - name: clusterverse | Configure the cluster
        hosts: all:!not_target_hosts
      ```
  + Don't fail the Jenkinsfile when parameters are missing (i.e. on the first run of a multibranch pipeline); just exit instead (this prevents GitHub from marking a PR as failed on its first run).
---
 EXAMPLE/README.md                                  |   1 +
 EXAMPLE/cluster.yml                                |   2 +-
 .../testid/eu-west-1/cluster_vars__region.yml      |   7 +-
 .../sandbox/cluster_vars__buildenv.yml             |   7 +-
 .../cluster_defs/gcp/cluster_vars__cloud.yml       |   7 +-
 .../sandbox/cluster_vars__buildenv.yml             |   7 +-
 _dependencies/library/blockdevmap.py               |   3 +
 _dependencies/tasks/main.yml                       |   2 +-
 clean/tasks/gcp.yml                                |   4 +-
 .../tasks/get_cluster_hosts_target_gcp.yml         |   7 +-
 config/tasks/disks_auto_aws_gcp.yml                | 114 +++++++++---------
 dynamic_inventory/tasks/main.yml                   |  36 +++---
 jenkinsfiles/Jenkinsfile_testsuite                 |  60 ++++-----
 13 files changed, 142 insertions(+), 115 deletions(-)

diff --git a/EXAMPLE/README.md b/EXAMPLE/README.md
index 9e5a5527..38183a13 100644
--- a/EXAMPLE/README.md
+++ b/EXAMPLE/README.md
@@ -46,6 +46,7 @@ ansible-playbook cluster.yml -e buildenv=sandbox -e clusterid=test_gcp_euw1 --va
 + `-e metricbeat_install=false` - Does not install metricbeat
 + `-e wait_for_dns=false` - Does not wait for DNS resolution
 + `-e create_gcp_network=true` - Create GCP network and subnetwork (probably needed if creating from scratch and using public network)
++ `-e delete_gcp_network_on_clean=true` - Delete GCP network and subnetwork when run with `-e clean=_all_`
 + `-e debug_nested_log_output=true` - Show the log output from nested calls to embedded Ansible playbooks (i.e. when redeploying)
 + `-e cluster_vars_override='{"sandbox":{"hosttype_vars":{"sys":{"vms_by_az":{"b":1,"c":1,"d":0}}}}}'` - Ability to override cluster_vars dictionary elements from the command line. NOTE: there must be NO SPACES in this string.
diff --git a/EXAMPLE/cluster.yml b/EXAMPLE/cluster.yml
index bb36cbaf..d110ce4d 100644
--- a/EXAMPLE/cluster.yml
+++ b/EXAMPLE/cluster.yml
@@ -19,7 +19,7 @@
   tasks: [ {wait_for_connection: "", tags: ["always"] } ]

 - name: clusterverse | Configure the cluster
-  hosts: all
+  hosts: all:!not_target_hosts
   tasks: [ { include_role: { name: "clusterverse/config", apply: { tags: ["clusterverse_config"]} }, tags: ["clusterverse_config"] } ]

diff --git a/EXAMPLE/cluster_defs/aws/testid/eu-west-1/cluster_vars__region.yml b/EXAMPLE/cluster_defs/aws/testid/eu-west-1/cluster_vars__region.yml
index 330885ea..2ffed42d 100644
--- a/EXAMPLE/cluster_defs/aws/testid/eu-west-1/cluster_vars__region.yml
+++ b/EXAMPLE/cluster_defs/aws/testid/eu-west-1/cluster_vars__region.yml
@@ -1,5 +1,8 @@
 ---
+_ubuntu2004image: "ami-03caf24deed650e2c"    # eu-west-1 20.04, amd64, hvm-ssd, 20210621. Ubuntu images can be located at https://cloud-images.ubuntu.com/locator/
+_centos7image: "ami-01f35c358a86763b2"       # eu-west-1 CentOS 7 by Banzai Cloud
+_alma8image: "ami-05d7345cebf7a784f"         # eu-west-1 Official AlmaLinux 8.x OS image
+
 cluster_vars:
-  image: "ami-03caf24deed650e2c"    # eu-west-1 20.04 amd64 hvm-ssd 20210621. Ubuntu images can be located at https://cloud-images.ubuntu.com/locator/
-#  image: "ami-0b850cf02cc00fdc8"   # eu-west-1, CentOS7
+  image: "{{_ubuntu2004image}}"
diff --git a/EXAMPLE/cluster_defs/aws/testid/eu-west-1/sandbox/cluster_vars__buildenv.yml b/EXAMPLE/cluster_defs/aws/testid/eu-west-1/sandbox/cluster_vars__buildenv.yml
index a8e5ff54..50eeef7e 100644
--- a/EXAMPLE/cluster_defs/aws/testid/eu-west-1/sandbox/cluster_vars__buildenv.yml
+++ b/EXAMPLE/cluster_defs/aws/testid/eu-west-1/sandbox/cluster_vars__buildenv.yml
@@ -48,11 +48,12 @@ cluster_vars:
         version: "{{sysdisks_version | default('')}}"
         vms_by_az: { a: 1, b: 1, c: 0 }

-      sysdisks3:
+      sysdiskslvm:
         auto_volumes:
+          - { device_name: "/dev/sda1", mountpoint: "/", fstype: "ext4", volume_type: "gp2", volume_size: 8, encrypted: True, delete_on_termination: true }
           - { device_name: "/dev/sdf", mountpoint: "/media/mysvc", fstype: "ext4", volume_type: "gp2", volume_size: 1, encrypted: True, delete_on_termination: true }
-          - { device_name: "/dev/sdg", mountpoint: "/media/mysvc2", fstype: "ext4", volume_type: "gp2", volume_size: 1, encrypted: True, delete_on_termination: true }
-          - { device_name: "/dev/sdh", mountpoint: "/media/mysvc3", fstype: "ext4", volume_type: "gp2", volume_size: 1, encrypted: True, delete_on_termination: true }
+          - { device_name: "/dev/sdg", mountpoint: "/media/mysvc", fstype: "ext4", volume_type: "gp2", volume_size: 1, encrypted: True, delete_on_termination: true }
+        lvmparams: { vg_name: "vg0", lv_name: "lv0", lv_size: "100%VG" }
         flavor: t3a.nano
         version: "{{sysdisks_version | default('')}}"
         vms_by_az: { a: 1, b: 1, c: 0 }
diff --git a/EXAMPLE/cluster_defs/gcp/cluster_vars__cloud.yml b/EXAMPLE/cluster_defs/gcp/cluster_vars__cloud.yml
index bec3c909..7be20401 100644
--- a/EXAMPLE/cluster_defs/gcp/cluster_vars__cloud.yml
+++ b/EXAMPLE/cluster_defs/gcp/cluster_vars__cloud.yml
@@ -1,8 +1,11 @@
 ---
+_ubuntu2004image: "projects/ubuntu-os-cloud/global/images/ubuntu-2004-focal-v20210702"
+_centos7image: "projects/centos-cloud/global/images/centos-7-v20210701"
+_alma8image: "projects/almalinux-cloud/global/images/almalinux-8-v20210701"
+
 cluster_vars:
-  image: "projects/ubuntu-os-cloud/global/images/ubuntu-2004-focal-v20210623"    # Ubuntu images can be located at https://cloud-images.ubuntu.com/locator/
-#  image: "projects/ubuntu-os-cloud/global/images/centos-7-v20201216"
"projects/ubuntu-os-cloud/global/images/centos-7-v20201216 + image: "{{_ubuntu2004image}}" dns_cloud_internal_domain: "c.{{ (_gcp_service_account_rawtext | string | from_json).project_id }}.internal" # The cloud-internal zone as defined by the cloud provider (e.g. GCP, AWS) dns_server: "clouddns" # Specify DNS server. nsupdate, route53 or clouddns. If empty string is specified, no DNS will be added. assign_public_ip: "no" diff --git a/EXAMPLE/cluster_defs/gcp/testid/europe-west1/sandbox/cluster_vars__buildenv.yml b/EXAMPLE/cluster_defs/gcp/testid/europe-west1/sandbox/cluster_vars__buildenv.yml index 4258c2d1..f777db17 100644 --- a/EXAMPLE/cluster_defs/gcp/testid/europe-west1/sandbox/cluster_vars__buildenv.yml +++ b/EXAMPLE/cluster_defs/gcp/testid/europe-west1/sandbox/cluster_vars__buildenv.yml @@ -40,12 +40,13 @@ cluster_vars: version: "{{sysdisks_version | default('')}}" vms_by_az: { d: 1, b: 1, c: 0 } - sysdisks3: + sysdiskslvm: auto_volumes: - { auto_delete: true, interface: "SCSI", volume_size: 1, mountpoint: "/media/mysvc", fstype: "ext4" } - - { auto_delete: true, interface: "SCSI", volume_size: 3, mountpoint: "/media/mysvc2", fstype: "ext4" } - - { auto_delete: true, interface: "SCSI", volume_size: 1, mountpoint: "/media/mysvc3", fstype: "ext4" } + - { auto_delete: true, interface: "SCSI", volume_size: 1, mountpoint: "/media/mysvc", fstype: "ext4" } + lvmparams: { vg_name: "vg0", lv_name: "lv0", lv_size: "100%VG" } flavor: "e2-micro" + rootvol_size: "25" # This is optional, and if set, MUST be bigger than the original image size (20GB on GCP) version: "{{sysdisks_version | default('')}}" vms_by_az: { d: 1, b: 1, c: 0 } diff --git a/_dependencies/library/blockdevmap.py b/_dependencies/library/blockdevmap.py index fd85e435..9750bddf 100644 --- a/_dependencies/library/blockdevmap.py +++ b/_dependencies/library/blockdevmap.py @@ -401,6 +401,9 @@ class cGCPMapper(cBlockDevMap): def __init__(self, **kwds): super(cGCPMapper, self).__init__(**kwds) + for os_device in self.device_map: + os_device.update({"device_name_cloud": os_device['SERIAL']}) + class cAwsMapper(cBlockDevMap): def __init__(self, **kwds): diff --git a/_dependencies/tasks/main.yml b/_dependencies/tasks/main.yml index db2762d5..7cf09e76 100644 --- a/_dependencies/tasks/main.yml +++ b/_dependencies/tasks/main.yml @@ -79,7 +79,7 @@ gcp_credentials_file: "gcp__{{ (cluster_vars[buildenv].gcp_service_account_rawtext if cluster_vars[buildenv].gcp_service_account_rawtext|type_debug == 'dict' else cluster_vars[buildenv].gcp_service_account_rawtext | string | from_json).project_id }}.json" when: gcp_credentials_file is not defined - - name: dynamic_inventory | stat the gcp_credentials_file + - name: stat the gcp_credentials_file stat: path={{gcp_credentials_file}} register: r__stat_gcp_credentials_file diff --git a/clean/tasks/gcp.yml b/clean/tasks/gcp.yml index d20ea3a7..4da509ed 100644 --- a/clean/tasks/gcp.yml +++ b/clean/tasks/gcp.yml @@ -51,12 +51,12 @@ project: "{{cluster_vars[buildenv].vpc_host_project_id}}" with_items: "{{ cluster_vars.firewall_rules }}" - - name: clean/gcp | Delete the GCP network (if -e create_gcp_network=true) + - name: clean/gcp | Delete the GCP network (if -e delete_gcp_network=true) gcp_compute_network: name: "{{cluster_vars[buildenv].vpc_network_name}}" auth_kind: "serviceaccount" service_account_file: "{{gcp_credentials_file}}" project: "{{cluster_vars[buildenv].vpc_host_project_id}}" state: absent - when: create_gcp_network is defined and create_gcp_network|bool + when: delete_gcp_network is defined 
and delete_gcp_network|bool when: clean is defined and clean == '_all_' diff --git a/cluster_hosts/tasks/get_cluster_hosts_target_gcp.yml b/cluster_hosts/tasks/get_cluster_hosts_target_gcp.yml index a7494e88..a531a147 100644 --- a/cluster_hosts/tasks/get_cluster_hosts_target_gcp.yml +++ b/cluster_hosts/tasks/get_cluster_hosts_target_gcp.yml @@ -6,8 +6,11 @@ {%- for host in cluster_hosts_target -%} {%- for vol in host.auto_volumes -%} {%- if 'device_name' not in vol -%} - {%- set _dummy = vol.update({'device_name': host.hostname + '--' + vol.mountpoint | basename }) -%} - {%- set _dummy = vol.update({'initialize_params': {'disk_name': vol.device_name, 'disk_size_gb': vol.volume_size}}) -%} + {%- if 'lvmparams' in cluster_vars[buildenv].hosttype_vars[host.hosttype] -%} + {%- set lvm_device_index = '-d' + loop.index|string -%} + {%- endif -%} + {%- set _dummy = vol.update({'device_name': host.hostname + '--' + vol.mountpoint | basename + lvm_device_index|default('') }) -%} + {%- set _dummy = vol.update({'initialize_params': {'disk_name': vol.device_name, 'disk_size_gb': vol.volume_size }}) -%} {%- endif -%} {%- endfor %} {%- endfor %} diff --git a/config/tasks/disks_auto_aws_gcp.yml b/config/tasks/disks_auto_aws_gcp.yml index 3833a616..b678be81 100644 --- a/config/tasks/disks_auto_aws_gcp.yml +++ b/config/tasks/disks_auto_aws_gcp.yml @@ -61,10 +61,10 @@ - name: disks_auto_aws_gcp | Check that we haven't mounted disks in the wrong place. Especially useful for redeploys when we're moving disks. block: - - name: "disks_auto_aws_gcp | Touch a file with the mountpoint and device name for testing that disk attachment is correct. Note: Use a unique filename here instead of writing to a file, so that more than one file per device is an error." + - name: "disks_auto_aws_gcp | Touch a file with the mountpoint and device name for testing that disk attachment is correct. Note: Use a unique filename here instead of writing to a file, so that more than one file per device is an error. 
         become: yes
         file:
-          path: "{{item.mountpoint}}/.clusterversetest__{{inventory_hostname | regex_replace('-(?!.*-).*')}}__{{ item.mountpoint | regex_replace('\\/', '_') }}__{{ item.device_name | regex_replace('\/', '_') }}"
+          path: "{{item.mountpoint}}/.clusterversetest__{{inventory_hostname | regex_replace('-(?!.*-).*')}}__{{ item.mountpoint | regex_replace('\\/', '_') }}{%- if cluster_vars.type != 'gcp' -%}__{{ item.device_name | regex_replace('\/', '_') }}{%- endif -%}"
           state: touch
         loop: "{{auto_vols}}"
@@ -110,62 +110,68 @@
       become: yes
       register: r__blockdevmap

-    - name: disks_auto_aws_gcp/lvm | r__blockdevmap (pre-filesystem create)
+    - name: disks_auto_aws_gcp/lvm | r__blockdevmap (pre raid create)
       debug: msg={{r__blockdevmap}}

-    - name: disks_auto_aws_gcp/lvm | Create a volume group from all block devices
-      become: yes
-      lvg:
-        vg: "{{ lvmparams.vg_name }}"
-        pvs: "{{ r__blockdevmap.device_map | json_query(\"[?device_name_cloud && contains('\" + auto_vol_device_names + \"', device_name_cloud)].device_name_os\") | join(',')}}"
-      vars:
-        auto_vol_device_names: "{{raid_vols | map(attribute='device_name') | sort | join(',')}}"
-
-    - name: disks_auto_aws_gcp/lvm | Create a logical volume from volume group
-      become: yes
-      lvol:
-        vg: "{{ lvmparams.vg_name }}"
-        lv: "{{ lvmparams.lv_name }}"
-        size: "{{ lvmparams.lv_size }}"
-
-    - name: disks_auto_aws_gcp/lvm | Create filesystem(s) on attached volume(s)
-      become: yes
-      filesystem:
-        fstype: "{{ raid_vols[0].fstype }}"
-        dev: "/dev/{{ lvmparams.vg_name }}/{{ lvmparams.lv_name }}"
-        force: no
+    - block:
+        - name: disks_auto_aws_gcp/lvm | raid_vols_devices
+          debug: msg={{ raid_vols_devices }}

-    - name: disks_auto_aws_gcp/lvm | Mount created filesytem(s) persistently
-      become: yes
-      mount:
-        path: "{{ raid_vols[0].mountpoint }}"
-        src: "/dev/{{ lvmparams.vg_name }}/{{ lvmparams.lv_name }}"
-        fstype: "{{ raid_vols[0].fstype }}"
-        state: mounted
-        opts: _netdev
-
-    - name: disks_auto_aws_gcp/lvm | Check that we haven't mounted disks in the wrong place. Especially useful for redeploys when we're moving disks.
-      block:
-        - name: "disks_auto_aws_gcp/lvm | Touch a file with the mountpoint for testing that disk attachment is correct. Note: Use a unique filename here instead of writing to a file, so that more than one file per device is an error."
+        - name: disks_auto_aws_gcp/lvm | Create a volume group from all block devices
           become: yes
-          file:
-            path: "{{ raid_vols[0].mountpoint }}/.clusterversetest__{{inventory_hostname | regex_replace('-(?!.*-).*')}}__{{ raid_vols[0].mountpoint | regex_replace('\\/', '_') }}"
-            state: touch
-
-        - name: disks_auto_aws_gcp/lvm | Find all .clusterversetest__ files in mounted disks
-          find:
-            paths: "{{ raid_vols[0].mountpoint }}"
-            hidden: yes
-            patterns: ".clusterversetest__*"
-          register: r__find_test
-
-        - debug: msg={{r__find_test}}
+          lvg:
+            vg: "{{ lvmparams.vg_name }}"
+            pvs: "{{ raid_vols_devices | map(attribute='device_name_os') | sort | join(',') }}"
+
+        - name: disks_auto_aws_gcp/lvm | Create a logical volume from volume group
+          become: yes
+          lvol:
+            vg: "{{ lvmparams.vg_name }}"
+            lv: "{{ lvmparams.lv_name }}"
+            size: "{{ lvmparams.lv_size }}"
+
+        - name: disks_auto_aws_gcp/lvm | Create filesystem(s) on attached volume(s)
+          become: yes
+          filesystem:
+            fstype: "{{ raid_vols[0].fstype }}"
+            dev: "/dev/{{ lvmparams.vg_name }}/{{ lvmparams.lv_name }}"
+            force: no
+
+        - name: disks_auto_aws_gcp/lvm | Mount created filesystem(s) persistently
+          become: yes
+          mount:
+            path: "{{ raid_vols[0].mountpoint }}"
+            src: "/dev/{{ lvmparams.vg_name }}/{{ lvmparams.lv_name }}"
+            fstype: "{{ raid_vols[0].fstype }}"
+            state: mounted
+            opts: _netdev
+
+        - name: disks_auto_aws_gcp/lvm | Check that we haven't mounted disks in the wrong place. Especially useful for redeploys when we're moving disks.
+          block:
+            - name: "disks_auto_aws_gcp/lvm | Touch a file with the mountpoint for testing that disk attachment is correct. Note: Use a unique filename here instead of writing to a file, so that more than one file per device is an error."
+              become: yes
+              file:
+                path: "{{ raid_vols[0].mountpoint }}/.clusterversetest__{{inventory_hostname | regex_replace('-(?!.*-).*')}}__{{ raid_vols[0].mountpoint | regex_replace('\\/', '_') }}"
+                state: touch
+
+            - name: disks_auto_aws_gcp/lvm | Find all .clusterversetest__ files in mounted disks
+              find:
+                paths: "{{ raid_vols[0].mountpoint }}"
+                hidden: yes
+                patterns: ".clusterversetest__*"
+              register: r__find_test
+
+            - debug: msg={{r__find_test}}
+
+            - name: disks_auto_aws_gcp/lvm | assert that only one device descriptor file exists per disk (otherwise, indicates that this run has mapped either more than one device per mount, or a different one to previous)
+              assert: { that: "'files' in r__find_test and r__find_test.files | length == 1", fail_msg: "ERROR - Exactly one file should exist per LVM." }
+          when: test_touch_disks is defined and test_touch_disks|bool
+      vars:
+        raid_vols_devices: "{{ r__blockdevmap.device_map | json_query(\"[?device_name_cloud && contains('\" + (raid_vols | map(attribute='device_name') | sort | join(',')) + \"', device_name_cloud)]\") }}"
+      when: raid_vols_devices | length > 0

-        - name: disks_auto_aws_gcp/lvm | assert that only one device descriptor file exists per disk (otherwise, indicates that this run has mapped either more than one device per mount, or a different one to previous)
-          assert: { that: "'files' in r__find_test != '' and r__find_test.files | length == 1", fail_msg: "ERROR - Exactly one file should exist per LVM." }
-      when: test_touch_disks is defined and test_touch_disks|bool

-  when: (lvmparams is defined and lvmparams != '') and (raid_vols | map(attribute='mountpoint') | list | unique | count == 1) and (raid_vols | map(attribute='mountpoint') | list | count >= 2) and (raid_vols | map(attribute='fstype') | list | unique | count == 1)
+  when: (lvmparams is defined and lvmparams != {}) and (raid_vols | map(attribute='mountpoint') | list | unique | count == 1) and (raid_vols | map(attribute='mountpoint') | list | count >= 2) and (raid_vols | map(attribute='fstype') | list | unique | count == 1)
   vars:
     _hosttype_vars: "{{ cluster_hosts_target | json_query(\"[?hostname == '\" + inventory_hostname + \"'] | [0]\") }}"
-    raid_vols: "{{ _hosttype_vars.auto_volumes | selectattr('mountpoint', '!=', '/')}}"
-    lvmparams: "{{ cluster_vars[buildenv].hosttype_vars[_hosttype_vars.hosttype].lvmparams | default('') }}"
+    raid_vols: "{{ (_hosttype_vars.auto_volumes | selectattr('mountpoint', '!=', '/') | default([])) if _hosttype_vars.auto_volumes is defined else [] }}"
+    lvmparams: "{{ (cluster_vars[buildenv].hosttype_vars[_hosttype_vars.hosttype].lvmparams | default({})) if _hosttype_vars.hosttype is defined else {} }}"
diff --git a/dynamic_inventory/tasks/main.yml b/dynamic_inventory/tasks/main.yml
index 4780028a..e15b26da 100644
--- a/dynamic_inventory/tasks/main.yml
+++ b/dynamic_inventory/tasks/main.yml
@@ -17,17 +17,17 @@
 - name: dynamic_inventory | Get (network) facts - to determine the local IP/network, to see if we need the bastion below (requires the 'ip' tool (the 'iproute2' package on Ubuntu))
   setup: { gather_subset: ["network"] }

-- name: dynamic_inventory | Add hosts to dynamic inventory
+- name: dynamic_inventory | Add hosts to dynamic inventory (add only powered-on hosts)
   add_host:
     name: "{{ item.name }}"
-    groups: "{{ item.tagslabels.hosttype }},{{ cluster_name }},{{ clusterid }}{% if item.regionzone is defined and item.regionzone %},{{ item.regionzone }}{% endif %}"
+    groups: "{{ item.tagslabels.hosttype }},{{ cluster_name }},{{ clusterid }}{% if item.regionzone is defined and item.regionzone %},{{ item.regionzone }}{% endif %}{% if cluster_hosts_target is defined and item.name not in (cluster_hosts_target | default({}) | map(attribute='hostname')) %},not_target_hosts{% endif %}"
     ansible_host: "{{ item.ipv4.public if cluster_vars.inventory_ip=='public' else item.ipv4.private }}"
     hosttype: "{{ item.tagslabels.hosttype }}"
     regionzone: "{{ item.regionzone if item.regionzone else omit }}"
     ansible_ssh_common_args: "{{ cluster_vars[buildenv].ssh_connection_cfg.bastion.ssh_args if (_bastion_host and (not _bastion_in_host_net or (force_use_bastion is defined and force_use_bastion|bool))) else (omit) }}"    # Don't use the bastion if we're running in the same subnet (assumes all hosts in subnet can operate as a bastion), or if the user sets '-e force_use_bastion=true'
     ansible_user: "{{ cluster_vars[buildenv].ssh_connection_cfg.host.ansible_user | default(omit) }}"
     ansible_ssh_private_key_file: "{{ cluster_vars[buildenv].ssh_connection_cfg.host.ansible_ssh_private_key_file | default(None) | ternary('id_rsa_ansible_ssh_private_key_file', omit) }}"
-  with_items: "{{ cluster_hosts_state | json_query(\"[?contains('RUNNING,running,poweredOn', instance_state)]\") }}"
+  with_items: "{{ cluster_hosts_state | json_query(\"[?contains(['RUNNING','running','poweredOn'], instance_state)]\") }}"
   vars:
     _local_cidr: "{{ (ansible_default_ipv4.network+'/'+ansible_default_ipv4.netmask) | ipaddr('network/prefix') }}"    # Get the network the localhost IP is in
    _bastion_host: "{{ cluster_vars[buildenv].ssh_connection_cfg.bastion.ssh_args | default() | regex_replace('.*@([]\\w\\d\\.-]*).*', '\\1') }}"    # Extract just the bastion hostname from 'cluster_vars[buildenv].ssh_connection_cfg.bastion.ssh_args'
@@ -39,20 +39,24 @@
     register: stat_inventory_file
   when: inventory_file is defined

-- name: dynamic_inventory | Populate inventory file from dynamic inventory
-  copy:
-    content: |
-      {% for groupname in groups.keys() | sort() -%}
-      {% if groupname not in ["all", "ungrouped"] -%}
-      [{{ groupname }}]
-      {% for hostname in groups[groupname] | sort() %}
-      {{ hostname }} ansible_host={{hostvars[hostname].ansible_host}} hosttype={{ hostvars[hostname].hosttype }} {% if 'ansible_user' in hostvars[hostname] %}ansible_user='{{ hostvars[hostname].ansible_user }}'{% endif %} {% if 'ansible_ssh_private_key_file' in hostvars[hostname] %}ansible_ssh_private_key_file='{{ hostvars[hostname].ansible_ssh_private_key_file }}'{% endif %} {% if 'regionzone' in hostvars[hostname] %}regionzone={{ hostvars[hostname].regionzone }}{% endif %} {% if 'ansible_ssh_common_args' in hostvars[hostname] %}ansible_ssh_common_args='{{ hostvars[hostname].ansible_ssh_common_args }}'{% endif %}{{''}}
-      {% endfor %}
+- block:
+    - name: dynamic_inventory | Populate inventory file from dynamic inventory
+      copy:
+        content: |
+          {% for groupname in groups.keys() | sort() -%}
+          {% if groupname not in ["all", "ungrouped"] -%}
+          [{{ groupname }}]
+          {% for hostname in groups[groupname] | sort() %}
+          {{ hostname }} ansible_host={{hostvars[hostname].ansible_host}} hosttype={{ hostvars[hostname].hosttype }} {% if 'ansible_user' in hostvars[hostname] %}ansible_user='{{ hostvars[hostname].ansible_user }}'{% endif %} {% if 'ansible_ssh_private_key_file' in hostvars[hostname] %}ansible_ssh_private_key_file='{{ hostvars[hostname].ansible_ssh_private_key_file }}'{% endif %} {% if 'regionzone' in hostvars[hostname] %}regionzone={{ hostvars[hostname].regionzone }}{% endif %} {% if 'ansible_ssh_common_args' in hostvars[hostname] %}ansible_ssh_common_args='{{ hostvars[hostname].ansible_ssh_common_args }}'{% endif %}{{''}}
+          {% endfor %}
+
+          {% endif %}
+          {% endfor %}
+        dest: "{{new_inventory_file}}"
+        force: yes

-      {% endif %}
-      {% endfor %}
-    dest: "{{new_inventory_file}}"
-    force: yes
+    - name: dynamic_inventory | inventory file contents
+      debug: msg="{{ (lookup('file', new_inventory_file)).split('\n') | map('trim') | list }}"
   vars:
     new_inventory_file: "{{ inventory_file if (((stat_inventory_file.stat is defined and stat_inventory_file.stat.exists) or (stat_inventory_file.skipped is defined and stat_inventory_file.skipped)) and inventory_dir is defined and inventory_dir==playbook_dir) else playbook_dir + '/inventory_' + cluster_name }}"
diff --git a/jenkinsfiles/Jenkinsfile_testsuite b/jenkinsfiles/Jenkinsfile_testsuite
index 67692ca8..c8e7dc8c 100644
--- a/jenkinsfiles/Jenkinsfile_testsuite
+++ b/jenkinsfiles/Jenkinsfile_testsuite
@@ -43,7 +43,7 @@ class MatrixBuilder {
         HashMap tasks = [failFast: false]
         _getMatrixAxes().each() { axis ->
             List axisEnvVars = axis.collect { key, val -> "${key}=${val}" }
-            axisEnvVars.add("BUILD_HASH=" + generateMD5(hashCode() + axisEnvVars.join(','), 12))    //A unique build hash of the classid (hashcode) and the matrix elements
+            axisEnvVars.add("BUILD_HASH=" + generateMD5(hashCode() + axisEnvVars.join(','), 8))    //A unique build hash of the classid (hashcode) and the matrix elements
             tasks[axisEnvVars.join(', ')] = { this.clTaskMap(axisEnvVars) }
         }
         return (tasks)
@@ -76,8 +76,8 @@ class MatrixBuilder {
         axes.combinations()*.sum()    // calculates the cartesian product
     }

-    static String generateMD5(String s, int len = 31) {
-        java.security.MessageDigest.getInstance("MD5").digest(s.bytes).encodeHex().toString()[0..len]
+    static String generateMD5(String s, int len = 30) {
+        java.security.MessageDigest.getInstance("MD5").digest(s.bytes).encodeHex().toString()[0..(len - 1)]
     }
 }

@@ -93,10 +93,10 @@ properties([
         extendedChoice(name: 'REDEPLOY_SCHEME', type: 'PT_CHECKBOX', value: '_scheme_addallnew_rmdisk_rollback,_scheme_addnewvm_rmdisk_rollback,_scheme_rmvm_rmdisk_only,_scheme_rmvm_keepdisk_rollback', defaultValue: '_scheme_addallnew_rmdisk_rollback,_scheme_addnewvm_rmdisk_rollback,_scheme_rmvm_rmdisk_only,_scheme_rmvm_keepdisk_rollback', description: 'Specify which redeploy scheme(s) to test', visibleItemCount: 5),
         choice(name: 'CLEAN_ON_FAILURE', choices: [true, false], description: "Run a clusterverse clean in the event of a failure."),
         extendedChoice(name: 'MYHOSTTYPES_TEST', type: 'PT_MULTI_SELECT', value: 'nomyhosttypes,myhosttypes', defaultValue: 'nomyhosttypes', descriptionPropertyValue: 'Without myhosttypes, With myhosttypes', description: 'Whether to run tests on pre-configured hosttypes.', visibleItemCount: 3),
-        [name: 'MYHOSTTYPES_LIST', $class: 'DynamicReferenceParameter', choiceType: 'ET_FORMATTED_HTML', description: 'These hosttype definitions must exist in cluster_vars for all clusters', randomName: 'choice-parameter-423779762617532', referencedParameters: 'MYHOSTTYPES_TEST', script: [$class: 'GroovyScript', fallbackScript: [classpath: [], sandbox: true, script: 'return ""'], script: [classpath: [], sandbox: true, script: 'if (MYHOSTTYPES_TEST.split(\',\').contains(\'myhosttypes\')) { return ("") }']]],
+        [name: 'MYHOSTTYPES_LIST', $class: 'DynamicReferenceParameter', choiceType: 'ET_FORMATTED_HTML', description: 'These hosttype definitions must exist in cluster_vars for all clusters', randomName: 'choice-parameter-423779762617532', referencedParameters: 'MYHOSTTYPES_TEST', script: [$class: 'GroovyScript', fallbackScript: [classpath: [], sandbox: true, script: 'return ""'], script: [classpath: [], sandbox: true, script: 'if (MYHOSTTYPES_TEST.split(\',\').contains(\'myhosttypes\')) { return ("") }']]],
         [name: 'MYHOSTTYPES_SERIAL_PARALLEL', $class: 'CascadeChoiceParameter', choiceType: 'PT_RADIO', description: 'Run the myhosttype test in serial or parallel', randomName: 'choice-parameter-424489601389882', referencedParameters: 'MYHOSTTYPES_TEST', script: [$class: 'GroovyScript', fallbackScript: [classpath: [], sandbox: true, script: 'return([])'], script: [classpath: [], sandbox: true, script: 'if (MYHOSTTYPES_TEST==\'nomyhosttypes,myhosttypes\') { return([\'serial:selected\',\'parallel\']) }']]],
         extendedChoice(name: 'SCALEUPDOWN', type: 'PT_MULTI_SELECT', value: 'noscale,scaleup,scaledown', defaultValue: 'noscale', description: 'Specify whether to test scaling up and/or down.', visibleItemCount: 3),
-        extendedChoice(name: 'IMAGE_TESTED', type: 'PT_MULTI_SELECT', value: '_ubuntu2004image,_centos7image', defaultValue: '_ubuntu2004image', descriptionPropertyValue: 'Ubuntu 20.04, CentOS 7', description: 'Specify which image(s) to test', visibleItemCount: 3),
+        extendedChoice(name: 'IMAGE_TESTED', type: 'PT_MULTI_SELECT', value: '_ubuntu2004image,_centos7image,_alma8image', defaultValue: '_ubuntu2004image', descriptionPropertyValue: 'Ubuntu 20.04, CentOS 7, AlmaLinux 8', description: 'Specify which image(s) to test', visibleItemCount: 3),
     ])
 ])
@@ -426,24 +426,6 @@ CVTEST_MYHOSTTYPES = new MatrixBuilder([
 /*** The actual pipeline stage definitions ***/
 /*********************************************/

-// A check stage - no actual work
-stage('Check Environment') {
-    node {
-        sh 'printenv | sort'
-        println(params.inspect())
-        if (params.BUILDENV == '') {
-            currentBuild.result = 'ABORTED'
-            error("BUILDENV not defined")
-        } else if (params.CLUSTER_ID == '') {
-            currentBuild.result = 'ABORTED'
-            error("CLUSTER_ID not defined")
-        } else if (params.CLOUD_REGION == '') {
-            currentBuild.result = 'ABORTED'
-            error("CLOUD_REGION not defined")
-        }
-    }
-}
-
 // A map to be loaded with matrices (of stages)
 HashMap matrixBuilds = [:]

@@ -479,13 +461,33 @@ if (params.MYHOSTTYPES_TEST.split(',').contains('myhosttypes')) {
     }
 }

-// Run the matrices in parallel if the MYHOSTTYPES_SERIAL_PARALLEL parameter is set (makes a mess in Blue Ocean, but is faster). Else run serially.
-if (params.MYHOSTTYPES_SERIAL_PARALLEL == 'parallel') {
-    stage("All matrices") {
-        parallel(matrixBuilds)
+// Only run if mandatory parameters are set.
+if (params.BUILDENV != '' && params.CLUSTER_ID != '' && params.CLOUD_REGION != '') {
+    // A check stage - no actual work
+    stage('Check Environment') {
+        node {
+            sh 'printenv | sort'
+            println(params.inspect())
+        }
+    }
+
+    // Run the matrices in parallel if the MYHOSTTYPES_SERIAL_PARALLEL parameter is set (makes a mess in Blue Ocean, but is faster). Else run serially.
+    if (params.MYHOSTTYPES_SERIAL_PARALLEL == 'parallel') {
+        stage("All matrices") {
+            parallel(matrixBuilds)
+        }
+    } else {
+        matrixBuilds.each { matrix ->
+            matrix.value.call()
+        }
     }
 } else {
-    matrixBuilds.each { matrix ->
-        matrix.value.call()
+    currentBuild.result = 'ABORTED'
+    if (params.BUILDENV == '') {
+        println("BUILDENV not defined")
+    } else if (params.CLUSTER_ID == '') {
+        println("CLUSTER_ID not defined")
+    } else if (params.CLOUD_REGION == '') {
+        println("CLOUD_REGION not defined")
     }
 }