diff --git a/EXAMPLE/cluster_defs/aws/eu-west-1/cluster_vars__region.yml b/EXAMPLE/cluster_defs/aws/eu-west-1/cluster_vars__region.yml deleted file mode 100644 index 8a45afe4..00000000 --- a/EXAMPLE/cluster_defs/aws/eu-west-1/cluster_vars__region.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- - -_ubuntu2004image: "ami-04ffbabc7935ec0e9" # eu-west-1, 20.04, amd64, hvm-ssd, 20210108. Ubuntu images can be located at https://cloud-images.ubuntu.com/locator/ -_centos7image: "ami-0b850cf02cc00fdc8" # eu-west-1, CentOS7 - -cluster_vars: - image: "{{_ubuntu2004image}}" diff --git a/EXAMPLE/cluster_defs/aws/eu-west-1/testid/cluster_vars__clusterid.yml b/EXAMPLE/cluster_defs/aws/eu-west-1/testid/cluster_vars__clusterid.yml deleted file mode 100644 index 5abd7faa..00000000 --- a/EXAMPLE/cluster_defs/aws/eu-west-1/testid/cluster_vars__clusterid.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- - -cluster_vars: - dns_nameserver_zone: &dns_nameserver_zone "" # The zone that dns_server will operate on. gcloud dns needs a trailing '.'. Leave blank if no external DNS (use IPs only) - dns_user_domain: "{%- if _dns_nameserver_zone -%}{{cloud_type}}-{{region}}.{{app_class}}.{{buildenv}}.{{_dns_nameserver_zone}}{%- endif -%}" # A user-defined _domain_ part of the FDQN, (if more prefixes are required before the dns_nameserver_zone) - dns_server: "route53" # Specify DNS server. nsupdate, route53 or clouddns. If empty string is specified, no DNS will be added. 
- instance_profile_name: "" - custom_tagslabels: - territory: "uk" - cost_centre: &cost_centre "0000000000" - billing_team: "" - service: &service "" - inv_resident_id: *service - inv_cost_centre: *cost_centre -_dns_nameserver_zone: *dns_nameserver_zone diff --git a/EXAMPLE/cluster_defs/aws/eu-west-1/testid/sandbox/cluster_vars__buildenv.yml b/EXAMPLE/cluster_defs/aws/eu-west-1/testid/sandbox/cluster_vars__buildenv.yml deleted file mode 100644 index c42e3e6e..00000000 --- a/EXAMPLE/cluster_defs/aws/eu-west-1/testid/sandbox/cluster_vars__buildenv.yml +++ /dev/null @@ -1,86 +0,0 @@ ---- - -cluster_vars: - sandbox: - aws_access_key: AKIAJ5FPVC2KDOYHUSOQ - aws_secret_key: !vault |- - $ANSIBLE_VAULT;1.2;AES256;sandbox - 7669080460651349243347331538721104778691266429457726036813912140404310 - ssh_connection_cfg: - host: &host_ssh_connection_cfg - ansible_user: "ansible" - ansible_ssh_private_key_file: !vault | - $ANSIBLE_VAULT;1.2;AES256;sandbox - 7669080460651349243347331538721104778691266429457726036813912140404310 -# bastion: -# ssh_args: '-o ProxyCommand="ssh -i ./id_rsa_bastion -W %h:%p -q top@10.119.113.133"' -# ssh_priv_key: !vault | -# $ANSIBLE_VAULT;1.2;AES256;sandbox -# 7669080460651349243347331538721104778691266429457726036813912140404310 - vpc_name: "test{{buildenv}}" - vpc_subnet_name_prefix: "{{buildenv}}-test-{{region}}" - key_name: "test__id_rsa" - termination_protection: "no" - - hosttype_vars: - sys: - auto_volumes: [ ] - flavor: t3a.nano - version: "{{sys_version | default('')}}" - vms_by_az: { a: 1, b: 1, c: 0 } - - sysdisks2: - auto_volumes: - - { device_name: "/dev/sda1", mountpoint: "/", fstype: "ext4", "volume_type": "gp2", "volume_size": 9, encrypted: True, "delete_on_termination": true } - - { device_name: "/dev/sdf", mountpoint: "/media/mysvc", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true, perms: { owner: "root", group: "root", mode: "775" } } - - { device_name: "/dev/sdg", mountpoint: 
"/media/mysvc2", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true } - flavor: t3a.nano - version: "{{sysdisks_version | default('')}}" - vms_by_az: { a: 1, b: 1, c: 0 } - -# sysdisks3: -# auto_volumes: -# - { device_name: "/dev/sdf", mountpoint: "/media/mysvc", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true } -# - { device_name: "/dev/sdg", mountpoint: "/media/mysvc2", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true } -# - { device_name: "/dev/sdh", mountpoint: "/media/mysvc3", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true } -# flavor: t3a.nano -# version: "{{sysdisks_version | default('')}}" -# vms_by_az: { a: 1, b: 1, c: 0 } -# -# hostnvme-multi: -# auto_volumes: -# - { device_name: "/dev/sdb", mountpoint: "/media/mysvc", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral0 } -# - { device_name: "/dev/sdc", mountpoint: "/media/mysvc2", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral1 } -# - { device_name: "/dev/sdf", mountpoint: "/media/mysvc8", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true } -# flavor: i3en.2xlarge -# version: "{{sys_version | default('')}}" -# vms_by_az: { a: 1, b: 1, c: 0 } -# -# hostnvme-lvm: -# auto_volumes: -# - { device_name: "/dev/sdb", mountpoint: "/media/data", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral0 } -# - { device_name: "/dev/sdc", mountpoint: "/media/data", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral1 } -# lvmparams: { vg_name: "vg0", lv_name: "lv0", lv_size: "+100%FREE" } -# flavor: i3en.2xlarge -# version: "{{sys_version | default('')}}" -# vms_by_az: { a: 1, b: 1, c: 0 } -# -# hosthdd-multi: -# auto_volumes: -# - { device_name: "/dev/sdb", mountpoint: "/media/mysvc", fstype: 
"ext4", "volume_type": "ephemeral", ephemeral: ephemeral0 } -# - { device_name: "/dev/sdc", mountpoint: "/media/mysvc2", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral1 } -# - { device_name: "/dev/sdd", mountpoint: "/media/mysvc3", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral2 } -# flavor: d2.xlarge -# version: "{{sys_version | default('')}}" -# vms_by_az: { a: 1, b: 1, c: 0 } -# -# hosthdd-lvm: -# auto_volumes: -# - { device_name: "/dev/sdb", mountpoint: "/media/data", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral0 } -# - { device_name: "/dev/sdc", mountpoint: "/media/data", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral1 } -# - { device_name: "/dev/sdd", mountpoint: "/media/data", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral2 } -# lvmparams: { vg_name: "vg0", lv_name: "lv0", lv_size: "+100%FREE" } -# flavor: d2.xlarge -# version: "{{sys_version | default('')}}" -# vms_by_az: { a: 1, b: 1, c: 0 } -_host_ssh_connection_cfg: { <<: *host_ssh_connection_cfg } \ No newline at end of file diff --git a/EXAMPLE/cluster_defs/aws/testid/cluster_vars__clusterid.yml b/EXAMPLE/cluster_defs/aws/testid/cluster_vars__clusterid.yml new file mode 100644 index 00000000..42b49950 --- /dev/null +++ b/EXAMPLE/cluster_defs/aws/testid/cluster_vars__clusterid.yml @@ -0,0 +1,27 @@ +--- + +prometheus_node_exporter_install: false +filebeat_install: false +metricbeat_install: false + +beats_config: + filebeat: +# output_logstash_hosts: ["localhost:5044"] # The destination hosts for filebeat-gathered logs +# extra_logs_paths: # The array is optional, if you need to add more paths or files to scrape for logs +# - /var/log/myapp/*.log + metricbeat: +# output_logstash_hosts: ["localhost:5044"] # The destination hosts for metricbeat-gathered metrics +# diskio: # Diskio retrieves metrics for all disks partitions by default. 
When diskio.include_devices is defined, only look for defined partitions +# include_devices: ["sda", "sdb", "nvme0n1", "nvme1n1", "nvme2n1"] + + +cluster_vars: + dns_nameserver_zone: &dns_nameserver_zone "" # The zone that dns_server will operate on. gcloud dns needs a trailing '.'. Leave blank if no external DNS (use IPs only) + dns_user_domain: "{%- if _dns_nameserver_zone -%}{{cloud_type}}-{{region}}.{{app_class}}.{{buildenv}}.{{_dns_nameserver_zone}}{%- endif -%}" # A user-defined _domain_ part of the FQDN, (if more prefixes are required before the dns_nameserver_zone) + dns_server: "" # Specify DNS server. nsupdate, route53 or clouddns. If empty string is specified, no DNS will be added. + instance_profile_name: "" + custom_tagslabels: + inv_resident_id: "myresident" + inv_proposition_id: "myproposition" + inv_cost_centre: "0000000000" +_dns_nameserver_zone: *dns_nameserver_zone diff --git a/EXAMPLE/cluster_defs/aws/testid/eu-west-1/cluster_vars__region.yml b/EXAMPLE/cluster_defs/aws/testid/eu-west-1/cluster_vars__region.yml new file mode 100644 index 00000000..69e1389e --- /dev/null +++ b/EXAMPLE/cluster_defs/aws/testid/eu-west-1/cluster_vars__region.yml @@ -0,0 +1,5 @@ +--- + +cluster_vars: + image: "ami-04ffbabc7935ec0e9" # eu-west-1, ubuntu, 20.04, amd64, hvm-ssd, 20210108. 
Ubuntu images can be located at https://cloud-images.ubuntu.com/locator/ +# image: "ami-0b850cf02cc00fdc8" # eu-west-1, CentOS7 diff --git a/EXAMPLE/cluster_defs/aws/testid/eu-west-1/sandbox/cluster_vars__buildenv.yml b/EXAMPLE/cluster_defs/aws/testid/eu-west-1/sandbox/cluster_vars__buildenv.yml new file mode 100644 index 00000000..7ea0c230 --- /dev/null +++ b/EXAMPLE/cluster_defs/aws/testid/eu-west-1/sandbox/cluster_vars__buildenv.yml @@ -0,0 +1,93 @@ +--- + +## Bind configuration and credentials, per environment +bind9: + sandbox: {server: "", key_name: "", key_secret: ""} + +cluster_vars: + sandbox: + aws_access_key: !vault | + $ANSIBLE_VAULT;1.2;AES256;sandbox + 7669080460651349243347331538721104778691266429457726036813912140404310 + aws_secret_key: !vault | + $ANSIBLE_VAULT;1.2;AES256;sandbox + 7669080460651349243347331538721104778691266429457726036813912140404310 + ssh_connection_cfg: + host: &host_ssh_connection_cfg + ansible_user: "ansible" + ansible_ssh_private_key_file: !vault | + $ANSIBLE_VAULT;1.2;AES256;sandbox + 7669080460651349243347331538721104778691266429457726036813912140404310 + bastion: + ssh_args: '-o ProxyCommand="ssh -i ./id_rsa_bastion -W %h:%p -q user@192.168.0.1"' + ssh_priv_key: !vault | + $ANSIBLE_VAULT;1.2;AES256;sandbox + 7669080460651349243347331538721104778691266429457726036813912140404310 + vpc_name: "test{{buildenv}}" + vpc_subnet_name_prefix: "{{buildenv}}-test-{{region}}" + key_name: "test__id_rsa" + termination_protection: "no" + + hosttype_vars: + sys: + auto_volumes: [ ] + flavor: t3a.nano + version: "{{sys_version | default('')}}" + vms_by_az: { a: 1, b: 1, c: 0 } + + sysdisks2: + auto_volumes: + - { device_name: "/dev/sda1", mountpoint: "/", fstype: "ext4", "volume_type": "gp2", "volume_size": 9, encrypted: True, "delete_on_termination": true } + - { device_name: "/dev/sdf", mountpoint: "/media/mysvc", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true, perms: { owner: 
"root", group: "root", mode: "775" } } + - { device_name: "/dev/sdg", mountpoint: "/media/mysvc2", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true } + flavor: t3a.nano + version: "{{sysdisks_version | default('')}}" + vms_by_az: { a: 1, b: 1, c: 0 } + + sysdisks3: + auto_volumes: + - { device_name: "/dev/sdf", mountpoint: "/media/mysvc", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true } + - { device_name: "/dev/sdg", mountpoint: "/media/mysvc2", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true } + - { device_name: "/dev/sdh", mountpoint: "/media/mysvc3", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true } + flavor: t3a.nano + version: "{{sysdisks_version | default('')}}" + vms_by_az: { a: 1, b: 1, c: 0 } + + hostnvme-multi: + auto_volumes: + - { device_name: "/dev/sdb", mountpoint: "/media/mysvc", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral0 } + - { device_name: "/dev/sdc", mountpoint: "/media/mysvc2", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral1 } + - { device_name: "/dev/sdf", mountpoint: "/media/mysvc8", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true } + flavor: i3en.2xlarge + version: "{{sys_version | default('')}}" + vms_by_az: { a: 1, b: 1, c: 0 } + + hostnvme-lvm: + auto_volumes: + - { device_name: "/dev/sdb", mountpoint: "/media/data", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral0 } + - { device_name: "/dev/sdc", mountpoint: "/media/data", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral1 } + lvmparams: { vg_name: "vg0", lv_name: "lv0", lv_size: "+100%FREE" } + flavor: i3en.2xlarge + version: "{{sys_version | default('')}}" + vms_by_az: { a: 1, b: 1, c: 0 } + + hosthdd-multi: + auto_volumes: + - { device_name: 
"/dev/sdb", mountpoint: "/media/mysvc", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral0 } + - { device_name: "/dev/sdc", mountpoint: "/media/mysvc2", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral1 } + - { device_name: "/dev/sdd", mountpoint: "/media/mysvc3", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral2 } + flavor: d2.xlarge + version: "{{sys_version | default('')}}" + vms_by_az: { a: 1, b: 1, c: 0 } + + hosthdd-lvm: + auto_volumes: + - { device_name: "/dev/sdb", mountpoint: "/media/data", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral0 } + - { device_name: "/dev/sdc", mountpoint: "/media/data", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral1 } + - { device_name: "/dev/sdd", mountpoint: "/media/data", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral2 } + lvmparams: { vg_name: "vg0", lv_name: "lv0", lv_size: "+100%FREE" } + flavor: d2.xlarge + version: "{{sys_version | default('')}}" + vms_by_az: { a: 1, b: 1, c: 0 } + +_host_ssh_connection_cfg: { <<: *host_ssh_connection_cfg } diff --git a/EXAMPLE/cluster_defs/cluster_vars.yml b/EXAMPLE/cluster_defs/cluster_vars.yml index c55ef352..836bd019 100644 --- a/EXAMPLE/cluster_defs/cluster_vars.yml +++ b/EXAMPLE/cluster_defs/cluster_vars.yml @@ -9,8 +9,8 @@ redeploy_schemes_supported: ['_scheme_addallnew_rmdisk_rollback', '_scheme_addne skip_dynamic_inventory_sshwait: true -app_name: "test" # The name of the application cluster (e.g. 'couchbase', 'nginx'); becomes part of cluster_name. -app_class: "test" # The class of application (e.g. 'database', 'webserver'); becomes part of the fqdn +app_name: "{{lookup('pipe', 'whoami')}}-test" # The name of the application cluster (e.g. 'couchbase', 'nginx'); becomes part of cluster_name. Provided is a default to ensure no accidental overwriting. +app_class: "test" # The class of application (e.g. 
'database', 'webserver'); becomes part of the fqdn beats_config: filebeat: diff --git a/EXAMPLE/cluster_defs/gcp/cluster_vars__cloud.yml b/EXAMPLE/cluster_defs/gcp/cluster_vars__cloud.yml index 71fdcc9d..5ccecd20 100644 --- a/EXAMPLE/cluster_defs/gcp/cluster_vars__cloud.yml +++ b/EXAMPLE/cluster_defs/gcp/cluster_vars__cloud.yml @@ -1,17 +1,16 @@ --- -_ubuntu2004image: "projects/ubuntu-os-cloud/global/images/ubuntu-2004-focal-v20210112" # Ubuntu images can be located at https://cloud-images.ubuntu.com/locator/ -_centos7image: "projects/centos-cloud/global/images/centos-7-v20201216" - cluster_vars: - image: "{{_ubuntu2004image}}" + image: "projects/ubuntu-os-cloud/global/images/ubuntu-2004-focal-v20210112" # Ubuntu images can be located at https://cloud-images.ubuntu.com/locator/ +# image: "projects/centos-cloud/global/images/centos-7-v20201216" dns_cloud_internal_domain: "c.{{ (_gcp_service_account_rawtext | string | from_json).project_id }}.internal" # The cloud-internal zone as defined by the cloud provider (e.g. GCP, AWS) dns_server: "clouddns" # Specify DNS server. nsupdate, route53 or clouddns. If empty string is specified, no DNS will be added. assign_public_ip: "no" inventory_ip: "private" # 'public' or 'private', (private in case we're operating in a private LAN). If public, 'assign_public_ip' must be 'yes' ip_forward: "false" metadata: - ssh-keys: "{%- if _host_ssh_connection_cfg.ansible_ssh_private_key_file is defined -%}{{ _host_ssh_connection_cfg.ansible_user }}:{{ lookup('pipe', 'ssh-keygen -y -f /dev/stdin < 0 -%}#! 
/bin/bash\n\n#Whitelist my inbound IPs\n[ -f /etc/sshguard/whitelist ] && echo \"{{_ssh_whitelist | join ('\n')}}\" >>/etc/sshguard/whitelist && /bin/systemctl restart sshguard{%- endif -%}" user-data: "" network_fw_tags: ["{{cluster_name}}-nwtag"] @@ -20,11 +19,11 @@ cluster_vars: allowed: [{ip_protocol: "tcp", ports: ["22"]}] source_ranges: "{{_ssh_whitelist}}" description: "SSH Access" -# - name: "{{cluster_name}}-nwtag" -# allowed: [{ip_protocol: "all"}] -# source_tags: ["{{cluster_name}}-nwtag"] -# description: "Access from all VMs attached to the {{cluster_name}}-nwtag group" -# - name: "{{cluster_name}}-prometheus-node-exporter" -# allowed: [{ip_protocol: "tcp", ports: ["{{ prometheus_node_exporter_port | default(9100) }}"]}] -# source_tags: ["{{cluster_name}}-nwtag"] -# description: "Prometheus instances attached to {{cluster_name}}-nwtag can access the exporter port(s)." + - name: "{{cluster_name}}-nwtag" + allowed: [{ip_protocol: "all"}] + source_tags: ["{{cluster_name}}-nwtag"] + description: "Access from all VMs attached to the {{cluster_name}}-nwtag group" + - name: "{{cluster_name}}-prometheus-node-exporter" + allowed: [{ip_protocol: "tcp", ports: ["{{ prometheus_node_exporter_port | default(9100) }}"]}] + source_tags: ["{{cluster_name}}-nwtag"] + description: "Prometheus instances attached to {{cluster_name}}-nwtag can access the exporter port(s)." 
diff --git a/EXAMPLE/cluster_defs/gcp/europe-west1/testid/cluster_vars__clusterid.yml b/EXAMPLE/cluster_defs/gcp/europe-west1/testid/cluster_vars__clusterid.yml index d13488b1..0a7750e3 100644 --- a/EXAMPLE/cluster_defs/gcp/europe-west1/testid/cluster_vars__clusterid.yml +++ b/EXAMPLE/cluster_defs/gcp/europe-west1/testid/cluster_vars__clusterid.yml @@ -1,13 +1,25 @@ --- +prometheus_node_exporter_install: false +filebeat_install: false +metricbeat_install: false + +beats_config: + filebeat: +# output_logstash_hosts: ["localhost:5044"] # The destination hosts for filebeat-gathered logs +# extra_logs_paths: # The array is optional, if you need to add more paths or files to scrape for logs +# - /var/log/myapp/*.log + metricbeat: +# output_logstash_hosts: ["localhost:5044"] # The destination hosts for metricbeat-gathered metrics +# diskio: # Diskio retrieves metrics for all disks partitions by default. When diskio.include_devices is defined, only look for defined partitions +# include_devices: ["sda", "sdb", "nvme0n1", "nvme1n1", "nvme2n1"] + + cluster_vars: dns_nameserver_zone: &dns_nameserver_zone "" # The zone that dns_server will operate on. gcloud dns needs a trailing '.'. 
Leave blank if no external DNS (use IPs only) dns_user_domain: "{%- if _dns_nameserver_zone -%}{{cloud_type}}-{{region}}.{{app_class}}.{{buildenv}}.{{_dns_nameserver_zone}}{%- endif -%}" # A user-defined _domain_ part of the FDQN, (if more prefixes are required before the dns_nameserver_zone) custom_tagslabels: - territory: "uk" - cost_centre: &cost_centre "0000000000" - billing_team: "" - service: &service "" - inv_resident_id: *service - inv_cost_centre: *cost_centre + inv_resident_id: "myresident" + inv_proposition_id: "myproposition" + inv_cost_centre: "0000000000" _dns_nameserver_zone: *dns_nameserver_zone diff --git a/EXAMPLE/cluster_defs/gcp/europe-west1/testid/sandbox/cluster_vars__buildenv.yml b/EXAMPLE/cluster_defs/gcp/europe-west1/testid/sandbox/cluster_vars__buildenv.yml index d6052d0c..90d35608 100644 --- a/EXAMPLE/cluster_defs/gcp/europe-west1/testid/sandbox/cluster_vars__buildenv.yml +++ b/EXAMPLE/cluster_defs/gcp/europe-west1/testid/sandbox/cluster_vars__buildenv.yml @@ -11,15 +11,15 @@ cluster_vars: ansible_ssh_private_key_file: !vault | $ANSIBLE_VAULT;1.2;AES256;sandbox 7669080460651349243347331538721104778691266429457726036813912140404310 -# bastion: -# ssh_args: '-o ProxyCommand="ssh -i ./id_rsa_bastion -W %h:%p -q top@10.119.113.133"' -# ssh_priv_key: !vault | -# $ANSIBLE_VAULT;1.2;AES256;sandbox -# 7669080460651349243347331538721104778691266429457726036813912140404310 - vpc_project_id: "{{ (_gcp_service_account_rawtext | string | from_json).project_id }}" # AKA the 'service project' if Shared VPC (https://cloud.google.com/vpc/docs/shared-vpc) is in use. 
- vpc_host_project_id: "{{ (_gcp_service_account_rawtext | string | from_json).project_id }}" # Would differ from vpc_project_id if Shared VPC is in use, (the networking is in a separate project) + bastion: + ssh_args: '-o ProxyCommand="ssh -i ./id_rsa_bastion -W %h:%p -q user@192.168.0.1"' + ssh_priv_key: !vault | + $ANSIBLE_VAULT;1.2;AES256;sandbox + 7669080460651349243347331538721104778691266429457726036813912140404310 + vpc_project_id: "{{ (_gcp_service_account_rawtext | string | from_json).project_id }}" # AKA the 'service project' if Shared VPC (https://cloud.google.com/vpc/docs/shared-vpc) is in use. + vpc_host_project_id: "{{ (_gcp_service_account_rawtext | string | from_json).project_id }}" # Would differ from vpc_project_id if Shared VPC is in use, (the networking is in a separate project) vpc_network_name: "test-{{buildenv}}" - vpc_subnet_name: "" + vpc_subnet_name: "" # Can be omitted if using default subnets preemptible: "no" deletion_protection: "no" @@ -27,7 +27,6 @@ cluster_vars: sys: auto_volumes: [ ] flavor: "e2-micro" - rootvol_size: "10" version: "{{sys_version | default('')}}" vms_by_az: { d: 1, b: 1, c: 0 } @@ -36,18 +35,17 @@ cluster_vars: - { auto_delete: true, interface: "SCSI", volume_size: 2, mountpoint: "/media/mysvc", fstype: "ext4", perms: { owner: "root", group: "root", mode: "775" } } - { auto_delete: true, interface: "SCSI", volume_size: 2, mountpoint: "/media/mysvc2", fstype: "ext4" } flavor: "e2-micro" - rootvol_size: "10" + rootvol_size: "25" version: "{{sysdisks_version | default('')}}" vms_by_az: { d: 1, b: 1, c: 0 } -# sysdisks3: -# auto_volumes: -# - { auto_delete: true, interface: "SCSI", volume_size: 2, mountpoint: "/media/mysvc", fstype: "ext4" } -# - { auto_delete: true, interface: "SCSI", volume_size: 2, mountpoint: "/media/mysvc2", fstype: "ext4" } -# - { auto_delete: true, interface: "SCSI", volume_size: 3, mountpoint: "/media/mysvc3", fstype: "ext4" } -# flavor: "e2-micro" -# rootvol_size: "10" -# version: 
"{{sysdisks_version | default('')}}" -# vms_by_az: { d: 1, b: 1, c: 0 } + sysdisks3: + auto_volumes: + - { auto_delete: true, interface: "SCSI", volume_size: 2, mountpoint: "/media/mysvc", fstype: "ext4" } + - { auto_delete: true, interface: "SCSI", volume_size: 2, mountpoint: "/media/mysvc2", fstype: "ext4" } + - { auto_delete: true, interface: "SCSI", volume_size: 3, mountpoint: "/media/mysvc3", fstype: "ext4" } + flavor: "e2-micro" + version: "{{sysdisks_version | default('')}}" + vms_by_az: { d: 1, b: 1, c: 0 } _gcp_service_account_rawtext: *gcp_service_account_rawtext _host_ssh_connection_cfg: { <<: *host_ssh_connection_cfg } \ No newline at end of file diff --git a/EXAMPLE/cluster_defs/test_aws_euw1/cluster_vars.yml b/EXAMPLE/cluster_defs/test_aws_euw1/cluster_vars.yml index c9201270..2336b31d 100644 --- a/EXAMPLE/cluster_defs/test_aws_euw1/cluster_vars.yml +++ b/EXAMPLE/cluster_defs/test_aws_euw1/cluster_vars.yml @@ -12,10 +12,9 @@ skip_dynamic_inventory_sshwait: true prometheus_node_exporter_install: false filebeat_install: false metricbeat_install: false -canary: none -app_name: "test" # The name of the application cluster (e.g. 'couchbase', 'nginx'); becomes part of cluster_name. -app_class: "test" # The class of application (e.g. 'database', 'webserver'); becomes part of the fqdn +app_name: "{{lookup('pipe', 'whoami')}}-test" # The name of the application cluster (e.g. 'couchbase', 'nginx'); becomes part of cluster_name. Provided is a default to ensure no accidental overwriting. +app_class: "test" # The class of application (e.g. 'database', 'webserver'); becomes part of the fqdn beats_config: filebeat: @@ -54,15 +53,16 @@ cluster_name: "{{app_name}}-{{buildenv}}" # Identifies the cluster within cluster_vars: type: &cloud_type "aws" - image: "ami-04ffbabc7935ec0e9" # eu-west-1, 20.04, amd64, hvm-ssd, 20210108. 
Ubuntu images can be located at https://cloud-images.ubuntu.com/locator/ + image: "ami-04ffbabc7935ec0e9" # eu-west-1, ubuntu, 20.04, amd64, hvm-ssd, 20210108. Ubuntu images can be located at https://cloud-images.ubuntu.com/locator/ +# image: "ami-0b850cf02cc00fdc8" # eu-west-1, CentOS7 region: &region "eu-west-1" dns_cloud_internal_domain: "{{_region}}.compute.internal" # The cloud-internal zone as defined by the cloud provider (e.g. GCP, AWS) dns_nameserver_zone: &dns_nameserver_zone "" # The zone that dns_server will operate on. gcloud dns needs a trailing '.'. Leave blank if no external DNS (use IPs only) dns_user_domain: "{%- if _dns_nameserver_zone -%}{{_cloud_type}}-{{_region}}.{{app_class}}.{{buildenv}}.{{_dns_nameserver_zone}}{%- endif -%}" # A user-defined _domain_ part of the FDQN, (if more prefixes are required before the dns_nameserver_zone) dns_server: "" # Specify DNS server. nsupdate, route53 or clouddns. If empty string is specified, no DNS will be added. - route53_private_zone: no # Only used when cluster_vars.type == 'aws'. Defaults to true if not set. - assign_public_ip: "yes" - inventory_ip: "public" # 'public' or 'private', (private in case we're operating in a private LAN). If public, 'assign_public_ip' must be 'yes' + route53_private_zone: yes # Only used when cluster_vars.type == 'aws'. Defaults to true if not set. + assign_public_ip: "no" + inventory_ip: "private" # 'public' or 'private', (private in case we're operating in a private LAN). 
If public, 'assign_public_ip' must be 'yes' instance_profile_name: "" user_data: |- #cloud-config @@ -76,7 +76,7 @@ cluster_vars: inv_service_id: "{{app_class}}" inv_cluster_id: "{{cluster_name}}" inv_cluster_type: "{{app_name}}" - inv_cost_centre: "1234" + inv_cost_centre: "0000000000" ssh_whitelist: &ssh_whitelist ['10.0.0.0/8'] secgroups_existing: [] secgroup_new: @@ -89,18 +89,17 @@ cluster_vars: # rule_desc: "Access from all VMs attached to the {{ cluster_name }}-sg group" # - proto: "tcp" # ports: ["{{ prometheus_node_exporter_port | default(9100) }}"] -# group_name: ["{{buildenv}}-private-sg"] +# group_name: "{{buildenv}}-private-sg" # rule_desc: "Prometheus instances attached to {{buildenv}}-private-sg can access the exporter port(s)." sandbox: hosttype_vars: - sys: {vms_by_az: {a: 1, b: 1, c: 0}, flavor: t3a.nano, version: "{{sys_version | default('')}}", auto_volumes: []} -# sysdisks2: {vms_by_az: {a: 1, b: 1, c: 0}, flavor: t3a.nano, version: "{{sysdisks_version | default('')}}", auto_volumes: [{"device_name": "/dev/sdf", mountpoint: "/media/mysvc", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true, perms: {owner: "root", group: "sudo", mode: "775"} }, {"device_name": "/dev/sdg", mountpoint: "/media/mysvc2", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true}]} -# sysdisks3: {vms_by_az: {a: 1, b: 1, c: 0}, flavor: t3a.nano, version: "{{sysdisks_version | default('')}}", auto_volumes: [{"device_name": "/dev/sdf", mountpoint: "/media/mysvc", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true, perms: {owner: "root", group: "sudo", mode: "775"} }, {"device_name": "/dev/sdg", mountpoint: "/media/mysvc2", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true}, {"device_name": "/dev/sdh", mountpoint: "/media/mysvc3", fstype: "ext4", "volume_type": "gp2", 
"volume_size": 1, encrypted: True, "delete_on_termination": true}]} -# sysdisks-snapshot: {vms_by_az: {a: 1, b: 1, c: 0}, flavor: t3a.nano, version: "{{sys_version | default('')}}", auto_volumes: [{"snapshot_tags": {"tag:backup_id": "57180566894481854905"}, "device_name": "/dev/sdf", mountpoint: "/media/data", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true }]} -# hostnvme-multi: {vms_by_az: {a: 1, b: 1, c: 0}, flavor: i3en.2xlarge, version: "{{sys_version | default('')}}", auto_volumes: [{device_name: "/dev/sdb", mountpoint: "/media/mysvc", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral0}, {device_name: "/dev/sdc", mountpoint: "/media/mysvc2", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral1}, {"device_name": "/dev/sdf", mountpoint: "/media/mysvc8", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true }] } -# hostnvme-lvm: {vms_by_az: {a: 1, b: 1, c: 0}, flavor: i3en.2xlarge, version: "{{sys_version | default('')}}", auto_volumes: [{device_name: "/dev/sdb", mountpoint: "/media/data", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral0}, {device_name: "/dev/sdc", mountpoint: "/media/data", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral1}], lvmparams: {vg_name: "vg0", lv_name: "lv0", lv_size: "+100%FREE"} } -# hosthdd-multi: {vms_by_az: {a: 1, b: 1, c: 0}, flavor: d2.xlarge, version: "{{sys_version | default('')}}", auto_volumes: [{device_name: "/dev/sdb", mountpoint: "/media/mysvc", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral0}, {device_name: "/dev/sdc", mountpoint: "/media/mysvc2", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral1}, {device_name: "/dev/sdd", mountpoint: "/media/mysvc3", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral2}] } -# hosthdd-lvm: {vms_by_az: {a: 1, b: 1, c: 0}, flavor: d2.xlarge, version: "{{sys_version | 
default('')}}", auto_volumes: [{device_name: "/dev/sdb", mountpoint: "/media/data", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral0}, {device_name: "/dev/sdc", mountpoint: "/media/data", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral1}, {device_name: "/dev/sdd", mountpoint: "/media/data", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral2}], lvmparams: {vg_name: "vg0", lv_name: "lv0", lv_size: "+100%FREE"} } + sys: {auto_volumes: [], flavor: t3a.nano, version: '{{sys_version | default('''')}}', vms_by_az: {a: 1, b: 1, c: 0}} + sysdisks2: {auto_volumes: [{device_name: /dev/sda1, mountpoint: /, fstype: ext4, volume_type: gp2, volume_size: 9, encrypted: true, delete_on_termination: true}, {device_name: /dev/sdf, mountpoint: /media/mysvc, fstype: ext4, volume_type: gp2, volume_size: 1, encrypted: true, delete_on_termination: true, perms: {owner: root, group: root, mode: '775'}}, {device_name: /dev/sdg, mountpoint: /media/mysvc2, fstype: ext4, volume_type: gp2, volume_size: 1, encrypted: true, delete_on_termination: true}], flavor: t3a.nano, version: '{{sysdisks_version | default('''')}}', vms_by_az: {a: 1, b: 1, c: 0}} + sysdisks3: {auto_volumes: [{device_name: /dev/sdf, mountpoint: /media/mysvc, fstype: ext4, volume_type: gp2, volume_size: 1, encrypted: true, delete_on_termination: true}, {device_name: /dev/sdg, mountpoint: /media/mysvc2, fstype: ext4, volume_type: gp2, volume_size: 1, encrypted: true, delete_on_termination: true}, {device_name: /dev/sdh, mountpoint: /media/mysvc3, fstype: ext4, volume_type: gp2, volume_size: 1, encrypted: true, delete_on_termination: true}], flavor: t3a.nano, version: '{{sysdisks_version | default('''')}}', vms_by_az: {a: 1, b: 1, c: 0}} + hostnvme-multi: {auto_volumes: [{device_name: /dev/sdb, mountpoint: /media/mysvc, fstype: ext4, volume_type: ephemeral, ephemeral: ephemeral0}, {device_name: /dev/sdc, mountpoint: /media/mysvc2, fstype: ext4, volume_type: ephemeral, ephemeral: 
ephemeral1}, {device_name: /dev/sdf, mountpoint: /media/mysvc8, fstype: ext4, volume_type: gp2, volume_size: 1, encrypted: true, delete_on_termination: true}], flavor: i3en.2xlarge, version: '{{sys_version | default('''')}}', vms_by_az: {a: 1, b: 1, c: 0}} + hostnvme-lvm: {auto_volumes: [{device_name: /dev/sdb, mountpoint: /media/data, fstype: ext4, volume_type: ephemeral, ephemeral: ephemeral0}, {device_name: /dev/sdc, mountpoint: /media/data, fstype: ext4, volume_type: ephemeral, ephemeral: ephemeral1}], lvmparams: {vg_name: vg0, lv_name: lv0, lv_size: +100%FREE}, flavor: i3en.2xlarge, version: '{{sys_version | default('''')}}', vms_by_az: {a: 1, b: 1, c: 0}} + hosthdd-multi: {auto_volumes: [{device_name: /dev/sdb, mountpoint: /media/mysvc, fstype: ext4, volume_type: ephemeral, ephemeral: ephemeral0}, {device_name: /dev/sdc, mountpoint: /media/mysvc2, fstype: ext4, volume_type: ephemeral, ephemeral: ephemeral1}, {device_name: /dev/sdd, mountpoint: /media/mysvc3, fstype: ext4, volume_type: ephemeral, ephemeral: ephemeral2}], flavor: d2.xlarge, version: '{{sys_version | default('''')}}', vms_by_az: {a: 1, b: 1, c: 0}} + hosthdd-lvm: {auto_volumes: [{device_name: /dev/sdb, mountpoint: /media/data, fstype: ext4, volume_type: ephemeral, ephemeral: ephemeral0}, {device_name: /dev/sdc, mountpoint: /media/data, fstype: ext4, volume_type: ephemeral, ephemeral: ephemeral1}, {device_name: /dev/sdd, mountpoint: /media/data, fstype: ext4, volume_type: ephemeral, ephemeral: ephemeral2}], lvmparams: {vg_name: vg0, lv_name: lv0, lv_size: +100%FREE}, flavor: d2.xlarge, version: '{{sys_version | default('''')}}', vms_by_az: {a: 1, b: 1, c: 0}} aws_access_key: !vault | $ANSIBLE_VAULT;1.2;AES256;sandbox 7669080460651349243347331538721104778691266429457726036813912140404310 @@ -113,11 +112,11 @@ cluster_vars: ansible_ssh_private_key_file: !vault | $ANSIBLE_VAULT;1.2;AES256;sandbox 7669080460651349243347331538721104778691266429457726036813912140404310 -# bastion: -# ssh_args: '-o 
ProxyCommand="ssh -i ./id_rsa_bastion -W %h:%p -q top@10.119.113.133"' -# ssh_priv_key: !vault | -# $ANSIBLE_VAULT;1.2;AES256;sandbox -# 7669080460651349243347331538721104778691266429457726036813912140404310 + bastion: + ssh_args: '-o ProxyCommand="ssh -i ./id_rsa_bastion -W %h:%p -q user@192.168.0.1"' + ssh_priv_key: !vault | + $ANSIBLE_VAULT;1.2;AES256;sandbox + 7669080460651349243347331538721104778691266429457726036813912140404310 vpc_name: "test{{buildenv}}" vpc_subnet_name_prefix: "{{buildenv}}-test-{{_region}}" key_name: "test__id_rsa" @@ -126,4 +125,4 @@ _cloud_type: *cloud_type _region: *region _ssh_whitelist: *ssh_whitelist _dns_nameserver_zone: *dns_nameserver_zone -_host_ssh_connection_cfg: { <<: *host_ssh_connection_cfg } \ No newline at end of file +_host_ssh_connection_cfg: { <<: *host_ssh_connection_cfg } diff --git a/EXAMPLE/cluster_defs/test_gcp_euw1/cluster_vars.yml b/EXAMPLE/cluster_defs/test_gcp_euw1/cluster_vars.yml index 525fdd90..2ccfbd29 100644 --- a/EXAMPLE/cluster_defs/test_gcp_euw1/cluster_vars.yml +++ b/EXAMPLE/cluster_defs/test_gcp_euw1/cluster_vars.yml @@ -12,10 +12,9 @@ skip_dynamic_inventory_sshwait: true prometheus_node_exporter_install: false filebeat_install: false metricbeat_install: false -canary: none -app_name: "test" # The name of the application cluster (e.g. 'couchbase', 'nginx'); becomes part of cluster_name. -app_class: "test" # The class of application (e.g. 'database', 'webserver'); becomes part of the fqdn +app_name: "{{lookup('pipe', 'whoami')}}-test" # The name of the application cluster (e.g. 'couchbase', 'nginx'); becomes part of cluster_name. Provided is a default to ensure no accidental overwriting. +app_class: "test" # The class of application (e.g. 
'database', 'webserver'); becomes part of the fqdn beats_config: filebeat: @@ -54,19 +53,20 @@ cluster_name: "{{app_name}}-{{buildenv}}" # Identifies the cluster within cluster_vars: type: &cloud_type "gcp" - image: "projects/ubuntu-os-cloud/global/images/ubuntu-2004-focal-v20201211" # Ubuntu images can be located at https://cloud-images.ubuntu.com/locator/ -# image: "projects/ubuntu-os-cloud/global/images/ubuntu-1804-bionic-v20201211a" + image: "projects/ubuntu-os-cloud/global/images/ubuntu-2004-focal-v20210112" # Ubuntu images can be located at https://cloud-images.ubuntu.com/locator/ +# image: "projects/ubuntu-os-cloud/global/images/centos-7-v20201216 region: ®ion "europe-west1" dns_cloud_internal_domain: "c.{{ (_gcp_service_account_rawtext | string | from_json).project_id }}.internal" # The cloud-internal zone as defined by the cloud provider (e.g. GCP, AWS) dns_nameserver_zone: &dns_nameserver_zone "" # The zone that dns_server will operate on. gcloud dns needs a trailing '.'. Leave blank if no external DNS (use IPs only) dns_user_domain: "{%- if _dns_nameserver_zone -%}{{_cloud_type}}-{{_region}}.{{app_class}}.{{buildenv}}.{{_dns_nameserver_zone}}{%- endif -%}" # A user-defined _domain_ part of the FDQN, (if more prefixes are required before the dns_nameserver_zone) - dns_server: "clouddns" # Specify DNS server. nsupdate, route53 or clouddns. If empty string is specified, no DNS will be added. - assign_public_ip: "yes" - inventory_ip: "public" # 'public' or 'private', (private in case we're operating in a private LAN). If public, 'assign_public_ip' must be 'yes' + dns_server: "clouddns" # Specify DNS server. nsupdate, route53 or clouddns. If empty string is specified, no DNS will be added. + assign_public_ip: "no" + inventory_ip: "private" # 'public' or 'private', (private in case we're operating in a private LAN). 
If public, 'assign_public_ip' must be 'yes' ip_forward: "false" ssh_whitelist: &ssh_whitelist ['10.0.0.0/8'] metadata: - ssh-keys: "{%- if _host_ssh_connection_cfg.ansible_ssh_private_key_file is defined -%}{{ _host_ssh_connection_cfg.ansible_user }}:{{ lookup('pipe', 'ssh-keygen -y -f /dev/stdin < 0 -%}#! /bin/bash\n\n#Whitelist my inbound IPs\n[ -f /etc/sshguard/whitelist ] && echo \"{{_ssh_whitelist | join ('\n')}}\" >>/etc/sshguard/whitelist && /bin/systemctl restart sshguard{%- endif -%}" user-data: "" custom_tagslabels: @@ -76,41 +76,44 @@ cluster_vars: inv_service_id: "{{app_class}}" inv_cluster_id: "{{cluster_name}}" inv_cluster_type: "{{app_name}}" - inv_cost_centre: "1234" + inv_cost_centre: "0000000000" network_fw_tags: ["{{cluster_name}}-nwtag"] firewall_rules: - name: "{{cluster_name}}-extssh" allowed: [{ip_protocol: "tcp", ports: ["22"]}] source_ranges: "{{_ssh_whitelist}}" description: "SSH Access" -# - name: "{{cluster_name}}-prometheus-node-exporter" -# allowed: [{ip_protocol: "tcp", ports: ["{{ prometheus_node_exporter_port | default(9100) }}"]}] -# source_tags: ["{{cluster_name}}-nwtag"] -# description: "Prometheus instances attached to {{cluster_name}}-nwtag can access the exporter port(s)." + - name: "{{cluster_name}}-nwtag" + allowed: [{ip_protocol: "all"}] + source_tags: ["{{cluster_name}}-nwtag"] + description: "Access from all VMs attached to the {{cluster_name}}-nwtag group" + - name: "{{cluster_name}}-prometheus-node-exporter" + allowed: [{ip_protocol: "tcp", ports: ["{{ prometheus_node_exporter_port | default(9100) }}"]}] + source_tags: ["{{cluster_name}}-nwtag"] + description: "Prometheus instances attached to {{cluster_name}}-nwtag can access the exporter port(s)." 
sandbox: hosttype_vars: - sys: {vms_by_az: {d: 1, b: 1, c: 0}, flavor: e2-micro, rootvol_size: "10", version: "{{sys_version | default('')}}", auto_volumes: []} -# sysdisks: {vms_by_az: {d: 1, b: 1, c: 0}, flavor: e2-micro, rootvol_size: "10", version: "{{sysdisks_version | default('')}}", auto_volumes: [{auto_delete: true, interface: "SCSI", volume_size: 2, mountpoint: "/media/mysvc", fstype: "ext4"}]} - sysdisks2: {vms_by_az: {d: 1, b: 1, c: 0}, flavor: e2-micro, rootvol_size: "10", version: "{{sysdisks_version | default('')}}", auto_volumes: [{auto_delete: true, interface: "SCSI", volume_size: 2, mountpoint: "/media/mysvc", fstype: "ext4"}, {auto_delete: true, interface: "SCSI", volume_size: 2, mountpoint: "/media/mysvc2", fstype: "ext4"}]} -# sysdisks_multi: {vms_by_az: {d: 1, b: 1, c: 0}, flavor: e2-micro, rootvol_size: "10", version: "{{sysdisks_version | default('')}}", auto_volumes: [{auto_delete: true, interface: "SCSI", volume_size: 2, mountpoint: "/var/log/mysvc", fstype: "ext4", perms: {owner: "root", group: "sudo", mode: "775"}}, {auto_delete: true, interface: "SCSI", volume_size: 2, mountpoint: "/var/log/mysvc2", fstype: "ext4"}, {auto_delete: true, interface: "SCSI", volume_size: 3, mountpoint: "/var/log/mysvc3", fstype: "ext4"}]} + sys: {auto_volumes: [], flavor: e2-micro, version: '{{sys_version | default('''')}}', vms_by_az: {d: 1, b: 1, c: 0}} + sysdisks2: {auto_volumes: [{auto_delete: true, interface: SCSI, volume_size: 2, mountpoint: /media/mysvc, fstype: ext4, perms: {owner: root, group: root, mode: '775'}}, {auto_delete: true, interface: SCSI, volume_size: 2, mountpoint: /media/mysvc2, fstype: ext4}], flavor: e2-micro, rootvol_size: '25', version: '{{sysdisks_version | default('''')}}', vms_by_az: {d: 1, b: 1, c: 0}} + sysdisks3: {auto_volumes: [{auto_delete: true, interface: SCSI, volume_size: 2, mountpoint: /media/mysvc, fstype: ext4}, {auto_delete: true, interface: SCSI, volume_size: 2, mountpoint: /media/mysvc2, fstype: ext4}, 
{auto_delete: true, interface: SCSI, volume_size: 3, mountpoint: /media/mysvc3, fstype: ext4}], flavor: e2-micro, version: '{{sysdisks_version | default('''')}}', vms_by_az: {d: 1, b: 1, c: 0}} gcp_service_account_rawtext: &gcp_service_account_rawtext !vault | - $ANSIBLE_VAULT;1.1;AES256 + $ANSIBLE_VAULT;1.2;AES256;sandbox 7669080460651349243347331538721104778691266429457726036813912140404310 ssh_connection_cfg: host: &host_ssh_connection_cfg ansible_user: "ansible" ansible_ssh_private_key_file: !vault | - $ANSIBLE_VAULT;1.2;AES256;mgmt + $ANSIBLE_VAULT;1.2;AES256;sandbox 7669080460651349243347331538721104778691266429457726036813912140404310 -# bastion: -# ssh_args: '-o ProxyCommand="ssh -i ./id_rsa_bastion -W %h:%p -q top@10.119.113.133"' -# ssh_priv_key: !vault | -# $ANSIBLE_VAULT;1.2;AES256;mgmt -# 7669080460651349243347331538721104778691266429457726036813912140404310 - vpc_project_id: "{{ (_gcp_service_account_rawtext | string | from_json).project_id }}" # AKA the 'service project' if Shared VPC (https://cloud.google.com/vpc/docs/shared-vpc) is in use. - vpc_host_project_id: "{{ (_gcp_service_account_rawtext | string | from_json).project_id }}" # Would differ from vpc_project_id if Shared VPC is in use, (the networking is in a separate project) + bastion: + ssh_args: '-o ProxyCommand="ssh -i ./id_rsa_bastion -W %h:%p -q user@192.168.0.1"' + ssh_priv_key: !vault | + $ANSIBLE_VAULT;1.2;AES256;sandbox + 7669080460651349243347331538721104778691266429457726036813912140404310 + vpc_project_id: "{{ (_gcp_service_account_rawtext | string | from_json).project_id }}" # AKA the 'service project' if Shared VPC (https://cloud.google.com/vpc/docs/shared-vpc) is in use. 
+ vpc_host_project_id: "{{ (_gcp_service_account_rawtext | string | from_json).project_id }}" # Would differ from vpc_project_id if Shared VPC is in use, (the networking is in a separate project) vpc_network_name: "test-{{buildenv}}" - vpc_subnet_name: "" + vpc_subnet_name: "" # Can be omitted if using default subnets preemptible: "no" deletion_protection: "no" _cloud_type: *cloud_type @@ -118,4 +121,4 @@ _region: *region _gcp_service_account_rawtext: *gcp_service_account_rawtext _ssh_whitelist: *ssh_whitelist _dns_nameserver_zone: *dns_nameserver_zone -_host_ssh_connection_cfg: { <<: *host_ssh_connection_cfg } \ No newline at end of file +_host_ssh_connection_cfg: { <<: *host_ssh_connection_cfg } diff --git a/EXAMPLE/group_vars/all.yml b/EXAMPLE/group_vars/all.yml index fb139848..a81c5bda 100644 --- a/EXAMPLE/group_vars/all.yml +++ b/EXAMPLE/group_vars/all.yml @@ -4,9 +4,9 @@ merge_dict_vars_list: - "./cluster_defs/cluster_vars.yml" - "./cluster_defs/app_vars.yml" - "./cluster_defs/{{ cloud_type }}/" - - "./cluster_defs/{{ cloud_type }}/{{ region }}/" - - "./cluster_defs/{{ cloud_type }}/{{ region }}/{{ clusterid }}/" - - "./cluster_defs/{{ cloud_type }}/{{ region }}/{{ clusterid }}/{{ buildenv }}/" + - "./cluster_defs/{{ cloud_type }}/{{ clusterid }}/" + - "./cluster_defs/{{ cloud_type }}/{{ clusterid }}/{{ region }}/" + - "./cluster_defs/{{ cloud_type }}/{{ clusterid }}/{{ region }}/{{ buildenv }}/" #merge_dict_vars_list: # - "./cluster_defs/{{ clusterid }}/" diff --git a/create/tasks/gcp.yml b/create/tasks/gcp.yml index e423fae4..716ee14e 100644 --- a/create/tasks/gcp.yml +++ b/create/tasks/gcp.yml @@ -103,7 +103,7 @@ state: present deletion_protection: "{{cluster_vars[buildenv].deletion_protection}}" vars: - _bootdisk: {auto_delete: true, boot: true, device_name: "{{ item.hostname }}--boot", initialize_params: {source_image: "{{cluster_vars.image}}", disk_name: "{{ item.hostname }}--boot", disk_size_gb: 
"{{cluster_vars[buildenv].hosttype_vars[item.hosttype].rootvol_size}}"}} + _bootdisk: {auto_delete: true, boot: true, device_name: "{{ item.hostname }}--boot", initialize_params: {source_image: "{{cluster_vars.image}}", disk_name: "{{ item.hostname }}--boot", disk_size_gb: "{{ cluster_vars[buildenv].hosttype_vars[item.hosttype].rootvol_size | default(omit) }}"}} _autodisks: "{{item.auto_volumes | json_query(\"[].{auto_delete: auto_delete, interface: interface, device_name: device_name, initialize_params: initialize_params, source: {selfLink: src.source_url}}\") }}" _labels: name: "{{item.hostname}}" diff --git a/jenkinsfiles/Jenkinsfile_testsuite b/jenkinsfiles/Jenkinsfile_testsuite index 7b074a54..9a240971 100644 --- a/jenkinsfiles/Jenkinsfile_testsuite +++ b/jenkinsfiles/Jenkinsfile_testsuite @@ -76,9 +76,9 @@ properties([ //disableConcurrentBuilds(), //pipelineTriggers([pollSCM(ignorePostCommitHooks: true, scmpoll_spec: '''H/30 8-19 * * 1-5''')]), parameters([ - extendedChoice(name: 'CLOUD_REGION', type: 'PT_MULTI_SELECT', value: 'aws/eu-central-1,gcp/us-west2', description: 'Specify which cloud/region(s) to test', visibleItemCount: 5), - choice(name: 'BUILDENV', choices: ['', 'dev'], description: "The environment in which to run the tests"), - string(name: 'CLUSTER_ID', defaultValue: 'telem', trim: true), + extendedChoice(name: 'CLOUD_REGION', type: 'PT_MULTI_SELECT', value: 'aws/us-west-2,gcp/us-west2', description: 'Specify which cloud/region(s) to test', visibleItemCount: 5), + choice(name: 'BUILDENV', choices: ['', 'dev', 'mgmt'], description: "The environment in which to run the tests"), + string(name: 'CLUSTER_ID', defaultValue: 'top_peacock', trim: true), [name: 'DNS_FORCE_DISABLE', $class: 'ChoiceParameter', choiceType: 'PT_RADIO', description: '', randomName: 'choice-parameter-31196915540455', script: [$class: 'GroovyScript', fallbackScript: [classpath: [], sandbox: true, script: ''], script: [classpath: [], sandbox: true, script: 'return 
[\'false:selected\',\'true\',\'true,false\']']]], extendedChoice(name: 'REDEPLOY_SCHEME', type: 'PT_CHECKBOX', value: '_scheme_addallnew_rmdisk_rollback,_scheme_addnewvm_rmdisk_rollback,_scheme_rmvm_rmdisk_only,_scheme_rmvm_keepdisk_rollback', defaultValue: '_scheme_addallnew_rmdisk_rollback,_scheme_addnewvm_rmdisk_rollback,_scheme_rmvm_rmdisk_only,_scheme_rmvm_keepdisk_rollback', description: 'Specify which redeploy scheme(s) to test', visibleItemCount: 5), choice(name: 'CLEAN_ON_FAILURE', choices: [true, false], description: "Run a clusterverse clean in the event of a failure."), @@ -106,7 +106,7 @@ class cStageBuild { this.userParams.each({paramName, paramVal -> userParamsString += " -e ${paramName}=${paramVal}" }) - return(userParamsString + " -vvvv") + return(userParamsString + " -vvv") } }