Update EXAMPLE files. Update Jenkinsfile_testsuite. Allow omission of GCP rootvol_size
Dougal Seeley committed Jan 21, 2021
1 parent 24a6078 commit 1a9583f
Showing 15 changed files with 236 additions and 208 deletions.
7 changes: 0 additions & 7 deletions EXAMPLE/cluster_defs/aws/eu-west-1/cluster_vars__region.yml

This file was deleted.

This file was deleted.

This file was deleted.

27 changes: 27 additions & 0 deletions EXAMPLE/cluster_defs/aws/testid/cluster_vars__clusterid.yml
@@ -0,0 +1,27 @@
---

prometheus_node_exporter_install: false
filebeat_install: false
metricbeat_install: false

beats_config:
filebeat:
# output_logstash_hosts: ["localhost:5044"] # The destination hosts for filebeat-gathered logs
    # extra_logs_paths:                       # Optional array, used to add extra paths or files to scrape for logs
# - /var/log/myapp/*.log
metricbeat:
# output_logstash_hosts: ["localhost:5044"] # The destination hosts for metricbeat-gathered metrics
    # diskio:                                 # Diskio retrieves metrics for all disk partitions by default. When diskio.include_devices is defined, only the listed devices are monitored
# include_devices: ["sda", "sdb", "nvme0n1", "nvme1n1", "nvme2n1"]
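    # Illustrative sketch only (not part of this commit): with filebeat_install/metricbeat_install set
    # back to true and the options above uncommented, a working config might look like this;
    # 'logstash.example.com' is a hypothetical endpoint:
    #   filebeat:
    #     output_logstash_hosts: ["logstash.example.com:5044"]
    #     extra_logs_paths:
    #       - /var/log/myapp/*.log
    #   metricbeat:
    #     output_logstash_hosts: ["logstash.example.com:5044"]
    #     diskio:
    #       include_devices: ["nvme0n1", "nvme1n1"]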


cluster_vars:
dns_nameserver_zone: &dns_nameserver_zone "" # The zone that dns_server will operate on. gcloud dns needs a trailing '.'. Leave blank if no external DNS (use IPs only)
  dns_user_domain: "{%- if _dns_nameserver_zone -%}{{cloud_type}}-{{region}}.{{app_class}}.{{buildenv}}.{{_dns_nameserver_zone}}{%- endif -%}" # A user-defined _domain_ part of the FQDN (if more prefixes are required before the dns_nameserver_zone)
dns_server: "" # Specify DNS server. nsupdate, route53 or clouddns. If empty string is specified, no DNS will be added.
instance_profile_name: ""
custom_tagslabels:
inv_resident_id: "myresident"
inv_proposition_id: "myproposition"
inv_cost_centre: "0000000000"
_dns_nameserver_zone: *dns_nameserver_zone
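# Illustrative sketch only (not part of this commit): with a hypothetical external zone, e.g.
#   dns_nameserver_zone: &dns_nameserver_zone "example.org"
#   dns_server: "route53"
# the dns_user_domain template above would render to something like "aws-eu-west-1.test.sandbox.example.org"
# (cloud_type-region.app_class.buildenv.zone) for the sandbox buildenv; clouddns would additionally
# require the zone to carry its trailing '.'.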
@@ -0,0 +1,5 @@
---

cluster_vars:
image: "ami-04ffbabc7935ec0e9" # eu-west-1, ubuntu, 20.04, amd64, hvm-ssd, 20210108. Ubuntu images can be located at https://cloud-images.ubuntu.com/locator/
# image: "ami-0b850cf02cc00fdc8" # eu-west-1, CentOS7
@@ -0,0 +1,93 @@
---

## Bind configuration and credentials, per environment
bind9:
sandbox: {server: "", key_name: "", key_secret: ""}
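# Hypothetical illustration (not from this commit): a populated entry, presumably consumed when
# dns_server is set to 'nsupdate', might look like:
#   sandbox: {server: "10.0.0.2", key_name: "mykey.example.org", key_secret: "c2VjcmV0LWtleS1kYXRhCg=="}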

cluster_vars:
sandbox:
aws_access_key: !vault |
$ANSIBLE_VAULT;1.2;AES256;sandbox
7669080460651349243347331538721104778691266429457726036813912140404310
aws_secret_key: !vault |
$ANSIBLE_VAULT;1.2;AES256;sandbox
7669080460651349243347331538721104778691266429457726036813912140404310
ssh_connection_cfg:
host: &host_ssh_connection_cfg
ansible_user: "ansible"
ansible_ssh_private_key_file: !vault |
$ANSIBLE_VAULT;1.2;AES256;sandbox
7669080460651349243347331538721104778691266429457726036813912140404310
bastion:
ssh_args: '-o ProxyCommand="ssh -i ./id_rsa_bastion -W %h:%p -q [email protected]"'
ssh_priv_key: !vault |
$ANSIBLE_VAULT;1.2;AES256;sandbox
7669080460651349243347331538721104778691266429457726036813912140404310
vpc_name: "test{{buildenv}}"
vpc_subnet_name_prefix: "{{buildenv}}-test-{{region}}"
key_name: "test__id_rsa"
termination_protection: "no"

hosttype_vars:
sys:
auto_volumes: [ ]
flavor: t3a.nano
version: "{{sys_version | default('')}}"
vms_by_az: { a: 1, b: 1, c: 0 }

sysdisks2:
auto_volumes:
- { device_name: "/dev/sda1", mountpoint: "/", fstype: "ext4", "volume_type": "gp2", "volume_size": 9, encrypted: True, "delete_on_termination": true }
- { device_name: "/dev/sdf", mountpoint: "/media/mysvc", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true, perms: { owner: "root", group: "root", mode: "775" } }
- { device_name: "/dev/sdg", mountpoint: "/media/mysvc2", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true }
flavor: t3a.nano
version: "{{sysdisks_version | default('')}}"
vms_by_az: { a: 1, b: 1, c: 0 }

sysdisks3:
auto_volumes:
- { device_name: "/dev/sdf", mountpoint: "/media/mysvc", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true }
- { device_name: "/dev/sdg", mountpoint: "/media/mysvc2", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true }
- { device_name: "/dev/sdh", mountpoint: "/media/mysvc3", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true }
flavor: t3a.nano
version: "{{sysdisks_version | default('')}}"
vms_by_az: { a: 1, b: 1, c: 0 }

hostnvme-multi:
auto_volumes:
- { device_name: "/dev/sdb", mountpoint: "/media/mysvc", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral0 }
- { device_name: "/dev/sdc", mountpoint: "/media/mysvc2", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral1 }
- { device_name: "/dev/sdf", mountpoint: "/media/mysvc8", fstype: "ext4", "volume_type": "gp2", "volume_size": 1, encrypted: True, "delete_on_termination": true }
flavor: i3en.2xlarge
version: "{{sys_version | default('')}}"
vms_by_az: { a: 1, b: 1, c: 0 }

hostnvme-lvm:
auto_volumes:
- { device_name: "/dev/sdb", mountpoint: "/media/data", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral0 }
- { device_name: "/dev/sdc", mountpoint: "/media/data", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral1 }
lvmparams: { vg_name: "vg0", lv_name: "lv0", lv_size: "+100%FREE" }
flavor: i3en.2xlarge
version: "{{sys_version | default('')}}"
vms_by_az: { a: 1, b: 1, c: 0 }

hosthdd-multi:
auto_volumes:
- { device_name: "/dev/sdb", mountpoint: "/media/mysvc", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral0 }
- { device_name: "/dev/sdc", mountpoint: "/media/mysvc2", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral1 }
- { device_name: "/dev/sdd", mountpoint: "/media/mysvc3", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral2 }
flavor: d2.xlarge
version: "{{sys_version | default('')}}"
vms_by_az: { a: 1, b: 1, c: 0 }

hosthdd-lvm:
auto_volumes:
- { device_name: "/dev/sdb", mountpoint: "/media/data", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral0 }
- { device_name: "/dev/sdc", mountpoint: "/media/data", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral1 }
- { device_name: "/dev/sdd", mountpoint: "/media/data", fstype: "ext4", "volume_type": "ephemeral", ephemeral: ephemeral2 }
lvmparams: { vg_name: "vg0", lv_name: "lv0", lv_size: "+100%FREE" }
flavor: d2.xlarge
version: "{{sys_version | default('')}}"
vms_by_az: { a: 1, b: 1, c: 0 }
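      # A note on the fields above (illustrative, not part of this commit): vms_by_az maps
      # availability-zone suffixes to per-zone VM counts, so { a: 1, b: 1, c: 0 } requests one VM in
      # zone 'a', one in zone 'b' and none in 'c'; scaling out zone 'c' would hypothetically just be:
      #   vms_by_az: { a: 1, b: 1, c: 1 }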

_host_ssh_connection_cfg: { <<: *host_ssh_connection_cfg }
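# A note on the pattern above (illustrative, not part of this commit): the '&host_ssh_connection_cfg'
# anchor on the 'host' mapping is merged here so that templates (e.g. the GCP metadata ssh-keys
# expression) can reference _host_ssh_connection_cfg.ansible_user without indexing by buildenv.
# A hypothetical environment with no bastion would keep the same shape and simply omit 'bastion':
#   ssh_connection_cfg:
#     host:
#       ansible_user: "ansible"
#       ansible_ssh_private_key_file: !vault |
#         $ANSIBLE_VAULT;1.2;AES256;sandbox
#         ...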
4 changes: 2 additions & 2 deletions EXAMPLE/cluster_defs/cluster_vars.yml
@@ -9,8 +9,8 @@ redeploy_schemes_supported: ['_scheme_addallnew_rmdisk_rollback', '_scheme_addne

skip_dynamic_inventory_sshwait: true

app_name: "test" # The name of the application cluster (e.g. 'couchbase', 'nginx'); becomes part of cluster_name.
app_class: "test" # The class of application (e.g. 'database', 'webserver'); becomes part of the fqdn
app_name: "{{lookup('pipe', 'whoami')}}-test" # The name of the application cluster (e.g. 'couchbase', 'nginx'); becomes part of cluster_name. Provided is a default to ensure no accidental overwriting.
app_class: "test" # The class of application (e.g. 'database', 'webserver'); becomes part of the fqdn

beats_config:
filebeat:
25 changes: 12 additions & 13 deletions EXAMPLE/cluster_defs/gcp/cluster_vars__cloud.yml
@@ -1,17 +1,16 @@
---

_ubuntu2004image: "projects/ubuntu-os-cloud/global/images/ubuntu-2004-focal-v20210112" # Ubuntu images can be located at https://cloud-images.ubuntu.com/locator/
_centos7image: "projects/centos-cloud/global/images/centos-7-v20201216"

cluster_vars:
image: "{{_ubuntu2004image}}"
image: "projects/ubuntu-os-cloud/global/images/ubuntu-2004-focal-v20210112" # Ubuntu images can be located at https://cloud-images.ubuntu.com/locator/
# image: "projects/ubuntu-os-cloud/global/images/centos-7-v20201216
dns_cloud_internal_domain: "c.{{ (_gcp_service_account_rawtext | string | from_json).project_id }}.internal" # The cloud-internal zone as defined by the cloud provider (e.g. GCP, AWS)
dns_server: "clouddns" # Specify DNS server. nsupdate, route53 or clouddns. If empty string is specified, no DNS will be added.
assign_public_ip: "no"
inventory_ip: "private" # 'public' or 'private', (private in case we're operating in a private LAN). If public, 'assign_public_ip' must be 'yes'
ip_forward: "false"
metadata:
ssh-keys: "{%- if _host_ssh_connection_cfg.ansible_ssh_private_key_file is defined -%}{{ _host_ssh_connection_cfg.ansible_user }}:{{ lookup('pipe', 'ssh-keygen -y -f /dev/stdin <<SSHFILE\n' + _host_ssh_connection_cfg.ansible_ssh_private_key_file|string + '\nSSHFILE') }} {{ _host_ssh_connection_cfg.ansible_user }}{%- else -%}{{ cliargs.remote_user }}:{{ lookup('pipe', 'ssh-keygen -y -f ' + ansible_ssh_private_key_file) }} {{ cliargs.remote_user }}{%- endif -%}"
    #The ssh key is provided either on the command line (as 'ansible_ssh_private_key_file') or as a variable in cluster_vars[buildenv].ssh_connection_cfg.host.ansible_ssh_private_key_file (anchored to _host_ssh_connection_cfg.ansible_ssh_private_key_file). We derive the public key from whichever is set using 'ssh-keygen -y', then strip its comment (hence the regex_replace) before appending our own username, because that is the format GCP expects.
ssh-keys: "{%- if _host_ssh_connection_cfg.ansible_ssh_private_key_file is defined -%}{{ _host_ssh_connection_cfg.ansible_user }}:{{ lookup('pipe', 'ssh-keygen -y -f /dev/stdin <<SSHFILE\n' + _host_ssh_connection_cfg.ansible_ssh_private_key_file|string + '\nSSHFILE') | regex_replace('([\\S]+ [\\S]+)(?:.*$)?', '\\1') }} {{ _host_ssh_connection_cfg.ansible_user }}{%- else -%}{{ cliargs.remote_user }}:{{ lookup('pipe', 'ssh-keygen -y -f ' + ansible_ssh_private_key_file) | regex_replace('([\\S]+ [\\S]+)(?:.*$)?', '\\1') }} {{ cliargs.remote_user }}{%- endif -%}"
startup-script: "{%- if _ssh_whitelist is defined and _ssh_whitelist | length > 0 -%}#! /bin/bash\n\n#Whitelist my inbound IPs\n[ -f /etc/sshguard/whitelist ] && echo \"{{_ssh_whitelist | join ('\n')}}\" >>/etc/sshguard/whitelist && /bin/systemctl restart sshguard{%- endif -%}"
user-data: ""
network_fw_tags: ["{{cluster_name}}-nwtag"]
@@ -20,11 +19,11 @@ cluster_vars:
allowed: [{ip_protocol: "tcp", ports: ["22"]}]
source_ranges: "{{_ssh_whitelist}}"
description: "SSH Access"
# - name: "{{cluster_name}}-nwtag"
# allowed: [{ip_protocol: "all"}]
# source_tags: ["{{cluster_name}}-nwtag"]
# description: "Access from all VMs attached to the {{cluster_name}}-nwtag group"
# - name: "{{cluster_name}}-prometheus-node-exporter"
# allowed: [{ip_protocol: "tcp", ports: ["{{ prometheus_node_exporter_port | default(9100) }}"]}]
# source_tags: ["{{cluster_name}}-nwtag"]
# description: "Prometheus instances attached to {{cluster_name}}-nwtag can access the exporter port(s)."
- name: "{{cluster_name}}-nwtag"
allowed: [{ip_protocol: "all"}]
source_tags: ["{{cluster_name}}-nwtag"]
description: "Access from all VMs attached to the {{cluster_name}}-nwtag group"
- name: "{{cluster_name}}-prometheus-node-exporter"
allowed: [{ip_protocol: "tcp", ports: ["{{ prometheus_node_exporter_port | default(9100) }}"]}]
source_tags: ["{{cluster_name}}-nwtag"]
description: "Prometheus instances attached to {{cluster_name}}-nwtag can access the exporter port(s)."
@@ -1,13 +1,25 @@
---

prometheus_node_exporter_install: false
filebeat_install: false
metricbeat_install: false

beats_config:
filebeat:
# output_logstash_hosts: ["localhost:5044"] # The destination hosts for filebeat-gathered logs
    # extra_logs_paths:                       # Optional array, used to add extra paths or files to scrape for logs
# - /var/log/myapp/*.log
metricbeat:
# output_logstash_hosts: ["localhost:5044"] # The destination hosts for metricbeat-gathered metrics
    # diskio:                                 # Diskio retrieves metrics for all disk partitions by default. When diskio.include_devices is defined, only the listed devices are monitored
# include_devices: ["sda", "sdb", "nvme0n1", "nvme1n1", "nvme2n1"]


cluster_vars:
dns_nameserver_zone: &dns_nameserver_zone "" # The zone that dns_server will operate on. gcloud dns needs a trailing '.'. Leave blank if no external DNS (use IPs only)
  dns_user_domain: "{%- if _dns_nameserver_zone -%}{{cloud_type}}-{{region}}.{{app_class}}.{{buildenv}}.{{_dns_nameserver_zone}}{%- endif -%}" # A user-defined _domain_ part of the FQDN (if more prefixes are required before the dns_nameserver_zone)
custom_tagslabels:
territory: "uk"
cost_centre: &cost_centre "0000000000"
billing_team: ""
service: &service ""
inv_resident_id: *service
inv_cost_centre: *cost_centre
inv_resident_id: "myresident"
inv_proposition_id: "myproposition"
inv_cost_centre: "0000000000"
_dns_nameserver_zone: *dns_nameserver_zone
