Use the upstream registry role and also use a TCP proxy instead of SSL #42

Merged
merged 10 commits into from Aug 23, 2017
52 changes: 23 additions & 29 deletions playbooks/configure.yaml
@@ -3,20 +3,15 @@
 - hosts: localhost
   connection: local
   roles:
-  - instance-groups
+  - gce-instance-groups
 
 - hosts: cluster_hosts
   roles:
-  - cluster-variables
-  - docker-storage-setup
+  - gce-cluster-variables
+  - gce-docker-storage-setup
   - gce-cloudconfig
   - frequent-log-rotation
 
-- hosts: schedulable_nodes
-  gather_facts: no
-  roles:
-  - openshift-volume-quota
-
 # 3.7 requires std_include in order to invoke os_firewall (for now). Conditionally include it so
 # we don't break for older versions. Should be removed when os_firewall becomes a module.
 - hosts: localhost
@@ -48,6 +43,24 @@
       dest: "/tmp/"
       flat: yes
 
+- hosts: primary_master
+  gather_facts: no
+  roles:
+  - openshift-roles
+  - master-validate
+
+- hosts: localhost
+  tasks:
+  - name: Validate the public address from outside the cluster
+    uri:
+      url: "https://{{ openshift_master_cluster_public_hostname }}:{{ console_port }}/healthz/ready"
+      validate_certs: False
+      method: GET
+    register: resp
+    until: resp.status == 200
+    retries: 6
+    delay: 5
+
 - hosts: infra_nodes
   gather_facts: no
   roles:
@@ -59,24 +72,5 @@
 - hosts: app_nodes
   gather_facts: no
   roles:
-  - restrict-gce-metadata
-
-- hosts: schedulable_nodes
-  gather_facts: no
-  roles:
-  - openshift-emptydir-quota
-
-- hosts: primary_master
-  gather_facts: no
-  roles:
-  - openshift-registry
-  - openshift-roles
-
-- hosts: masters
-  gather_facts: no
-  roles:
-  - validate-masters
-
-- hosts: localhost
-  roles:
-  - validate-public
+  - gce-node-restrict-metadata
+  - node-emptydir-quota
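
The new localhost play above polls the master's public /healthz/ready endpoint until it returns HTTP 200, retrying up to 6 times with a 5-second delay and with certificate validation disabled. A rough shell equivalent of that poll, as a sketch only (the host and port are placeholders standing in for the openshift_master_cluster_public_hostname and console_port variables):

#!/bin/bash
# Poll the readiness endpoint the way the uri/until task does:
# up to 6 attempts, 5 seconds apart, certificate checks disabled (-k).
HOST="openshift-master.example.com"  # placeholder for openshift_master_cluster_public_hostname
PORT="443"                           # placeholder for console_port
for attempt in $(seq 1 6); do
  status="$(curl -sk -o /dev/null -w '%{http_code}' "https://${HOST}:${PORT}/healthz/ready")"
  if [ "${status}" = "200" ]; then
    echo "master is ready"
    exit 0
  fi
  sleep 5
done
echo "master did not become ready" >&2
exit 1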
2 changes: 1 addition & 1 deletion playbooks/group_vars/all/00_playbook_defaults.yaml
14 changes: 1 addition & 13 deletions playbooks/image.yaml
@@ -74,16 +74,10 @@
       state: present
     when: ansible_os_family == "RedHat"
 
-# GCE instances are starting with xfs AND barrier=1, which is only for extfs.
-- name: Verify fstab entries are correct for XFS volumes
-  hosts: build_instance_ips
-  tasks:
-  - name: Remove barrier=1 from XFS fstab entries
-    command: sed -i -e 's/xfs\(.*\)barrier=1/xfs\1/g; s/, / /g' /etc/fstab
-
 - name: Build image
   hosts: build_instance_ips
   roles:
+  - role: gce-image-configure
   - role: os_update_latest
   post_tasks:
   - name: Disable all repos on RHEL
@@ -112,12 +106,6 @@
       warn: no
     when: ansible_os_family == "RedHat"
 
-#- name: Install and configure image
-#  hosts: build_instance_ips
-#  tasks:
-#  #- yum: name=* state=latest update_cache=yes
-#  #  when: ansible_os_family == "RedHat"
-
 - name: Commit image
   hosts: localhost
   connection: local
2 changes: 1 addition & 1 deletion playbooks/inventory.yaml
@@ -3,4 +3,4 @@
   connection: local
   gather_facts: no
   roles:
-  - dynamic-inventory
+  - gce-dynamic-inventory
4 changes: 2 additions & 2 deletions playbooks/launch.yaml
@@ -5,8 +5,8 @@
   connection: local
   gather_facts: no
   roles:
-  - provision
-  - dynamic-inventory
+  - gce-provision
+  - gce-dynamic-inventory
 
 - hosts: localhost
   tasks:
@@ -1,3 +1,8 @@
+- name: Set the full path to the gcs_registry_keyfile as a fact
+  set_fact:
+    openshift_hosted_registry_storage_gcs_keyfile: "{{ playbook_dir + '/files/' + gcs_registry_keyfile }}"
+  when: gcs_registry_keyfile is defined
+
 - include_vars:
     dir: "{{ playbook_dir }}/group_vars/all"
     name: _cluster_variables
@@ -18,6 +18,8 @@ openshift_master_cluster_public_hostname: "openshift-master.{{ public_hosted_zon
 openshift_master_default_subdomain: "{{ wildcard_zone }}"
 osm_default_node_selector: "role=app"
 openshift_deployment_type: origin
+openshift_hosted_registry_storage_provider: gcs
+
 openshift_master_identity_providers:
 - name: google
   kind: GoogleIdentityProvider
@@ -2,4 +2,4 @@
 - name: create the docker-storage-setup config file
   template: src=docker-storage-setup.j2 dest=/etc/sysconfig/docker-storage-setup owner=root group=root mode=0644
 - name: start docker
-  service: name=docker state=started
+  service: name=docker state=restarted
@@ -1,9 +1,8 @@
-DEVS=/dev/sdb
-VG=docker-vol
-
-{% set storagedriver = provision_gce_docker_storage_driver | default('devicemapper') %}
+{% set storagedriver = provision_gce_docker_storage_driver | default('overlay2') %}
 STORAGE_DRIVER="{{ storagedriver }}"
 {% if storagedriver == 'devicemapper' %}
+DEVS=/dev/sda
+VG=docker-vol
 DATA_SIZE=95%VG
 EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize=3G"
 {% endif %}
3 changes: 3 additions & 0 deletions playbooks/roles/gce-image-configure/files/partition.conf
@@ -0,0 +1,3 @@
+[Service]
+ExecStartPost=-/usr/bin/growpart /dev/sda 1
+ExecStartPost=-/sbin/xfs_growfs /
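
This drop-in extends GCE's instance-setup service: the leading - on each ExecStartPost line tells systemd to treat a non-zero exit as success, so boot proceeds even when the partition is already at full size. One way to sanity-check the drop-in on a built image might be (a sketch; it assumes the service name targeted by the drop-in directory above):

systemctl daemon-reload                      # pick up the new drop-in directory
systemctl cat google-instance-setup.service  # should append partition.conf with both ExecStartPost lines
df -h /                                      # after the next boot, / should span the whole disk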
10 changes: 10 additions & 0 deletions playbooks/roles/gce-image-configure/tasks/main.yaml
@@ -0,0 +1,10 @@
+# GCE instances are starting with xfs AND barrier=1, which is only for extfs.
+- name: Remove barrier=1 from XFS fstab entries
+  command: sed -i -e 's/xfs\(.*\)barrier=1/xfs\1/g; s/, / /g' /etc/fstab
+
+- name: Ensure the root filesystem has XFS group quota turned on
+  command: sed -i -e 's/linux16 \(.*\)$/linux16 \1 rootflags=gquota/g' /boot/grub2/grub.cfg
+
+- name: Ensure the root partition grows on startup
+  copy: src=partition.conf dest=/etc/systemd/system/google-instance-setup.service.d/
+
@@ -2,12 +2,7 @@
 
 set -euo pipefail
 
-# Bucket for registry
-if gsutil ls -p "{{ gce_project_id }}" "gs://{{ provision_gce_registry_gcs_bucket }}" &>/dev/null; then
-  gsutil -m rm -r "gs://{{ provision_gce_registry_gcs_bucket }}"
-fi
-
-function teardown() {
+function teardown_cmd() {
   a=( $@ )
   local name=$1
   a=( "${a[@]:1}" )
@@ -28,7 +23,23 @@ function teardown() {
   fi
 }
 
+function teardown() {
+  for i in `seq 1 3`; do
+    if teardown_cmd $@; then
+      break
+    fi
+  done
+}
+
+# Bucket for registry
+(
+if gsutil ls -p "{{ gce_project_id }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" &>/dev/null; then
+  gsutil -m rm -r "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}"
+fi
+) &
+
 # DNS
+(
 dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}"
 if gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
   # Retry DNS changes until they succeed since this may be a shared resource
@@ -59,6 +70,7 @@ if gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zon
     break
   done
 fi
+) &
 
 # Preemptively spin down the instances
 (
@@ -94,73 +106,27 @@ teardown "{{ provision_prefix }}master-network-lb-ip" compute addresses --region
 (
 # Master SSL network rules
 teardown "{{ provision_prefix }}master-ssl-lb-rule" compute forwarding-rules --global
-teardown "{{ provision_prefix }}master-ssl-lb-target" compute target-ssl-proxies
-teardown "{{ provision_prefix }}master-ssl-lb-cert" compute ssl-certificates
+teardown "{{ provision_prefix }}master-ssl-lb-target" compute target-tcp-proxies
 teardown "{{ provision_prefix }}master-ssl-lb-ip" compute addresses --global
 teardown "{{ provision_prefix }}master-ssl-lb-backend" compute backend-services --global
 teardown "{{ provision_prefix }}master-ssl-lb-health-check" compute health-checks
 ) &
 
-# Additional disks for instances for docker storage
-instances=$(gcloud --project "{{ gce_project_id }}" compute instances list --filter='tags.items:{{ provision_prefix }}ocp AND tags.items:ocp' --format='value(name)')
-for i in $instances; do
-(
-  instance_zone=$(gcloud --project "{{ gce_project_id }}" compute instances list --filter="name:${i}" --format='value(zone)')
-  docker_disk="${i}-docker"
-  if gcloud --project "{{ gce_project_id }}" compute disks describe "$docker_disk" --zone "$instance_zone" &>/dev/null; then
-    if ! gcloud --project "{{ gce_project_id }}" compute instances detach-disk "${i}" --disk "$docker_disk" --zone "$instance_zone"; then
-      echo "warning: Unable to detach docker disk or already detached" 1>&2
-    fi
-  fi
-  openshift_disk="${i}-openshift"
-  if gcloud --project "{{ gce_project_id }}" compute disks describe "$openshift_disk" --zone "$instance_zone" &>/dev/null; then
-    if ! gcloud --project "{{ gce_project_id }}" compute instances detach-disk "${i}" --disk "$openshift_disk" --zone "$instance_zone"; then
-      echo "warning: Unable to detach openshift disk or already detached" 1>&2
-    fi
-  fi
-) &
-done
-
-for i in `jobs -p`; do wait $i; done
-
-# Wait for any remaining disks to be detached
-done=
-for i in `seq 1 60`; do
-  if [[ -z "$( gcloud --project "{{ gce_project_id }}" compute operations list --zones "{{ gce_zone_name }}" --filter 'operationType=detachDisk AND NOT status=DONE AND targetLink : "{{ provision_prefix }}ig-"' --page-size=10 --format 'value(targetLink)' --limit 1 )" ]]; then
-    done=1
-    break
-  fi
-  sleep 2
-done
-if [[ -z "${done}" ]]; then
-  echo "Failed to detach disks"
-  exit 1
-fi
-
-# Delete the disks in parallel with instance operations. Ignore failures to avoid preventing other expensive resources from
-# being removed.
-instances=$(gcloud --project "{{ gce_project_id }}" compute instances list --filter='tags.items:{{ provision_prefix }}ocp AND tags.items:ocp' --format='value(name)')
-for i in $instances; do
-  instance_zone=$(gcloud --project "{{ gce_project_id }}" compute instances list --filter="name:${i}" --format='value(zone)')
-  ( gcloud -q --project "{{ gce_project_id }}" compute disks delete "${i}-docker" --zone "$instance_zone" || true ) &
-  ( gcloud -q --project "{{ gce_project_id }}" compute disks delete "${i}-openshift" --zone "$instance_zone" || true ) &
-done
-
 # Instance groups
-( teardown "{{ provision_prefix }}ig-m" compute instance-groups managed --zone "{{ gce_zone_name }}" ) &
-( teardown "{{ provision_prefix }}ig-n" compute instance-groups managed --zone "{{ gce_zone_name }}" ) &
-( teardown "{{ provision_prefix }}ig-i" compute instance-groups managed --zone "{{ gce_zone_name }}" ) &
+{% for node_group in provision_gce_node_groups %}
+# teardown {{ node_group.name }}
+(
+  teardown "{{ provision_prefix }}ig-{{ node_group.suffix }}" compute instance-groups managed --zone "{{ gce_zone_name }}"
+  teardown "{{ provision_prefix }}instance-template-{{ node_group.name }}" compute instance-templates
+) &
+{% endfor %}
 
 for i in `jobs -p`; do wait $i; done
 
-# Instance templates
-( teardown "{{ provision_prefix }}instance-template-master" compute instance-templates ) &
-( teardown "{{ provision_prefix }}instance-template-node" compute instance-templates ) &
-( teardown "{{ provision_prefix }}instance-template-node-infra" compute instance-templates ) &
-
-# Firewall rules
-# ['name']='parameters for "gcloud compute firewall-rules create"'
-# For all possible parameters see: gcloud compute firewall-rules create --help
+#Firewall rules
+#['name']='parameters for "gcloud compute firewall-rules create"'
+#For all possible parameters see: gcloud compute firewall-rules create --help
 declare -A FW_RULES=(
   ['icmp']=""
   ['ssh-external']=""
@@ -173,7 +139,12 @@
 )
 for rule in "${!FW_RULES[@]}"; do
 ( if gcloud --project "{{ gce_project_id }}" compute firewall-rules describe "{{ provision_prefix }}$rule" &>/dev/null; then
-    gcloud -q --project "{{ gce_project_id }}" compute firewall-rules delete "{{ provision_prefix }}$rule"
+    # retry a few times because this call can be flaky
+    for i in `seq 1 3`; do
+      if gcloud -q --project "{{ gce_project_id }}" compute firewall-rules delete "{{ provision_prefix }}$rule"; then
+        break
+      fi
+    done
   fi ) &
 done
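
The reworked script wraps its gcloud deletions in the same retry idiom: teardown calls teardown_cmd up to three times, and the firewall-rule loop repeats the delete inline. Extracted into a generic helper, the pattern looks roughly like this (a sketch; "retry" is an illustrative name, not part of this PR):

#!/bin/bash
# Run a command up to N times, stopping at the first success -- the same
# shape as the teardown()/teardown_cmd() wrapper in the script above.
retry() {
  local attempts=$1; shift
  local i
  for i in $(seq 1 "${attempts}"); do
    if "$@"; then
      return 0
    fi
    echo "attempt ${i}/${attempts} failed: $*" >&2
  done
  return 1
}

# Usage, mirroring the firewall-rule deletion:
#   retry 3 gcloud -q compute firewall-rules delete "my-rule"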
