From e5957c55d2e78c51fcddd461043d0347af256d2b Mon Sep 17 00:00:00 2001 From: Michal Opala Date: Mon, 13 Jul 2020 17:04:14 +0200 Subject: [PATCH 01/19] epicli/upgrade: reusing existing shared-config + cleanups --- core/src/epicli/cli/engine/UpgradeEngine.py | 10 ++--- .../engine/ansible/AnsibleInventoryUpgrade.py | 39 +++++++++++++------ .../engine/ansible/AnsibleVarsGenerator.py | 6 +-- 3 files changed, 35 insertions(+), 20 deletions(-) diff --git a/core/src/epicli/cli/engine/UpgradeEngine.py b/core/src/epicli/cli/engine/UpgradeEngine.py index 756d20a119..9e15469d58 100644 --- a/core/src/epicli/cli/engine/UpgradeEngine.py +++ b/core/src/epicli/cli/engine/UpgradeEngine.py @@ -28,24 +28,24 @@ def get_backup_dirs(self): for d in os.listdir(self.build_dir): bd = os.path.join(self.build_dir, d) if os.path.isdir(bd) and re.match(r'backup_\d', d): result.append(bd) - return result + return result def backup_build(self): - # check if there are backup dirs and if so take the latest to work with. + # Check if there are backup dirs and if so take the latest to work with backup_dirs = self.get_backup_dirs() if len(backup_dirs) > 0: self.backup_build_dir = max(backup_dirs , key=os.path.getmtime) self.logger.info(f'There is already a backup present. Using latest for upgrade: "{self.backup_build_dir}"') return - # no backup dir so use the latest + # No backup dir so use the latest backup_dir_name = f'backup_{int(round(time.time() * 1000))}' self.backup_build_dir = os.path.join(self.build_dir, backup_dir_name ) self.logger.info(f'Backing up build dir to "{self.backup_build_dir}"') shutil.copytree(self.build_dir, self.backup_build_dir) def upgrade(self): - # backup existing build + # Backup existing build self.backup_build() # Run Ansible to upgrade infrastructure @@ -53,4 +53,4 @@ def upgrade(self): ansible_options=self.ansible_options) as ansible_runner: ansible_runner.upgrade() - return 0 \ No newline at end of file + return 0 diff --git a/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py b/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py index d0a85450d4..70e8f06e7c 100644 --- a/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py +++ b/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py @@ -1,11 +1,17 @@ +import os + from ansible.parsing.dataloader import DataLoader from ansible.inventory.manager import InventoryManager + from cli.helpers.Step import Step from cli.helpers.build_saver import get_inventory_path_for_build, check_build_output_version, BUILD_LEGACY +from cli.helpers.build_saver import save_inventory, MANIFEST_FILE_NAME +from cli.helpers.data_loader import load_yamls_file +from cli.helpers.objdict_helpers import dict_to_objdict +from cli.helpers.doc_list_helpers import select_single + from cli.models.AnsibleHostModel import AnsibleHostModel from cli.models.AnsibleInventoryItem import AnsibleInventoryItem -from cli.helpers.build_saver import save_inventory -from cli.helpers.objdict_helpers import dict_to_objdict class AnsibleInventoryUpgrade(Step): @@ -14,13 +20,14 @@ def __init__(self, build_dir, backup_build_dir): self.build_dir = build_dir self.backup_build_dir = backup_build_dir self.cluster_model = None + self.shared_config = None def __enter__(self): super().__enter__() return self def __exit__(self, exc_type, exc_value, traceback): - super().__exit__(exc_type, exc_value, traceback) + super().__exit__(exc_type, exc_value, traceback) def get_role(self, inventory, role_name): for role in inventory: @@ -32,7 +39,7 @@ def delete_role(self, 
inventory, role_name): for i in range(len(inventory)): if inventory[i].role == role_name: del inventory[i] - return + return def rename_role(self, inventory, role_name, new_role_name): role = self.get_role(inventory, role_name) @@ -40,13 +47,13 @@ def rename_role(self, inventory, role_name, new_role_name): role.role = new_role_name def upgrade(self): - inventory_path = get_inventory_path_for_build(self.backup_build_dir) + inventory_path = get_inventory_path_for_build(self.backup_build_dir) build_version = check_build_output_version(self.backup_build_dir) self.logger.info(f'Loading backup Ansible inventory: {inventory_path}') loaded_inventory = InventoryManager(loader = DataLoader(), sources=inventory_path) - # move loaded inventory to templating structure + # Move loaded inventory to templating structure new_inventory = [] for key in loaded_inventory.groups: if key != 'all' and key != 'ungrouped': @@ -56,7 +63,7 @@ def upgrade(self): new_hosts.append(AnsibleHostModel(host.address, host.vars['ansible_host'])) new_inventory.append(AnsibleInventoryItem(key, new_hosts)) - # re-constructure cluster model with all data necessary to run required upgrade rolls + # Reconstruct cluster model with all data necessary to run required upgrade rolls self.cluster_model = dict_to_objdict({ 'provider': 'any', 'specification': { @@ -67,6 +74,14 @@ def upgrade(self): } }) + # Reuse shared config from existing manifest + # Shared config contains the use_ha_control_plane flag which is required during upgrades + path_to_manifest = os.path.join(self.backup_build_dir, MANIFEST_FILE_NAME) + if not os.path.isfile(path_to_manifest): + raise Exception('No manifest.yml inside the build folder') + manifest_docs = load_yamls_file(path_to_manifest) + self.shared_config = select_single(manifest_docs, lambda x: x.kind == 'configuration/shared-config') + if build_version == BUILD_LEGACY: self.logger.info(f'Upgrading Ansible inventory Epiphany < 0.3.0') @@ -79,7 +94,7 @@ def upgrade(self): self.rename_role(new_inventory, 'kafka-exporter', 'kafka_exporter') self.rename_role(new_inventory, 'haproxy_tls_termination', 'haproxy') - # remove linux and reboot roles if present + # Remove linux and reboot roles if present self.delete_role(new_inventory, 'linux') self.delete_role(new_inventory, 'reboot') else: @@ -91,21 +106,21 @@ def upgrade(self): raise Exception('No kubernetes_master to use as repository') master_node = master.hosts[0] - # add image_registry + # Add image_registry image_registry = self.get_role(new_inventory, 'image_registry') if image_registry == None: hosts = [] hosts.append(AnsibleHostModel(master_node.name, master_node.ip)) new_inventory.append(AnsibleInventoryItem('image_registry', hosts)) - # add repository + # Add repository repository = self.get_role(new_inventory, 'repository') if repository == None: hosts = [] hosts.append(AnsibleHostModel(master_node.name, master_node.ip)) new_inventory.append(AnsibleInventoryItem('repository', hosts)) - # save new inventory + # Save new inventory save_inventory(new_inventory, self.cluster_model, self.build_dir) - return 0 \ No newline at end of file + return 0 diff --git a/core/src/epicli/cli/engine/ansible/AnsibleVarsGenerator.py b/core/src/epicli/cli/engine/ansible/AnsibleVarsGenerator.py index f4853e5db1..016462b7c5 100644 --- a/core/src/epicli/cli/engine/ansible/AnsibleVarsGenerator.py +++ b/core/src/epicli/cli/engine/ansible/AnsibleVarsGenerator.py @@ -93,10 +93,10 @@ def populate_group_vars(self, ansible_dir): main_vars['is_upgrade_run'] = self.is_upgrade_run 
main_vars['roles_with_generated_vars'] = sorted(self.roles_with_generated_vars) - shared_config_doc = select_first(self.config_docs, lambda x: x.kind == 'configuration/shared-config') + shared_config_doc = self.inventory_upgrade.shared_config if shared_config_doc == None: shared_config_doc = load_yaml_obj(types.DEFAULT, 'common', 'configuration/shared-config') - + self.set_vault_path(shared_config_doc) main_vars.update(shared_config_doc.specification) @@ -115,7 +115,7 @@ def set_vault_path(self, shared_config): shared_config.specification.vault_tmp_file_location = Config().vault_password_location cluster_name = self.get_cluster_name() shared_config.specification.vault_location = get_ansible_vault_path(cluster_name) - + def get_cluster_name(self): if 'name' in self.cluster_model.specification.keys(): return self.cluster_model.specification.name From bc69307be3e8005107ca01d67447d172c3ff26ce Mon Sep 17 00:00:00 2001 From: Michal Opala Date: Mon, 13 Jul 2020 17:07:58 +0200 Subject: [PATCH 02/19] upgrade: k8s HA upgrades minimal implementation --- .../roles/upgrade/tasks/kubernetes.yml | 54 ++++++---- ...upgrade-master.yml => upgrade-master0.yml} | 15 +-- .../tasks/kubernetes/upgrade-masterN.yml | 101 ++++++++++++++++++ 3 files changed, 139 insertions(+), 31 deletions(-) rename core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/{upgrade-master.yml => upgrade-master0.yml} (92%) create mode 100644 core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-masterN.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes.yml index eec3bd482e..50a57a647c 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes.yml @@ -1,11 +1,11 @@ --- -- name: Include wait-for-kube-apiserver.yml - import_tasks: kubernetes/wait-for-kube-apiserver.yml - delegate_to: "{{ groups['kubernetes_master'][0] }}" +- delegate_to: "{{ groups.kubernetes_master[0] }}" + block: + - name: Include wait-for-kube-apiserver.yml + import_tasks: kubernetes/wait-for-kube-apiserver.yml -- name: Include get-cluster-version.yml - import_tasks: kubernetes/get-cluster-version.yml # sets cluster_version - delegate_to: "{{ groups['kubernetes_master'][0] }}" + - name: Include get-cluster-version.yml + import_tasks: kubernetes/get-cluster-version.yml # sets cluster_version - name: Check if upgrade from current K8s version is supported assert: @@ -17,21 +17,33 @@ import_tasks: kubernetes/get-kubelet-version.yml # sets kubelet_version delegate_to: "{{ groups['kubernetes_master'][0] }}" -- name: Upgrade master to v{{ version }} - include_tasks: kubernetes/upgrade-master.yml - vars: +- vars: version: "{{ ver }}" cni_version: "{{ cni_ver }}" - when: - - groups['kubernetes_master'][0] == inventory_hostname - - cluster_version is version('v' + version, '<=') + block: + - when: cluster_version is version('v' + version, '<=') + block: + - name: Upgrade master0 to v{{ version }} + include_tasks: kubernetes/upgrade-master0.yml + when: + - inventory_hostname == groups.kubernetes_master[0] -- name: Upgrade node to v{{ version }} - include_tasks: kubernetes/upgrade-node.yml - vars: - version: "{{ ver }}" - cni_version: "{{ cni_ver }}" - when: - - groups['kubernetes_node'] is defined - - inventory_hostname in groups['kubernetes_node'] - - kubelet_version is version('v' + 
version, '<=') + - name: Upgrade masterN to v{{ version }} + include_tasks: kubernetes/upgrade-masterN.yml + when: + - inventory_hostname in groups.kubernetes_master[1:] + + - name: upgrade-master | Verify cluster version + include_tasks: kubernetes/verify-upgrade.yml + when: + - inventory_hostname == groups.kubernetes_master[0] + + - when: kubelet_version is version('v' + version, '<=') + block: + - name: Upgrade node to v{{ version }} + include_tasks: kubernetes/upgrade-node.yml + when: + - groups.kubernetes_node is defined + - inventory_hostname in groups.kubernetes_node + +# TODO: Create a flag file that the upgrade completed to not run it again for the same version next time diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml similarity index 92% rename from core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master.yml rename to core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml index b705e1a3d1..9d8d2e71aa 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml @@ -33,8 +33,8 @@ KUBECONFIG: /home/{{ admin_user.name }}/.kube/config shell: kubectl drain {{ inventory_hostname }} --ignore-daemonsets --delete-local-data when: - - groups['kubernetes_node'] is defined - - groups['kubernetes_node']|length > 0 # drain master only if there is at least one worker node + - groups.kubernetes_node is defined + - groups.kubernetes_node | length > 0 # drain master only if there is at least one worker node - name: upgrade-master | Wait for cluster's readiness include_tasks: wait.yml @@ -122,7 +122,7 @@ include_tasks: upgrade-kubeadm-config.yml when: kubeadm_config_file.stat.exists -- name: upgrade-master | Upgrade Docker # this may restart Docker daemon +- name: upgrade-master | Upgrade Docker # this may restart Docker daemon include_tasks: docker.yml - name: upgrade-master | Stop Kubelet @@ -163,10 +163,5 @@ retries: 20 delay: 5 when: - - groups['kubernetes_node'] is defined - - groups['kubernetes_node']|length > 0 # master is drained only if there is at least one worker node - -- name: upgrade-master | Verify cluster version - include_tasks: verify-upgrade.yml - -# TODO: Create a flag file that the upgrade completed to not run it again for the same version next time + - groups.kubernetes_node is defined + - groups.kubernetes_node | length > 0 # master is drained only if there is at least one worker node diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-masterN.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-masterN.yml new file mode 100644 index 0000000000..53c0afaa8e --- /dev/null +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-masterN.yml @@ -0,0 +1,101 @@ +--- +- name: upgrade-master | Wait for cluster's readiness + include_tasks: wait.yml + +- name: upgrade-master | Check if /etc/kubeadm/kubeadm-config.yml exists + stat: + path: /etc/kubeadm/kubeadm-config.yml + changed_when: false + register: kubeadm_config_file + +- name: upgrade-master | Drain master in preparation for maintenance + environment: + KUBECONFIG: /home/{{ admin_user.name }}/.kube/config + shell: kubectl drain {{ 
inventory_hostname }} --ignore-daemonsets --delete-local-data + when: + - groups.kubernetes_node is defined + - groups.kubernetes_node | length > 0 # drain master only if there is at least one worker node + +- name: upgrade-master | Wait for cluster's readiness + include_tasks: wait.yml + +- name: upgrade-master | Upgrade, configure packages + block: + - name: upgrade-master | Hold packages for Debian family + include_tasks: "Debian/hold-packages.yml" + when: ansible_os_family == "Debian" + + - name: upgrade-master | Install kubeadm + include_tasks: "{{ ansible_os_family }}/install-kubeadm.yml" + + - name: upgrade-master | Wait for cluster's readiness + include_tasks: wait.yml + + - name: upgrade-master | Upgrade master {{ inventory_hostname }} + shell: >- + kubeadm upgrade node + changed_when: false + register: result + until: result is succeeded + retries: 20 + delay: 30 + + - name: Install kubelet and kubectl for {{ version }} + include_tasks: >- + {%- if cni_in_kubelet is undefined or not cni_in_kubelet -%} + {{ ansible_os_family }}/install-packages.yml + {%- else -%} + {{ ansible_os_family }}/install-packages-cni-in-kubelet.yml + {%- endif -%} + when: result is succeeded + +- name: upgrade-master | Wait for cluster's readiness + include_tasks: wait.yml + +- name: upgrade-master | Upgrade kubeadm-config.yml if exists + include_tasks: upgrade-kubeadm-config.yml + when: kubeadm_config_file.stat.exists + +- name: upgrade-master | Upgrade Docker # this may restart Docker daemon + include_tasks: docker.yml + +- name: upgrade-master | Stop Kubelet + systemd: + state: stopped + name: kubelet + +- name: upgrade-master | Stop Docker + systemd: + state: stopped + name: docker + +- name: upgrade-master | Reload daemon + systemd: + daemon_reload: yes + +- name: upgrade-master | Start Docker + systemd: + name: docker + state: started + enabled: yes + +- name: upgrade-master | Start Kubelet + systemd: + name: kubelet + state: started + enabled: yes + +- name: upgrade-master | Wait for cluster's readiness + include_tasks: wait.yml + +- name: upgrade-master | Uncordon master - mark master as schedulable + environment: + KUBECONFIG: /home/{{ admin_user.name }}/.kube/config + shell: kubectl uncordon {{ inventory_hostname }} + register: output + until: output is succeeded + retries: 20 + delay: 5 + when: + - groups.kubernetes_node is defined + - groups.kubernetes_node | length > 0 # master is drained only if there is at least one worker node From d2ba76c2430803ec423250d4b1e372a939151578 Mon Sep 17 00:00:00 2001 From: Michal Opala Date: Tue, 14 Jul 2020 14:29:28 +0200 Subject: [PATCH 03/19] upgrade: kubernetes cleanup and refactor --- .../roles/upgrade/tasks/image-registry.yml | 24 ++-- .../roles/upgrade/tasks/kubernetes.yml | 2 +- .../tasks/kubernetes/Debian/hold-packages.yml | 7 +- .../kubernetes/Debian/install-kubeadm.yml | 13 ++- .../install-packages-cni-in-kubelet.yml | 13 +-- .../kubernetes/Debian/install-packages.yml | 9 +- .../kubernetes/RedHat/install-kubeadm.yml | 9 +- .../install-packages-cni-in-kubelet.yml | 9 +- .../kubernetes/RedHat/install-packages.yml | 9 +- .../tasks/kubernetes/downgrade-coredns.yml | 10 +- .../tasks/kubernetes/get-cluster-version.yml | 6 +- .../tasks/kubernetes/get-kubelet-version.yml | 6 +- .../upgrade/tasks/kubernetes/node/drain.yml | 7 -- .../tasks/kubernetes/node/uncordon.yml | 11 -- ....yml => patch-kubeadm-etcd-encryption.yml} | 30 ++--- .../reconfigure-auth-service-app.yml | 51 ++++---- .../kubernetes/reconfigure-rabbitmq-app.yml | 49 ++++---- 
.../update-kubeadm-image-repository.yml | 16 +-- .../tasks/kubernetes/upgrade-coredns.yml | 3 +- .../kubernetes/upgrade-kubeadm-config.yml | 14 ++- .../upgrade-kubernetes-dashboard.yml | 53 ++++----- .../tasks/kubernetes/upgrade-master0.yml | 85 +++----------- .../tasks/kubernetes/upgrade-masterN.yml | 78 ++----------- .../kubernetes/upgrade-network-components.yml | 81 ++++++------- .../upgrade/tasks/kubernetes/upgrade-node.yml | 24 ++-- .../upgrade/tasks/kubernetes/utils/drain.yml | 19 +++ .../utils/reload-kubelet-and-docker.yml | 26 +++++ .../tasks/kubernetes/utils/uncordon.yml | 24 ++++ .../utils/wait-for-kube-apiserver.yml | 15 +++ .../upgrade/tasks/kubernetes/utils/wait.yml | 43 +++++++ .../tasks/kubernetes/verify-upgrade.yml | 110 +++++++++--------- .../kubernetes/wait-for-kube-apiserver.yml | 12 -- .../roles/upgrade/tasks/kubernetes/wait.yml | 50 -------- 33 files changed, 438 insertions(+), 480 deletions(-) delete mode 100644 core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/node/drain.yml delete mode 100644 core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/node/uncordon.yml rename core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/{patch-kubeadm-config.yml => patch-kubeadm-etcd-encryption.yml} (76%) create mode 100644 core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/drain.yml create mode 100644 core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/reload-kubelet-and-docker.yml create mode 100644 core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/uncordon.yml create mode 100644 core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait-for-kube-apiserver.yml create mode 100644 core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait.yml delete mode 100644 core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/wait-for-kube-apiserver.yml delete mode 100644 core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/wait.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/image-registry.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/image-registry.yml index b7721eafce..c43ba58244 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/image-registry.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/image-registry.yml @@ -8,16 +8,16 @@ - name: Reconfigure Docker for pulling images from local registry block: - name: image-registry | Drain node in preparation for Docker reconfiguration - include_tasks: kubernetes/node/drain.yml + include_tasks: kubernetes/utils/drain.yml when: - - groups['kubernetes_node'] is defined - - inventory_hostname in groups['kubernetes_node'] + - groups.kubernetes_node is defined + - inventory_hostname in groups.kubernetes_node - name: image-registry | Wait for cluster's readiness - include_tasks: kubernetes/wait.yml + include_tasks: kubernetes/utils/wait.yml when: - - groups['kubernetes_node'] is defined - - inventory_hostname in groups['kubernetes_node'] + - groups.kubernetes_node is defined + - inventory_hostname in groups.kubernetes_node - name: image-registry | Reconfigure Docker if necessary # this restarts Docker daemon include_role: @@ -25,15 +25,15 @@ tasks_from: configure-docker - name: Include wait-for-kube-apiserver.yml - include_tasks: kubernetes/wait-for-kube-apiserver.yml + 
include_tasks: kubernetes/utils/wait-for-kube-apiserver.yml when: - - inventory_hostname in groups['kubernetes_master'] + - inventory_hostname in groups.kubernetes_master - name: image-registry | Uncordon node - mark node as schedulable - include_tasks: kubernetes/node/uncordon.yml + include_tasks: kubernetes/utils/uncordon.yml when: - - groups['kubernetes_node'] is defined - - inventory_hostname in groups['kubernetes_node'] + - groups.kubernetes_node is defined + - inventory_hostname in groups.kubernetes_node when: - - not image_registry_address in result.stdout \ No newline at end of file + - not image_registry_address in result.stdout diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes.yml index 50a57a647c..556117d5cb 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes.yml @@ -2,7 +2,7 @@ - delegate_to: "{{ groups.kubernetes_master[0] }}" block: - name: Include wait-for-kube-apiserver.yml - import_tasks: kubernetes/wait-for-kube-apiserver.yml + import_tasks: kubernetes/utils/wait-for-kube-apiserver.yml - name: Include get-cluster-version.yml import_tasks: kubernetes/get-cluster-version.yml # sets cluster_version diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/hold-packages.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/hold-packages.yml index d87956f410..2e98e862c1 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/hold-packages.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/hold-packages.yml @@ -1,6 +1,7 @@ -- name: "upgrade-master | Hold packages: {{ packages | join(', ') }}" - shell: >- - apt-mark hold {{ packages | join(' ') }} +--- +- name: "upgrade-master | Hold packages: {{ packages | join( ', ' ) }}" + command: >- + apt-mark hold {{ packages | join( ' ' ) }} vars: packages: >- {%- if cni_in_kubelet is undefined or not cni_in_kubelet -%} diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-kubeadm.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-kubeadm.yml index 9adab411eb..eadc4ca0f8 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-kubeadm.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-kubeadm.yml @@ -1,14 +1,15 @@ -- name: "upgrade-master | Unhold kubeadm" - shell: >- +--- +- name: upgrade-master | Unhold kubeadm + command: >- apt-mark unhold kubeadm - name: >- install-packages | Install kubeadm {{ version }} packages for Debian family apt: name: kubeadm={{ version }}-00 - update_cache: yes + update_cache: true state: present -- name: "upgrade-master | Hold kubeadm" - shell: >- - apt-mark hold kubeadm \ No newline at end of file +- name: upgrade-master | Hold kubeadm + command: >- + apt-mark hold kubeadm diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-packages-cni-in-kubelet.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-packages-cni-in-kubelet.yml index a4f8e1e5f3..5ecc31a6d7 100644 --- 
a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-packages-cni-in-kubelet.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-packages-cni-in-kubelet.yml @@ -5,9 +5,9 @@ changed_when: false # Unhold before removing to avoid error -- name: "install-packages | Unhold packages: {{ packages | join(', ') }}" - shell: >- - apt-mark unhold {{ packages | join(' ') }} +- name: "install-packages | Unhold packages: {{ packages | join( ', ' ) }}" + command: >- + apt-mark unhold {{ packages | join( ' ' ) }} vars: packages: >- {%- if ansible_facts.packages['kubernetes-cni'] is defined -%} @@ -25,8 +25,7 @@ when: ansible_facts.packages['kubelet'][0].version is version(version + '-00', '>') or ansible_facts.packages['kubectl'][0].version is version(version + '-00', '>') -- name: >- - install-packages | Install kubelet {{ version }} and kubectl {{ version }} packages for Debian family +- name: "install-packages | Install kubelet {{ version }} and kubectl {{ version }} packages for Debian family" apt: name: - kubelet={{ version }}-00 # removes (replaces) kubernetes-cni when full version is 1.17.7-00 but not when 1.17.7-01 @@ -35,5 +34,5 @@ state: present - name: install-packages | Hold kubelet and kubectl - shell: >- - apt-mark hold kubelet kubectl \ No newline at end of file + command: >- + apt-mark hold kubelet kubectl diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-packages.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-packages.yml index aba6b76e99..6e40ec6584 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-packages.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-packages.yml @@ -6,7 +6,7 @@ # Unhold before removing to avoid error - name: install-packages | Unhold kubelet, kubectl and kubernetes-cni - shell: >- + command: >- apt-mark unhold kubelet kubectl kubernetes-cni - name: install-packages | Remove newer Debian packages installed as dependencies if they exist # as there is no allow_downgrade parameter in ansible apt module @@ -17,8 +17,7 @@ or ansible_facts.packages['kubelet'][0].version is version (version + '-00', '>') or ansible_facts.packages['kubectl'][0].version is version (version + '-00', '>') -- name: >- - install-packages | Install kubernetes-cni {{ cni_version }}, kubelet {{ version }}, kubectl {{ version }} packages for Debian family +- name: "install-packages | Install kubernetes-cni {{ cni_version }}, kubelet {{ version }}, kubectl {{ version }} packages for Debian family" apt: name: - kubernetes-cni={{ cni_version }}-00 @@ -28,5 +27,5 @@ state: present - name: install-packages | Hold kubelet, kubectl and kubernetes-cni - shell: >- - apt-mark hold kubelet kubectl kubernetes-cni \ No newline at end of file + command: >- + apt-mark hold kubelet kubectl kubernetes-cni diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-kubeadm.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-kubeadm.yml index f32dccf7d4..a103b72292 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-kubeadm.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-kubeadm.yml @@ -1,9 +1,8 @@ --- -- name: >- - 
install-packages | Install kubeadm-{{ version }} package for RedHat family +- name: "install-packages | Install kubeadm-{{ version }} package for RedHat family" yum: name: kubeadm-{{ version }}-0 - update_cache: yes - allow_downgrade: yes + update_cache: true + allow_downgrade: true disable_excludes: kubernetes - state: present \ No newline at end of file + state: present diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages-cni-in-kubelet.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages-cni-in-kubelet.yml index 9586cda787..fbfb0ab907 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages-cni-in-kubelet.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages-cni-in-kubelet.yml @@ -1,11 +1,10 @@ --- -- name: >- - install-packages | Install kubelet-{{ version }} and kubectl-{{ version }} packages for RedHat family +- name: "install-packages | Install kubelet-{{ version }} and kubectl-{{ version }} packages for RedHat family" yum: name: - kubelet-{{ version }}-0 # removes (replaces) kubernetes-cni when full version is 1.17.7-0 but not when 1.17.7-1 - kubectl-{{ version }}-0 - update_cache: yes - allow_downgrade: yes + update_cache: true + allow_downgrade: true disable_excludes: kubernetes - state: present \ No newline at end of file + state: present diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages.yml index bf45c95cca..3d33aa95c8 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages.yml @@ -1,12 +1,11 @@ --- -- name: >- - install-packages | Install kubernetes-cni-{{ cni_version }}, kubelet-{{ version }}, kubectl-{{ version }} packages for RedHat family +- name: "install-packages | Install kubernetes-cni-{{ cni_version }}, kubelet-{{ version }}, kubectl-{{ version }} packages for RedHat family" yum: name: - kubernetes-cni-{{ cni_version }}-0 - kubelet-{{ version }}-0 - kubectl-{{ version }}-0 - update_cache: yes - allow_downgrade: yes + update_cache: true + allow_downgrade: true disable_excludes: kubernetes - state: present \ No newline at end of file + state: present diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/downgrade-coredns.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/downgrade-coredns.yml index 49b17d941b..d0dadd85c3 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/downgrade-coredns.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/downgrade-coredns.yml @@ -22,6 +22,8 @@ mode: u=rwx,go=r - name: Upload and apply template + vars: + file_name: coredns-config-for-k8s-below-1.16.yml block: - name: upgrade-master | Upload {{ file_name }} file template: @@ -31,13 +33,9 @@ group: "{{ admin_user.name }}" mode: u=rw,go=r - - name: upgrade-master | Apply /etc/epiphany/manifests/{{ file_name }} file + - name: "upgrade-master | Apply /etc/epiphany/manifests/{{ file_name }} file" environment: KUBECONFIG: /home/{{ admin_user.name }}/.kube/config - shell: | + command: | kubectl 
apply \ -f /etc/epiphany/manifests/{{ file_name }} - args: - executable: /bin/bash - vars: - file_name: coredns-config-for-k8s-below-1.16.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-cluster-version.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-cluster-version.yml index a858f340f1..fc83ab252e 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-cluster-version.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-cluster-version.yml @@ -1,9 +1,9 @@ --- - name: Get cluster version environment: - KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" - shell: >- - set -o pipefail && + KUBECONFIG: /home/{{ admin_user.name }}/.kube/config + shell: | + set -o errexit -o pipefail kubectl version --short -o json | jq --raw-output '.serverVersion.gitVersion' register: cluster_version changed_when: false diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-kubelet-version.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-kubelet-version.yml index 5e37f67907..6ba9ea6832 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-kubelet-version.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-kubelet-version.yml @@ -1,12 +1,12 @@ --- - name: Get kubelet version from API server environment: - KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" - shell: >- + KUBECONFIG: /home/{{ admin_user.name }}/.kube/config + command: >- kubectl get node {{ inventory_hostname }} -o jsonpath='{.status.nodeInfo.kubeletVersion}' register: kubelet_version changed_when: false - name: Set kubelet version as fact set_fact: - kubelet_version: "{{ kubelet_version.stdout }}" \ No newline at end of file + kubelet_version: "{{ kubelet_version.stdout }}" diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/node/drain.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/node/drain.yml deleted file mode 100644 index 423c00ee8f..0000000000 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/node/drain.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- - -- name: Drain node in preparation for maintenance - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config - shell: kubectl drain {{ inventory_hostname }} --ignore-daemonsets --delete-local-data - delegate_to: "{{ groups['kubernetes_master'][0] }}" diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/node/uncordon.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/node/uncordon.yml deleted file mode 100644 index b6e44c6750..0000000000 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/node/uncordon.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -- name: Uncordon node - mark node as schedulable - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config - shell: kubectl uncordon {{ inventory_hostname }} - register: output - until: output is succeeded - retries: 5 - delay: 5 - delegate_to: "{{ groups['kubernetes_master'][0] }}" \ No newline at end of file diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/patch-kubeadm-config.yml 
b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/patch-kubeadm-etcd-encryption.yml similarity index 76% rename from core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/patch-kubeadm-config.yml rename to core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/patch-kubeadm-etcd-encryption.yml index a0301ceb9c..0ce23e4cd3 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/patch-kubeadm-config.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/patch-kubeadm-etcd-encryption.yml @@ -7,40 +7,34 @@ - name: upgrade-master | Check if encryption of secret data is enabled command: >- grep -- '--encryption-provider-config' /etc/kubernetes/manifests/kube-apiserver.yaml - register: shell_grep_encryption_flag + register: command_grep_encryption_flag changed_when: false - failed_when: shell_grep_encryption_flag.rc > 1 + failed_when: command_grep_encryption_flag.rc > 1 - name: upgrade-master | Patch kubeadm-config ConfigMap if needed when: - - shell_grep_encryption_flag.rc == 0 # encryption enabled + - command_grep_encryption_flag.rc == 0 # encryption enabled run_once: true # makes no sense to execute it more than once (would be redundant) + environment: + KUBECONFIG: /home/{{ admin_user.name }}/.kube/config block: - name: Get kubeadm-config ConfigMap - shell: | + command: | kubectl get configmap kubeadm-config \ --namespace kube-system \ --output yaml - args: - executable: /bin/bash - environment: - KUBECONFIG: &KUBECONFIG /etc/kubernetes/admin.conf - register: shell_kubeadm_configmap + register: command_kubeadm_configmap changed_when: false # The following procedure ensures that etcd encryption is always enabled during subsequent kubeadm executions - - name: upgrade-master | Patch and re-apply the kubeadm-config ConfigMap + - name: upgrade-master | Patch kubeadm-config configmap (patch-kubeadm-etcd-encryption.yml) when: - _kubeadm_api_server_extra_args['encryption-provider-config'] is undefined - shell: | - kubectl apply \ + command: | + kubectl patch configmap kubeadm-config \ --namespace kube-system \ - --filename - \ - <<< "$KUBEADM_CONFIGMAP_DOCUMENT" - args: - executable: /bin/bash + --patch "$KUBEADM_CONFIGMAP_DOCUMENT" environment: - KUBECONFIG: *KUBECONFIG # Render an altered kubeadm-config configmap document KUBEADM_CONFIGMAP_DOCUMENT: >- {{ _document | combine(_update2, recursive=true) | to_nice_yaml(indent=2) }} @@ -48,7 +42,7 @@ vars: # Parse yaml payload _document: >- - {{ shell_kubeadm_configmap.stdout | from_yaml }} + {{ command_kubeadm_configmap.stdout | from_yaml }} # Extract cluster config _cluster_config: >- diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml index 2e5962b4e2..a6644e4bdf 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml @@ -1,47 +1,56 @@ --- -- name: Change keycloak stateful set to use {{ image_registry_address }} +- name: "Change keycloak stateful set to use {{ image_registry_address }}" + run_once: true + environment: + KUBECONFIG: /home/{{ admin_user.name }}/.kube/config block: - name: upgrade-master | Get keycloak statefulset name - environment: - 
KUBECONFIG: /home/{{ admin_user.name }}/.kube/config shell: |- - kubectl get statefulsets.apps --all-namespaces -o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.template.spec.containers[].image}{"\n"}{end}'| + kubectl get statefulsets.apps --all-namespaces -o=jsonpath={{ _jsonpath }} | grep -i keycloak | awk '{print $1}' + vars: + _jsonpath: >- + '{range .items[*]}{.metadata.name}{"\t"}{.spec.template.spec.containers[].image}{"\n"}{end}' changed_when: false register: keycloak_ss_name - args: + args: &args executable: /bin/bash + - name: upgrade-master | Get keycloak namespace - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config shell: |- - kubectl get statefulsets.apps --all-namespaces -o=jsonpath='{range .items[*]}{.metadata.namespace}{"\t"}{.spec.template.spec.containers[].image}{"\n"}{end}'| + kubectl get statefulsets.apps --all-namespaces -o=jsonpath={{ _jsonpath }} | grep -i keycloak | awk '{print $1}' + vars: + _jsonpath: >- + '{range .items[*]}{.metadata.namespace}{"\t"}{.spec.template.spec.containers[].image}{"\n"}{end}' changed_when: false register: keycloak_namespace - args: - executable: /bin/bash + args: *args + - name: upgrade-master | Get keycloak image - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config shell: |- - kubectl get statefulsets.apps {{ keycloak_ss_name.stdout }} -n {{ keycloak_namespace.stdout }} -o=jsonpath='{$.spec.template.spec.containers[:1].image}' + kubectl get statefulsets.apps {{ keycloak_ss_name.stdout }} -n {{ keycloak_namespace.stdout }} -o=jsonpath={{ _jsonpath }} + vars: + _jsonpath: >- + '{$.spec.template.spec.containers[:1].image}' changed_when: false register: keycloak_repository - args: - executable: /bin/bash when: - not keycloak_namespace.stdout == "" + - name: upgrade-master | Patch keycloak to use {{ image_registry_address }} - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config shell: |- - set -o pipefail && - kubectl patch statefulsets.apps {{ keycloak_ss_name.stdout }} -n {{ keycloak_namespace.stdout }} --patch '{"spec": {"template": { "spec": { "containers": [ { "image": "{{ image_registry_address }}/{{ keycloak_repository.stdout }}", "name": "{{ keycloak_ss_name.stdout }}" }]}}}}' - args: - executable: /bin/bash + kubectl patch statefulsets.apps {{ keycloak_ss_name.stdout }} -n {{ keycloak_namespace.stdout }} --patch '{{ _patch | to_json }}' + vars: + _patch: + spec: + template: + spec: + containers: + - name: "{{ keycloak_ss_name.stdout }}" + image: "{{ image_registry_address }}/{{ keycloak_repository.stdout }}" when: - not keycloak_namespace.stdout == "" - not image_registry_address in keycloak_repository.stdout diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-rabbitmq-app.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-rabbitmq-app.yml index 9b8d3bea2c..bbc071408f 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-rabbitmq-app.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-rabbitmq-app.yml @@ -1,47 +1,56 @@ --- - name: Change rabbitmq stateful set to use {{ image_registry_address }} + run_once: true + environment: + KUBECONFIG: /home/{{ admin_user.name }}/.kube/config block: - name: upgrade-master | Get rabbitmq statefulset name - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config shell: |- - kubectl get statefulsets.apps --all-namespaces -o=jsonpath='{range 
.items[*]}{.metadata.name}{"\t"}{.spec.template.spec.containers[].image}{"\n"}{end}'| + kubectl get statefulsets.apps --all-namespaces -o=jsonpath={{ _jsonpath }} | grep -i rabbitmq | awk '{print $1}' + vars: + _jsonpath: >- + '{range .items[*]}{.metadata.name}{"\t"}{.spec.template.spec.containers[].image}{"\n"}{end}' changed_when: false register: rabbit_mq_ss_name - args: + args: &args executable: /bin/bash + - name: upgrade-master | Get rabbitmq namespace - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config shell: |- - kubectl get statefulsets.apps --all-namespaces -o=jsonpath='{range .items[*]}{.metadata.namespace}{"\t"}{.spec.template.spec.containers[].image}{"\n"}{end}'| + kubectl get statefulsets.apps --all-namespaces -o=jsonpath={{ _jsonpath }} | grep -i rabbitmq | awk '{print $1}' + vars: + _jsonpath: >- + '{range .items[*]}{.metadata.namespace}{"\t"}{.spec.template.spec.containers[].image}{"\n"}{end}' changed_when: false register: rabbit_mq_namespace - args: - executable: /bin/bash + args: *args + - name: upgrade-master | Get rabbitmq image - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config shell: |- - kubectl get statefulsets.apps {{ rabbit_mq_ss_name.stdout }} -n {{ rabbit_mq_namespace.stdout }} -o=jsonpath='{$.spec.template.spec.containers[:1].image}' + kubectl get statefulsets.apps {{ rabbit_mq_ss_name.stdout }} -n {{ rabbit_mq_namespace.stdout }} -o=jsonpath={{ _jsonpath }} + vars: + _jsonpath: >- + '{$.spec.template.spec.containers[:1].image}' changed_when: false register: rabbit_mq_repository - args: - executable: /bin/bash when: - not rabbit_mq_namespace.stdout == "" + - name: upgrade-master | Patch rabbitmq to use {{ image_registry_address }} - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config shell: |- - set -o pipefail && - kubectl patch statefulsets.apps {{ rabbit_mq_ss_name.stdout }} -n {{ rabbit_mq_namespace.stdout }} --patch '{"spec": {"template": { "spec": { "containers": [ { "image": "{{ image_registry_address }}/{{ rabbit_mq_repository.stdout }}", "name": "{{ rabbit_mq_ss_name.stdout }}" }]}}}}' - args: - executable: /bin/bash + kubectl patch statefulsets.apps {{ rabbit_mq_ss_name.stdout }} -n {{ rabbit_mq_namespace.stdout }} --patch '{{ _patch | to_json }}' + vars: + _patch: + spec: + template: + spec: + containers: + - name: "{{ rabbit_mq_ss_name.stdout }}" + image: "{{ image_registry_address }}/{{ rabbit_mq_repository.stdout }}" when: - not rabbit_mq_namespace.stdout == "" - not image_registry_address in rabbit_mq_repository.stdout diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/update-kubeadm-image-repository.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/update-kubeadm-image-repository.yml index 83e2e2bc65..54a52146b0 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/update-kubeadm-image-repository.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/update-kubeadm-image-repository.yml @@ -1,25 +1,25 @@ --- - name: upgrade-master | Patch imageRepository in kubeadm-config ConfigMap + run_once: true + environment: + KUBECONFIG: /home/{{ admin_user.name }}/.kube/config block: - name: upgrade-master | Get kubeadm-config configmap - shell: | + command: | kubectl get configmap kubeadm-config \ --namespace kube-system \ --output yaml - environment: - KUBECONFIG: &KUBECONFIG /etc/kubernetes/admin.conf - register: shell_kubeadm_configmap + register: 
command_kubeadm_configmap changed_when: false - name: upgrade-master | Patch kubeadm-config configmap (update-kubeadm-image-repository.yml) when: - _image_repository_updated != _image_repository # skip the task if nothing changed - shell: | + command: | kubectl patch configmap kubeadm-config \ --namespace kube-system \ --patch "$KUBEADM_CONFIGMAP_DOCUMENT" environment: - KUBECONFIG: *KUBECONFIG # Render an altered kubeadm-config configmap document KUBEADM_CONFIGMAP_DOCUMENT: >- {{ _document | combine(_update2, recursive=true) | to_nice_yaml(indent=2) }} @@ -27,7 +27,7 @@ vars: # Parse yaml payload _document: >- - {{ shell_kubeadm_configmap.stdout | from_yaml }} + {{ command_kubeadm_configmap.stdout | from_yaml }} # Extract cluster config _cluster_config: >- @@ -37,7 +37,7 @@ {{ _cluster_config.imageRepository }} _image_repository_updated: >- - {%- if _image_repository is search(':') -%} + {%- if _image_repository is search( ':' ) -%} {{ _image_repository | regex_replace('^(?P.+):(?P\d+)', image_registry_address) }} {%- else -%} {{ image_registry_address }}/{{ _image_repository }} diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-coredns.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-coredns.yml index a85237a55d..5beda7a980 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-coredns.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-coredns.yml @@ -1,4 +1,5 @@ +--- - name: Apply latest coredns import_role: name: kubernetes_master - tasks_from: apply-coredns \ No newline at end of file + tasks_from: apply-coredns diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-kubeadm-config.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-kubeadm-config.yml index c13c94866c..8f0bbc41ae 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-kubeadm-config.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-kubeadm-config.yml @@ -1,4 +1,12 @@ --- -- name: upgrade-kubeadm-config | Save kubeadm-config ConfigMap to file - shell: >- - kubeadm config view > /etc/kubeadm/kubeadm-config.yml +- name: upgrade-kubeadm-config | Check if /etc/kubeadm/kubeadm-config.yml exists + stat: + path: /etc/kubeadm/kubeadm-config.yml + register: kubeadm_config_file + changed_when: false + +- when: kubeadm_config_file.stat.exists + block: + - name: upgrade-kubeadm-config | Save kubeadm-config ConfigMap to file + command: >- + kubeadm config view > /etc/kubeadm/kubeadm-config.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-kubernetes-dashboard.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-kubernetes-dashboard.yml index d4bd9efbfa..9f4d358f24 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-kubernetes-dashboard.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-kubernetes-dashboard.yml @@ -1,40 +1,41 @@ --- # Delete old kubernetes-dashboard from kube-system, new dashboard has its own namespace - TODO remove this block in 0.7.0 -- name: Check if any resource with label 'k8s-app=kubernetes-dashboard' exists in kube-system +- name: uprgade-kubernetes-dashboard | Delete old kubernetes dashboard + 
run_once: true environment: KUBECONFIG: /home/{{ admin_user.name }}/.kube/config - shell: kubectl get all -l k8s-app=kubernetes-dashboard -n kube-system - register: result - changed_when: false + block: + - name: Check if any resource with label 'k8s-app=kubernetes-dashboard' exists in kube-system + command: | + kubectl get all -l k8s-app=kubernetes-dashboard -n kube-system + register: result + changed_when: false -- name: Delete all resources with label 'k8s-app=kubernetes-dashboard' from kube-system - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config - shell: kubectl delete all -l k8s-app=kubernetes-dashboard -n kube-system - when: - - not 'No resources found' in result.stderr + - name: Delete all resources with label 'k8s-app=kubernetes-dashboard' from kube-system + command: | + kubectl delete all -l k8s-app=kubernetes-dashboard -n kube-system + when: + - not 'No resources found' in result.stderr -- name: Check if 'kubernetes-dashboard-minimal' Role or RoleBinding exists in kube-system - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config - shell: kubectl get Role,RoleBinding kubernetes-dashboard-minimal -n kube-system - register: result - changed_when: false - failed_when: - - result.rc != 0 - - not 'not found' in result.stderr + - name: Check if 'kubernetes-dashboard-minimal' Role or RoleBinding exists in kube-system + command: | + kubectl get Role,RoleBinding kubernetes-dashboard-minimal -n kube-system + register: result + changed_when: false + failed_when: + - result.rc != 0 + - not 'not found' in result.stderr -- name: Delete 'kubernetes-dashboard-minimal' Role and RoleBinding from kube-system - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config - shell: kubectl delete Role,RoleBinding kubernetes-dashboard-minimal -n kube-system - when: - - not 'not found' in result.stderr + - name: Delete 'kubernetes-dashboard-minimal' Role and RoleBinding from kube-system + command: | + kubectl delete Role,RoleBinding kubernetes-dashboard-minimal -n kube-system + when: + - not 'not found' in result.stderr # Deploy new version of kubernetes-dashboard - name: Apply Kubernetes Dashboard import_role: name: kubernetes_master - tasks_from: apply-dashboard \ No newline at end of file + tasks_from: apply-dashboard diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml index 9d8d2e71aa..26a775c84c 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml @@ -1,12 +1,6 @@ --- - name: upgrade-master | Wait for cluster's readiness - include_tasks: wait.yml - -- name: upgrade-master | Check if /etc/kubeadm/kubeadm-config.yml exists - stat: - path: /etc/kubeadm/kubeadm-config.yml - changed_when: false - register: kubeadm_config_file + include_tasks: utils/wait.yml # This resolves issues (related to the etcd encryption) causing upgrades to hang. # Legacy clusters may have incomplete configs, thus it is corrected here, before any `kubeadm upgrade` command is executed. @@ -14,7 +8,7 @@ # In turn, this causes Kuberentes components such as the controller-manager to lose ability to read internal (kube-system) secrets, then # any upgrade attempt freezes and the cluster at hand becomes unusable. 
- name: upgrade-master | Make sure the etcd encryption feature is properly configured (if enabled) - import_tasks: patch-kubeadm-config.yml + include_tasks: patch-kubeadm-etcd-encryption.yml - name: upgrade-master | Update imageRepository in kubeadm-config ConfigMap include_tasks: update-kubeadm-image-repository.yml @@ -29,20 +23,12 @@ include_tasks: upgrade-kubernetes-dashboard.yml - name: upgrade-master | Drain master in preparation for maintenance - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config - shell: kubectl drain {{ inventory_hostname }} --ignore-daemonsets --delete-local-data - when: - - groups.kubernetes_node is defined - - groups.kubernetes_node | length > 0 # drain master only if there is at least one worker node - -- name: upgrade-master | Wait for cluster's readiness - include_tasks: wait.yml + include_tasks: utils/drain.yml - name: upgrade-master | Upgrade, configure packages block: - name: upgrade-master | Hold packages for Debian family - include_tasks: "Debian/hold-packages.yml" + include_tasks: Debian/hold-packages.yml when: ansible_os_family == "Debian" - name: upgrade-master | Install kubeadm @@ -62,39 +48,41 @@ - version is version('1.16.0', '<') - name: upgrade-master | Wait for cluster's readiness - include_tasks: wait.yml + include_tasks: utils/wait.yml # Note: Usage of the --config flag for reconfiguring the cluster during upgrade is not recommended since v1.16 - name: upgrade-master | Validate whether cluster is upgradeable # Ignore CoreDNSUnsupportedPlugins error since coredns-migration does not support 'hosts' plugin. # This issue is fixed in K8s v1.18, see https://github.com/kubernetes/kubernetes/pull/88482 - shell: >- + command: >- {%- if version is version('1.18.0', '>=') -%} kubeadm upgrade plan v{{ version }} {%- else -%} kubeadm upgrade plan v{{ version }} --ignore-preflight-errors=CoreDNSUnsupportedPlugins {%- endif -%} - changed_when: false register: result - until: result is succeeded + until: + - result is succeeded retries: 20 delay: 30 + changed_when: false # Note: Usage of the --config flag for reconfiguring the cluster during upgrade is not recommended since v1.16 - name: upgrade-master | Upgrade K8s cluster to v{{ version }} # Ignore CoreDNSUnsupportedPlugins error since coredns-migration does not support 'hosts' plugin. # This issue is fixed in K8s v1.18, see https://github.com/kubernetes/kubernetes/pull/88482 - shell: >- + command: >- {%- if version is version('1.18.0', '>=') -%} kubeadm upgrade apply -y v{{ version }} {%- else -%} kubeadm upgrade apply -y v{{ version }} --ignore-preflight-errors=CoreDNSUnsupportedPlugins {%- endif -%} - changed_when: false register: result - until: result is succeeded + until: + - result is succeeded retries: 20 delay: 30 + changed_when: false when: - cluster_version is version('v' + version, '<') @@ -105,10 +93,11 @@ {%- else -%} {{ ansible_os_family }}/install-packages-cni-in-kubelet.yml {%- endif -%} - when: result is succeeded + when: + - result is succeeded - name: upgrade-master | Wait for cluster's readiness - include_tasks: wait.yml + include_tasks: utils/wait.yml # 'kubeadm upgrade apply' overwrites Epiphany's customized CoreDNS deployment so we restore it. # This task should be run each time K8s is upgraded to the latest version. 
@@ -120,48 +109,12 @@ - name: upgrade-master | Upgrade kubeadm-config.yml if exists include_tasks: upgrade-kubeadm-config.yml - when: kubeadm_config_file.stat.exists - name: upgrade-master | Upgrade Docker # this may restart Docker daemon include_tasks: docker.yml -- name: upgrade-master | Stop Kubelet - systemd: - state: stopped - name: kubelet - -- name: upgrade-master | Stop Docker - systemd: - state: stopped - name: docker - -- name: upgrade-master | Reload daemon - systemd: - daemon_reload: yes - -- name: upgrade-master | Start Docker - systemd: - name: docker - state: started - enabled: yes - -- name: upgrade-master | Start Kubelet - systemd: - name: kubelet - state: started - enabled: yes - -- name: upgrade-master | Wait for cluster's readiness - include_tasks: wait.yml +- name: upgrade-master | Reload kubelet and docker + include_tasks: utils/reload-kubelet-and-docker.yml - name: upgrade-master | Uncordon master - mark master as schedulable - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config - shell: kubectl uncordon {{ inventory_hostname }} - register: output - until: output is succeeded - retries: 20 - delay: 5 - when: - - groups.kubernetes_node is defined - - groups.kubernetes_node | length > 0 # master is drained only if there is at least one worker node + include_tasks: utils/uncordon.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-masterN.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-masterN.yml index 53c0afaa8e..e0b0885c05 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-masterN.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-masterN.yml @@ -1,44 +1,25 @@ --- -- name: upgrade-master | Wait for cluster's readiness - include_tasks: wait.yml - -- name: upgrade-master | Check if /etc/kubeadm/kubeadm-config.yml exists - stat: - path: /etc/kubeadm/kubeadm-config.yml - changed_when: false - register: kubeadm_config_file - - name: upgrade-master | Drain master in preparation for maintenance - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config - shell: kubectl drain {{ inventory_hostname }} --ignore-daemonsets --delete-local-data - when: - - groups.kubernetes_node is defined - - groups.kubernetes_node | length > 0 # drain master only if there is at least one worker node - -- name: upgrade-master | Wait for cluster's readiness - include_tasks: wait.yml + include_tasks: utils/drain.yml - name: upgrade-master | Upgrade, configure packages block: - name: upgrade-master | Hold packages for Debian family - include_tasks: "Debian/hold-packages.yml" + include_tasks: Debian/hold-packages.yml when: ansible_os_family == "Debian" - name: upgrade-master | Install kubeadm include_tasks: "{{ ansible_os_family }}/install-kubeadm.yml" - - name: upgrade-master | Wait for cluster's readiness - include_tasks: wait.yml - - name: upgrade-master | Upgrade master {{ inventory_hostname }} - shell: >- + command: >- kubeadm upgrade node - changed_when: false register: result - until: result is succeeded + until: + - result is succeeded retries: 20 delay: 30 + changed_when: false - name: Install kubelet and kubectl for {{ version }} include_tasks: >- @@ -47,55 +28,20 @@ {%- else -%} {{ ansible_os_family }}/install-packages-cni-in-kubelet.yml {%- endif -%} - when: result is succeeded + when: + - result is succeeded - name: upgrade-master | Wait for cluster's readiness - include_tasks: 
wait.yml + include_tasks: utils/wait.yml - name: upgrade-master | Upgrade kubeadm-config.yml if exists include_tasks: upgrade-kubeadm-config.yml - when: kubeadm_config_file.stat.exists - name: upgrade-master | Upgrade Docker # this may restart Docker daemon include_tasks: docker.yml -- name: upgrade-master | Stop Kubelet - systemd: - state: stopped - name: kubelet - -- name: upgrade-master | Stop Docker - systemd: - state: stopped - name: docker - -- name: upgrade-master | Reload daemon - systemd: - daemon_reload: yes - -- name: upgrade-master | Start Docker - systemd: - name: docker - state: started - enabled: yes - -- name: upgrade-master | Start Kubelet - systemd: - name: kubelet - state: started - enabled: yes - -- name: upgrade-master | Wait for cluster's readiness - include_tasks: wait.yml +- name: upgrade-master | Reload kubelet and docker + include_tasks: utils/reload-kubelet-and-docker.yml - name: upgrade-master | Uncordon master - mark master as schedulable - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config - shell: kubectl uncordon {{ inventory_hostname }} - register: output - until: output is succeeded - retries: 20 - delay: 5 - when: - - groups.kubernetes_node is defined - - groups.kubernetes_node | length > 0 # master is drained only if there is at least one worker node + include_tasks: utils/uncordon.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-network-components.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-network-components.yml index 7b5b38c4da..17d9145195 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-network-components.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-network-components.yml @@ -1,56 +1,51 @@ --- - -- name: upgrade-master | Set default cni plugin - flannel - set_fact: - plugin: "flannel" - -- name: upgrade-master | Wait for API resources to propagate - shell: | - kubectl api-resources --cached=false && kubectl -n kube-system get daemonsets +- name: upgrade-master | Determine which network plugin is used + run_once: true environment: KUBECONFIG: /home/{{ admin_user.name }}/.kube/config - changed_when: false - register: daemonsets_query_result - until: daemonsets_query_result is success - retries: 20 - delay: 30 + block: + - name: upgrade-master | Set default cni plugin - flannel + set_fact: + plugin: "flannel" -- name: upgrade-master | If canal is installed on the cluster - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config - shell: |- - kubectl -n kube-system get daemonsets -l k8s-app=canal - changed_when: false - register: canal_query_result - args: - executable: /bin/bash + - name: upgrade-master | Wait for API resources to propagate + shell: >- + kubectl api-resources --cached=false && kubectl -n kube-system get daemonsets + register: daemonsets_query_result + until: + - daemonsets_query_result is success + retries: 20 + delay: 30 + changed_when: false -- name: upgrade-master | Set network plugin variable to canal - set_fact: - plugin: "canal" - when: - - '"canal" in canal_query_result.stdout' + - name: upgrade-master | If canal is installed on the cluster + command: >- + kubectl -n kube-system get daemonsets -l k8s-app=canal + register: canal_query_result + changed_when: false -- name: upgrade-master | If calico is installed on the cluster - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config - shell: |- 
- kubectl -n kube-system get daemonsets -l k8s-app=calico-node - changed_when: false - register: calico_query_result - args: - executable: /bin/bash + - name: upgrade-master | Set network plugin variable to canal + set_fact: + plugin: "canal" + when: + - '"canal" in canal_query_result.stdout' + + - name: upgrade-master | If calico is installed on the cluster + command: >- + kubectl -n kube-system get daemonsets -l k8s-app=calico-node + register: calico_query_result + changed_when: false -- name: upgrade-master | Set network plugin variable to calico - set_fact: - plugin: "calico" - when: - - '"calico" in calico_query_result.stdout' + - name: upgrade-master | Set network plugin variable to calico + set_fact: + plugin: "calico" + when: + - '"calico" in calico_query_result.stdout' -- name: Apply network plugin configured by user {{ plugin }} +- name: "Apply network plugin configured by user {{ plugin }}" import_role: name: kubernetes_master tasks_from: apply-network-components vars: network_plugin: "{{ plugin }}" - k8s_server_version: "{{ cluster_version }}" \ No newline at end of file + k8s_server_version: "{{ cluster_version }}" diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-node.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-node.yml index 50d0bcd05f..ccedaed2e0 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-node.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-node.yml @@ -1,12 +1,6 @@ --- -- name: upgrade-node | Wait for cluster's readiness - include_tasks: wait.yml - - name: upgrade-node | Drain node in preparation for maintenance - include_tasks: node/drain.yml - -- name: upgrade-node | Wait for cluster's readiness - include_tasks: wait.yml + include_tasks: utils/drain.yml - name: upgrade-node | Upgrade Docker # this may restart Docker daemon include_tasks: docker.yml @@ -21,7 +15,13 @@ include_tasks: "{{ ansible_os_family }}/install-kubeadm.yml" - name: upgrade-node | Upgrade node config - shell: kubeadm upgrade node config --kubelet-version v{{ version }} + command: >- + kubeadm upgrade node config --kubelet-version v{{ version }} + register: result + until: + - result is succeeded + retries: 20 + delay: 30 - name: upgrade-node | Install packages include_tasks: >- @@ -34,14 +34,14 @@ - name: upgrade-node | Restart kubelet systemd: state: restarted - daemon_reload: yes + daemon_reload: true name: kubelet - name: upgrade-node | Wait for cluster's readiness - include_tasks: wait.yml + include_tasks: utils/wait.yml - name: upgrade-node | Uncordon node - mark node as schedulable - include_tasks: node/uncordon.yml + include_tasks: utils/uncordon.yml - name: upgrade-node | Verify component versions and node status - include_tasks: verify-upgrade.yml \ No newline at end of file + include_tasks: verify-upgrade.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/drain.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/drain.yml new file mode 100644 index 0000000000..c3917efc1f --- /dev/null +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/drain.yml @@ -0,0 +1,19 @@ +--- +- name: upgrade | Wait for cluster's readiness + include_tasks: kubernetes/utils/wait.yml + +- when: + - groups.kubernetes_node is defined + - groups.kubernetes_node | length > 0 # drain only if there is at least 
one worker node + block: + - delegate_to: "{{ groups.kubernetes_master[0] }}" + run_once: true + environment: + KUBECONFIG: /home/{{ admin_user.name }}/.kube/config + block: + - name: upgrade | Drain master or node in preparation for maintenance + command: >- + kubectl drain {{ inventory_hostname }} --ignore-daemonsets --delete-local-data + + - name: upgrade | Wait for cluster's readiness + include_tasks: kubernetes/utils/wait.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/reload-kubelet-and-docker.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/reload-kubelet-and-docker.yml new file mode 100644 index 0000000000..31b31c38e1 --- /dev/null +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/reload-kubelet-and-docker.yml @@ -0,0 +1,26 @@ +--- +- name: upgrade | Stop Kubelet + systemd: + state: stopped + name: kubelet + +- name: upgrade | Stop Docker + systemd: + state: stopped + name: docker + +- name: upgrade | Reload daemon + systemd: + daemon_reload: true + +- name: upgrade | Start Docker + systemd: + name: docker + state: started + enabled: true + +- name: upgrade | Start Kubelet + systemd: + name: kubelet + state: started + enabled: true diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/uncordon.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/uncordon.yml new file mode 100644 index 0000000000..7d33869f9c --- /dev/null +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/uncordon.yml @@ -0,0 +1,24 @@ +--- +- name: upgrade | Wait for cluster's readiness + include_tasks: kubernetes/utils/wait.yml + +- when: + - groups.kubernetes_node is defined + - groups.kubernetes_node | length > 0 # master or node is drained only if there is at least one worker node + block: + - delegate_to: "{{ groups.kubernetes_master[0] }}" + run_once: true + environment: + KUBECONFIG: /home/{{ admin_user.name }}/.kube/config + block: + - name: upgrade | Uncordon master or node - mark it as schedulable + command: >- + kubectl uncordon {{ inventory_hostname }} + register: result + until: + - result is succeeded + retries: 20 + delay: 5 + + - name: upgrade | Wait for cluster's readiness + include_tasks: kubernetes/utils/wait.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait-for-kube-apiserver.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait-for-kube-apiserver.yml new file mode 100644 index 0000000000..45b2c71576 --- /dev/null +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait-for-kube-apiserver.yml @@ -0,0 +1,15 @@ +--- +- delegate_to: "{{ groups.kubernetes_master[0] }}" + run_once: true + environment: + KUBECONFIG: /home/{{ admin_user.name }}/.kube/config + block: + - name: Wait for kubectl to access K8s cluster + command: >- + kubectl cluster-info + register: result + until: + - result is succeeded and "running" in result.stdout + retries: 60 # 1min + delay: 1 + changed_when: false diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait.yml new file mode 100644 index 0000000000..9a4dac7bc6 --- /dev/null +++ 
b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait.yml @@ -0,0 +1,43 @@ +--- +- delegate_to: "{{ groups.kubernetes_master[0] }}" + run_once: true + environment: + KUBECONFIG: /home/{{ admin_user.name }}/.kube/config + block: + - name: Wait for kubectl to find and access K8s cluster + include_tasks: kubernetes/utils/wait-for-kube-apiserver.yml + + - name: Wait for all nodes to be ready + command: >- + kubectl get nodes -o json + register: result + until: + - result is succeeded + - result.stdout | from_json | json_query("items[*].status.conditions[?(@.type=='Ready')].status[]") | unique == ["True"] + retries: 600 # 20min + delay: 2 + changed_when: false + + - when: wait_for_pods | default(false) | bool + block: + - name: Wait for all pods to be running + command: >- + kubectl get pods --all-namespaces -o json + register: result + until: + - result is succeeded + - result.stdout | from_json | json_query('items[*].status.phase') | unique == ["Running"] + retries: 600 # 20min + delay: 2 + changed_when: false + + - name: Wait for all pods to be ready + command: >- + kubectl get pods --all-namespaces -o json + register: result + until: + - result is succeeded + - result.stdout | from_json | json_query('items[*].status.conditions[].status') | unique == ["True"] + retries: 600 # 20min + delay: 2 + changed_when: false diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/verify-upgrade.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/verify-upgrade.yml index dd2b47f951..326bd23009 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/verify-upgrade.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/verify-upgrade.yml @@ -1,65 +1,65 @@ --- -- name: Verify cluster version +- name: verify-upgrade | Verify cluster versions + environment: + KUBECONFIG: /home/{{ admin_user.name }}/.kube/config block: - - name: verify-upgrade | Include wait-for-kube-apiserver.yml - include_tasks: wait-for-kube-apiserver.yml + - name: Verify cluster version + when: + - inventory_hostname in groups.kubernetes_master + block: + - name: verify-upgrade | Include wait-for-kube-apiserver.yml + include_tasks: utils/wait-for-kube-apiserver.yml - - name: verify-upgrade | Include get-cluster-version.yml - include_tasks: get-cluster-version.yml # sets cluster_version + - name: verify-upgrade | Include get-cluster-version.yml + include_tasks: get-cluster-version.yml # sets cluster_version - - name: verify-upgrade | Verify cluster version - assert: - that: "'{{ version }}' in cluster_version" - when: - - inventory_hostname in groups['kubernetes_master'] + - name: verify-upgrade | Verify cluster version + assert: + that: version in cluster_version -- name: Verify kubectl version - block: - - name: verify-upgrade | Get kubectl version - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config - shell: >- - set -o pipefail && - kubectl version --client --short -o json | jq --raw-output '.clientVersion.gitVersion' - changed_when: false - register: kubectl_version - args: - executable: /bin/bash + - name: Verify kubectl version + block: + - name: verify-upgrade | Get kubectl version + shell: |- + set -o pipefail && + kubectl version --client --short -o json | jq --raw-output '.clientVersion.gitVersion' + register: kubectl_version + args: &args + executable: /bin/bash + changed_when: false - - name: verify-upgrade | Verify kubectl version - assert: - 
that: "'{{ version }}' in kubectl_version.stdout" + - name: verify-upgrade | Verify kubectl version + assert: + that: version in kubectl_version.stdout -- name: Verify kubeadm version - block: - - name: verify-upgrade | Get kubeadm version - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config - shell: kubeadm version -o short - changed_when: false - register: kubeadm_version + - name: Verify kubeadm version + block: + - name: verify-upgrade | Get kubeadm version + command: >- + kubeadm version -o short + register: kubeadm_version + changed_when: false - - name: verify-upgrade | Verify kubeadm version - assert: - that: "'{{ version }}' in kubeadm_version.stdout" + - name: verify-upgrade | Verify kubeadm version + assert: + that: version in kubeadm_version.stdout -- name: verify-upgrade | Verify kubelet version from API server and get node status - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config - shell: |- - set -o pipefail && - kubectl get nodes {{ inventory_hostname }} | - # get values only for STATUS and VERSION columns, example output: 'Ready v1.14.6' - awk 'NR==1 { for (col=1; col<=NF; col++) { columns[$col] = col } }; - NR>1 { print $columns["STATUS"], $columns["VERSION"] }' - changed_when: false - register: node_status_and_version - until: version in node_status_and_version.stdout - retries: 30 # 1min - delay: 2 - args: - executable: /bin/bash + - name: verify-upgrade | Verify kubelet version from API server and get node status + run_once: true + shell: |- + set -o pipefail && + kubectl get nodes {{ inventory_hostname }} | + # get values only for STATUS and VERSION columns, example output: 'Ready v1.14.6' + awk 'NR==1 { for (col=1; col<=NF; col++) { columns[$col] = col } }; + NR>1 { print $columns["STATUS"], $columns["VERSION"] }' + register: node_status_and_version + until: + - version in node_status_and_version.stdout + retries: 30 # 1min + delay: 2 + args: *args + changed_when: false -- name: verify-upgrade | Verify node status - assert: - that: "'Ready' in node_status_and_version.stdout" + - name: verify-upgrade | Verify node status + assert: + that: "'Ready' in node_status_and_version.stdout" diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/wait-for-kube-apiserver.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/wait-for-kube-apiserver.yml deleted file mode 100644 index 4a3648e8a3..0000000000 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/wait-for-kube-apiserver.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -# Wait for kube-apiserver, e.g. 
after Docker service was restarted on master - -- name: Wait for kubectl to access K8s cluster - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config - shell: kubectl cluster-info - register: output - until: output is succeeded and "running" in output.stdout - retries: 60 # 1min - delay: 1 - changed_when: false \ No newline at end of file diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/wait.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/wait.yml deleted file mode 100644 index f116f71e7f..0000000000 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/wait.yml +++ /dev/null @@ -1,50 +0,0 @@ ---- -- name: Wait for kubectl to find and access K8s cluster - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config - shell: kubectl cluster-info - register: output - until: - - output is succeeded - - "'running' in output.stdout" - retries: 30 # 1min - delay: 2 - changed_when: false - -- name: Wait for all nodes to be ready - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config - shell: kubectl get nodes -o json - register: output - until: - - output is succeeded - - output.stdout|from_json|json_query("items[*].status.conditions[?(@.type=='Ready')].status[]")|unique == ["True"] - retries: 600 # 20min - delay: 2 - changed_when: false - -- name: Wait for all pods to be running - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config - shell: kubectl get pods --all-namespaces -o json - register: output - until: - - output is succeeded - - output.stdout|from_json|json_query('items[*].status.phase')|unique == ["Running"] - retries: 600 # 20min - delay: 2 - changed_when: false - when: wait_for_pods|default(false)|bool - -- name: Wait for all pods to be ready - environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config - shell: kubectl get pods --all-namespaces -o json - register: output - until: - - output is succeeded - - output.stdout|from_json|json_query('items[*].status.conditions[].status')|unique == ["True"] - retries: 600 # 20min - delay: 2 - changed_when: false - when: wait_for_pods|default(false)|bool \ No newline at end of file From 5aa91766e21642a68422dda5354a0cf0ec905b69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Opala?= Date: Thu, 16 Jul 2020 09:34:34 +0200 Subject: [PATCH 04/19] Apply suggestions from code review Co-authored-by: to-bar <46519524+to-bar@users.noreply.github.com> --- .../engine/ansible/AnsibleInventoryUpgrade.py | 4 +-- .../roles/upgrade/tasks/kubernetes.yml | 3 ++- .../tasks/kubernetes/Debian/hold-packages.yml | 2 +- .../kubernetes/Debian/install-kubeadm.yml | 6 ++--- .../kubernetes/RedHat/install-kubeadm.yml | 2 +- .../install-packages-cni-in-kubelet.yml | 2 +- .../kubernetes/RedHat/install-packages.yml | 2 +- .../tasks/kubernetes/get-cluster-version.yml | 4 +-- .../reconfigure-auth-service-app.yml | 2 +- .../tasks/kubernetes/upgrade-master0.yml | 3 +++ .../tasks/kubernetes/upgrade-masterN.yml | 25 +++++++++++-------- 11 files changed, 31 insertions(+), 24 deletions(-) diff --git a/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py b/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py index 70e8f06e7c..17c86dc2f1 100644 --- a/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py +++ b/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py @@ -4,8 +4,8 @@ from ansible.inventory.manager import InventoryManager from cli.helpers.Step import Step -from 
cli.helpers.build_saver import get_inventory_path_for_build, check_build_output_version, BUILD_LEGACY -from cli.helpers.build_saver import save_inventory, MANIFEST_FILE_NAME +from cli.helpers.build_saver import (get_inventory_path_for_build, check_build_output_version, BUILD_LEGACY, + save_inventory, MANIFEST_FILE_NAME) from cli.helpers.data_loader import load_yamls_file from cli.helpers.objdict_helpers import dict_to_objdict from cli.helpers.doc_list_helpers import select_single diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes.yml index 556117d5cb..55c4e156aa 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes.yml @@ -1,5 +1,6 @@ --- -- delegate_to: "{{ groups.kubernetes_master[0] }}" +- name: Wait for kube-apiserver then get cluster and kubelet version + delegate_to: "{{ groups.kubernetes_master[0] }}" block: - name: Include wait-for-kube-apiserver.yml import_tasks: kubernetes/utils/wait-for-kube-apiserver.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/hold-packages.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/hold-packages.yml index 2e98e862c1..406236d788 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/hold-packages.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/hold-packages.yml @@ -1,5 +1,5 @@ --- -- name: "upgrade-master | Hold packages: {{ packages | join( ', ' ) }}" +- name: Hold packages: {{ packages | join( ', ' ) }} command: >- apt-mark hold {{ packages | join( ' ' ) }} vars: diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-kubeadm.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-kubeadm.yml index eadc4ca0f8..2f8c6b80c3 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-kubeadm.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-kubeadm.yml @@ -1,15 +1,15 @@ --- -- name: upgrade-master | Unhold kubeadm +- name: Unhold kubeadm package command: >- apt-mark unhold kubeadm - name: >- - install-packages | Install kubeadm {{ version }} packages for Debian family + Install kubeadm {{ version }} package for Debian family apt: name: kubeadm={{ version }}-00 update_cache: true state: present -- name: upgrade-master | Hold kubeadm +- name: Hold kubeadm package command: >- apt-mark hold kubeadm diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-kubeadm.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-kubeadm.yml index a103b72292..8a1f0b1c17 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-kubeadm.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-kubeadm.yml @@ -1,5 +1,5 @@ --- -- name: "install-packages | Install kubeadm-{{ version }} package for RedHat family" +- name: Install kubeadm-{{ version }} package for RedHat family yum: name: kubeadm-{{ version }}-0 update_cache: true diff --git 
a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages-cni-in-kubelet.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages-cni-in-kubelet.yml index fbfb0ab907..2c53a02504 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages-cni-in-kubelet.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages-cni-in-kubelet.yml @@ -1,5 +1,5 @@ --- -- name: "install-packages | Install kubelet-{{ version }} and kubectl-{{ version }} packages for RedHat family" +- name: "install-packages | Install kubelet-{{ version }} and kubectl-{{ version }} for RedHat family" yum: name: - kubelet-{{ version }}-0 # removes (replaces) kubernetes-cni when full version is 1.17.7-0 but not when 1.17.7-1 diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages.yml index 3d33aa95c8..4f88ce4ce8 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages.yml @@ -1,5 +1,5 @@ --- -- name: "install-packages | Install kubernetes-cni-{{ cni_version }}, kubelet-{{ version }}, kubectl-{{ version }} packages for RedHat family" +- name: "install-packages | Install kubernetes-cni-{{ cni_version }}, kubelet-{{ version }} and kubectl-{{ version }} for RedHat family" yum: name: - kubernetes-cni-{{ cni_version }}-0 diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-cluster-version.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-cluster-version.yml index fc83ab252e..7babec93cc 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-cluster-version.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-cluster-version.yml @@ -2,8 +2,8 @@ - name: Get cluster version environment: KUBECONFIG: /home/{{ admin_user.name }}/.kube/config - shell: | - set -o errexit -o pipefail + shell: >- + set -o pipefail && kubectl version --short -o json | jq --raw-output '.serverVersion.gitVersion' register: cluster_version changed_when: false diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml index a6644e4bdf..09d1b4b445 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml @@ -1,5 +1,5 @@ --- -- name: "Change keycloak stateful set to use {{ image_registry_address }}" +- name: Change keycloak statefulset to use {{ image_registry_address }} run_once: true environment: KUBECONFIG: /home/{{ admin_user.name }}/.kube/config diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml index 26a775c84c..70735d40ce 100644 --- 
a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml @@ -118,3 +118,6 @@ - name: upgrade-master | Uncordon master - mark master as schedulable include_tasks: utils/uncordon.yml + +- name: upgrade-master | Verify component versions and node status + include_tasks: kubernetes/verify-upgrade.yml
diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-masterN.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-masterN.yml index e0b0885c05..7a6d18af60 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-masterN.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-masterN.yml @@ -1,17 +1,17 @@ --- -- name: upgrade-master | Drain master in preparation for maintenance +- name: upgrade-masterN | Drain master in preparation for maintenance include_tasks: utils/drain.yml -- name: upgrade-master | Upgrade, configure packages +- name: Upgrade, configure packages block: - - name: upgrade-master | Hold packages for Debian family + - name: upgrade-masterN | Hold packages for Debian family include_tasks: Debian/hold-packages.yml when: ansible_os_family == "Debian" - - name: upgrade-master | Install kubeadm + - name: upgrade-masterN | Install kubeadm include_tasks: "{{ ansible_os_family }}/install-kubeadm.yml" - - name: upgrade-master | Upgrade master {{ inventory_hostname }} + - name: upgrade-masterN | Upgrade master {{ inventory_hostname }} command: >- kubeadm upgrade node register: result @@ -21,7 +21,7 @@ delay: 30 changed_when: false - - name: Install kubelet and kubectl for {{ version }} + - name: upgrade-masterN | Install kubelet and kubectl for {{ version }} include_tasks: >- {%- if cni_in_kubelet is undefined or not cni_in_kubelet -%} {{ ansible_os_family }}/install-packages.yml @@ -31,17 +31,20 @@ when: - result is succeeded -- name: upgrade-master | Wait for cluster's readiness +- name: upgrade-masterN | Wait for cluster's readiness include_tasks: utils/wait.yml -- name: upgrade-master | Upgrade kubeadm-config.yml if exists +- name: upgrade-masterN | Upgrade kubeadm-config.yml if exists include_tasks: upgrade-kubeadm-config.yml -- name: upgrade-master | Upgrade Docker # this may restart Docker daemon +- name: upgrade-masterN | Upgrade Docker # this may restart Docker daemon include_tasks: docker.yml -- name: upgrade-master | Reload kubelet and docker +- name: upgrade-masterN | Reload kubelet and docker include_tasks: utils/reload-kubelet-and-docker.yml -- name: upgrade-master | Uncordon master - mark master as schedulable +- name: upgrade-masterN | Uncordon master - mark master as schedulable include_tasks: utils/uncordon.yml + +- name: upgrade-masterN | Verify component versions and node status + include_tasks: kubernetes/verify-upgrade.yml
From 1c1edd1e997eb82020a744a44d23a6dfc939dcf5 Mon Sep 17 00:00:00 2001 From: Michal Opala Date: Wed, 15 Jul 2020 13:24:26 +0200 Subject: [PATCH 05/19] upgrade: removing unneeded kubeconfig from k8s nodes (security fix) --- .../tasks/apply-dashboard.yml | 3 +-- .../tasks/apply-network-components.yml | 2 +- .../tasks/cni-plugins/calico.yml | 4 ++-- .../tasks/cni-plugins/canal.yml | 4 ++-- .../tasks/cni-plugins/flannel.yml | 4 ++-- .../tasks/deployments/apply-file.yml | 2 +- .../roles/upgrade/tasks/ensure-kubeconfig.yml | 20 -------------------
.../tasks/kubernetes/downgrade-coredns.yml | 2 +- .../tasks/kubernetes/get-cluster-version.yml | 2 +- .../tasks/kubernetes/get-kubelet-version.yml | 2 +- .../patch-kubeadm-etcd-encryption.yml | 2 +- .../reconfigure-auth-service-app.yml | 2 +- .../kubernetes/reconfigure-rabbitmq-app.yml | 2 +- .../update-kubeadm-image-repository.yml | 2 +- .../upgrade-kubernetes-dashboard.yml | 2 +- .../kubernetes/upgrade-network-components.yml | 2 +- .../upgrade/tasks/kubernetes/utils/drain.yml | 2 +- .../tasks/kubernetes/utils/uncordon.yml | 2 +- .../utils/wait-for-kube-apiserver.yml | 2 +- .../upgrade/tasks/kubernetes/utils/wait.yml | 2 +- .../tasks/kubernetes/verify-upgrade.yml | 2 +- .../upgrade/tasks/provide-kubeconfig.yml | 12 ----------- .../data/common/ansible/playbooks/upgrade.yml | 11 ---------- 23 files changed, 23 insertions(+), 67 deletions(-) delete mode 100644 core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/ensure-kubeconfig.yml delete mode 100644 core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/provide-kubeconfig.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/apply-dashboard.yml b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/apply-dashboard.yml index 1ffc5c4edd..98e17ec1a3 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/apply-dashboard.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/apply-dashboard.yml @@ -5,9 +5,8 @@ include_tasks: deployments/deploy-template.yml - name: Check if kubernetes-dashboard is already deployed - become_user: "{{ admin_user.name }}" environment: - KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" + KUBECONFIG: /etc/kubernetes/admin.conf shell: | kubectl get pods \ --namespace kubernetes-dashboard \ diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/apply-network-components.yml b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/apply-network-components.yml index bff0047e1f..d868adc148 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/apply-network-components.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/apply-network-components.yml @@ -10,7 +10,7 @@ args: executable: /bin/bash environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config + KUBECONFIG: /etc/kubernetes/admin.conf register: wait_for_cni_plugin until: wait_for_cni_plugin is succeeded retries: 30 diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/cni-plugins/calico.yml b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/cni-plugins/calico.yml index 581e617259..3dac94ee26 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/cni-plugins/calico.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/cni-plugins/calico.yml @@ -47,5 +47,5 @@ - name: Apply calico definition environment: - KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" - command: kubectl apply -f /home/{{ admin_user.name }}/calico.yml \ No newline at end of file + KUBECONFIG: /etc/kubernetes/admin.conf + command: kubectl apply -f /home/{{ admin_user.name }}/calico.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/cni-plugins/canal.yml 
b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/cni-plugins/canal.yml index 13112e9904..5d2cd57a39 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/cni-plugins/canal.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/cni-plugins/canal.yml @@ -15,5 +15,5 @@ - name: Apply canal deployment environment: - KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" - command: kubectl apply -f /home/{{ admin_user.name }}/canal.yml \ No newline at end of file + KUBECONFIG: /etc/kubernetes/admin.conf + command: kubectl apply -f /home/{{ admin_user.name }}/canal.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/cni-plugins/flannel.yml b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/cni-plugins/flannel.yml index ac4faa504b..4eb639b32d 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/cni-plugins/flannel.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/cni-plugins/flannel.yml @@ -8,5 +8,5 @@ - name: Apply flannel definition environment: - KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" - command: kubectl apply -f /home/{{ admin_user.name }}/kube-flannel.yml \ No newline at end of file + KUBECONFIG: /etc/kubernetes/admin.conf + command: kubectl apply -f /home/{{ admin_user.name }}/kube-flannel.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/deployments/apply-file.yml b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/deployments/apply-file.yml index 271203f321..5a762ffd48 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/deployments/apply-file.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/deployments/apply-file.yml @@ -1,7 +1,7 @@ --- - name: "Apply /etc/epiphany/manifests/{{ file_name }} file" environment: - KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" + KUBECONFIG: /etc/kubernetes/admin.conf shell: | kubectl apply \ -f /etc/epiphany/manifests/{{ file_name }} diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/ensure-kubeconfig.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/ensure-kubeconfig.yml deleted file mode 100644 index dfcba5accc..0000000000 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/ensure-kubeconfig.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- name: Create .kube - file: - path: "/home/{{ admin_user.name }}/.kube/" - state: directory - owner: "{{ admin_user.name }}" - group: "{{ admin_user.name }}" - -- name: Create .kube/config - copy: - src: /etc/kubernetes/admin.conf - dest: "/home/{{ admin_user.name }}/.kube/config" - remote_src: true - owner: "{{ admin_user.name }}" - group: "{{ admin_user.name }}" - -- name: Slurp .kube/config - slurp: - src: "/home/{{ admin_user.name }}/.kube/config" - register: kube_config diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/downgrade-coredns.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/downgrade-coredns.yml index d0dadd85c3..123e08e1ab 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/downgrade-coredns.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/downgrade-coredns.yml @@ -35,7 +35,7 @@ - name: "upgrade-master 
| Apply /etc/epiphany/manifests/{{ file_name }} file" environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config + KUBECONFIG: /etc/kubernetes/admin.conf command: | kubectl apply \ -f /etc/epiphany/manifests/{{ file_name }} diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-cluster-version.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-cluster-version.yml index 7babec93cc..5e4fe6e23a 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-cluster-version.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-cluster-version.yml @@ -1,7 +1,7 @@ --- - name: Get cluster version environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config + KUBECONFIG: /etc/kubernetes/admin.conf shell: >- set -o pipefail && kubectl version --short -o json | jq --raw-output '.serverVersion.gitVersion' diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-kubelet-version.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-kubelet-version.yml index 6ba9ea6832..42f85e6aa3 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-kubelet-version.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-kubelet-version.yml @@ -1,7 +1,7 @@ --- - name: Get kubelet version from API server environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config + KUBECONFIG: /etc/kubernetes/admin.conf command: >- kubectl get node {{ inventory_hostname }} -o jsonpath='{.status.nodeInfo.kubeletVersion}' register: kubelet_version diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/patch-kubeadm-etcd-encryption.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/patch-kubeadm-etcd-encryption.yml index 0ce23e4cd3..8bb7386f62 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/patch-kubeadm-etcd-encryption.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/patch-kubeadm-etcd-encryption.yml @@ -16,7 +16,7 @@ - command_grep_encryption_flag.rc == 0 # encryption enabled run_once: true # makes no sense to execute it more than once (would be redundant) environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config + KUBECONFIG: /etc/kubernetes/admin.conf block: - name: Get kubeadm-config ConfigMap command: | diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml index 09d1b4b445..6c2ec62f92 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml @@ -2,7 +2,7 @@ - name: Change keycloak statefulset to use {{ image_registry_address }} run_once: true environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config + KUBECONFIG: /etc/kubernetes/admin.conf block: - name: upgrade-master | Get keycloak statefulset name shell: |- diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-rabbitmq-app.yml 
b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-rabbitmq-app.yml index bbc071408f..7675c0e61f 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-rabbitmq-app.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-rabbitmq-app.yml @@ -2,7 +2,7 @@ - name: Change rabbitmq stateful set to use {{ image_registry_address }} run_once: true environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config + KUBECONFIG: /etc/kubernetes/admin.conf block: - name: upgrade-master | Get rabbitmq statefulset name shell: |- diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/update-kubeadm-image-repository.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/update-kubeadm-image-repository.yml index 54a52146b0..18e4a17386 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/update-kubeadm-image-repository.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/update-kubeadm-image-repository.yml @@ -2,7 +2,7 @@ - name: upgrade-master | Patch imageRepository in kubeadm-config ConfigMap run_once: true environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config + KUBECONFIG: /etc/kubernetes/admin.conf block: - name: upgrade-master | Get kubeadm-config configmap command: | diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-kubernetes-dashboard.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-kubernetes-dashboard.yml index 9f4d358f24..3bd31c4171 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-kubernetes-dashboard.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-kubernetes-dashboard.yml @@ -4,7 +4,7 @@ - name: uprgade-kubernetes-dashboard | Delete old kubernetes dashboard run_once: true environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config + KUBECONFIG: /etc/kubernetes/admin.conf block: - name: Check if any resource with label 'k8s-app=kubernetes-dashboard' exists in kube-system command: | diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-network-components.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-network-components.yml index 17d9145195..248a7a01c2 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-network-components.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-network-components.yml @@ -2,7 +2,7 @@ - name: upgrade-master | Determine which network plugin is used run_once: true environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config + KUBECONFIG: /etc/kubernetes/admin.conf block: - name: upgrade-master | Set default cni plugin - flannel set_fact: diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/drain.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/drain.yml index c3917efc1f..a6e5f8df37 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/drain.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/drain.yml @@ -9,7 +9,7 @@ - delegate_to: "{{ groups.kubernetes_master[0] 
}}" run_once: true environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config + KUBECONFIG: /etc/kubernetes/admin.conf block: - name: upgrade | Drain master or node in preparation for maintenance command: >- diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/uncordon.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/uncordon.yml index 7d33869f9c..c2b32274c4 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/uncordon.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/uncordon.yml @@ -9,7 +9,7 @@ - delegate_to: "{{ groups.kubernetes_master[0] }}" run_once: true environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config + KUBECONFIG: /etc/kubernetes/admin.conf block: - name: upgrade | Uncordon master or node - mark it as schedulable command: >- diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait-for-kube-apiserver.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait-for-kube-apiserver.yml index 45b2c71576..c0c6ae543e 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait-for-kube-apiserver.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait-for-kube-apiserver.yml @@ -2,7 +2,7 @@ - delegate_to: "{{ groups.kubernetes_master[0] }}" run_once: true environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config + KUBECONFIG: /etc/kubernetes/admin.conf block: - name: Wait for kubectl to access K8s cluster command: >- diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait.yml index 9a4dac7bc6..10a07e48ae 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait.yml @@ -2,7 +2,7 @@ - delegate_to: "{{ groups.kubernetes_master[0] }}" run_once: true environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config + KUBECONFIG: /etc/kubernetes/admin.conf block: - name: Wait for kubectl to find and access K8s cluster include_tasks: kubernetes/utils/wait-for-kube-apiserver.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/verify-upgrade.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/verify-upgrade.yml index 326bd23009..0d6f2e2707 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/verify-upgrade.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/verify-upgrade.yml @@ -1,7 +1,7 @@ --- - name: verify-upgrade | Verify cluster versions environment: - KUBECONFIG: /home/{{ admin_user.name }}/.kube/config + KUBECONFIG: /etc/kubernetes/admin.conf block: - name: Verify cluster version when: diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/provide-kubeconfig.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/provide-kubeconfig.yml deleted file mode 100644 index 84125be03b..0000000000 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/provide-kubeconfig.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -- name: Create .kube directory - file: - path: "/home/{{ 
admin_user.name }}/.kube/" - state: directory - owner: "{{ admin_user.name }}" - group: "{{ admin_user.name }}" - -- name: Provide .kube/config - copy: - dest: "/home/{{ admin_user.name }}/.kube/config" - content: "{{ hostvars[groups.kubernetes_master.0].kube_config.content | b64decode }}" diff --git a/core/src/epicli/data/common/ansible/playbooks/upgrade.yml b/core/src/epicli/data/common/ansible/playbooks/upgrade.yml index 0e447e6b86..0589f8dd40 100644 --- a/core/src/epicli/data/common/ansible/playbooks/upgrade.yml +++ b/core/src/epicli/data/common/ansible/playbooks/upgrade.yml @@ -9,17 +9,6 @@ - import_role: name: kubernetes_common tasks_from: gather-facts - - import_role: - name: upgrade - tasks_from: ensure-kubeconfig - -- hosts: kubernetes_node - become: true - become_method: sudo - tasks: - - import_role: - name: upgrade - tasks_from: provide-kubeconfig - hosts: all become: true From 4fa37d43de858c3738d75d5ca6bb0b5dbc001f9b Mon Sep 17 00:00:00 2001 From: Michal Opala Date: Thu, 16 Jul 2020 21:05:07 +0200 Subject: [PATCH 06/19] upgrade: statefulset patching refactor --- .../reconfigure-auth-service-app.yml | 59 ++--------------- .../kubernetes/reconfigure-rabbitmq-app.yml | 59 ++--------------- .../utils/patch-statefulset-step.yml | 65 +++++++++++++++++++ .../kubernetes/utils/patch-statefulset.yml | 37 +++++++++++ 4 files changed, 110 insertions(+), 110 deletions(-) create mode 100644 core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset-step.yml create mode 100644 core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml index 6c2ec62f92..1b6ef33be5 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml @@ -1,56 +1,5 @@ --- -- name: Change keycloak statefulset to use {{ image_registry_address }} - run_once: true - environment: - KUBECONFIG: /etc/kubernetes/admin.conf - block: - - name: upgrade-master | Get keycloak statefulset name - shell: |- - kubectl get statefulsets.apps --all-namespaces -o=jsonpath={{ _jsonpath }} | - grep -i keycloak | - awk '{print $1}' - vars: - _jsonpath: >- - '{range .items[*]}{.metadata.name}{"\t"}{.spec.template.spec.containers[].image}{"\n"}{end}' - changed_when: false - register: keycloak_ss_name - args: &args - executable: /bin/bash - - - name: upgrade-master | Get keycloak namespace - shell: |- - kubectl get statefulsets.apps --all-namespaces -o=jsonpath={{ _jsonpath }} | - grep -i keycloak | - awk '{print $1}' - vars: - _jsonpath: >- - '{range .items[*]}{.metadata.namespace}{"\t"}{.spec.template.spec.containers[].image}{"\n"}{end}' - changed_when: false - register: keycloak_namespace - args: *args - - - name: upgrade-master | Get keycloak image - shell: |- - kubectl get statefulsets.apps {{ keycloak_ss_name.stdout }} -n {{ keycloak_namespace.stdout }} -o=jsonpath={{ _jsonpath }} - vars: - _jsonpath: >- - '{$.spec.template.spec.containers[:1].image}' - changed_when: false - register: keycloak_repository - when: - - not keycloak_namespace.stdout == "" - - - name: upgrade-master | Patch keycloak to use {{ image_registry_address }} - shell: |- - 
kubectl patch statefulsets.apps {{ keycloak_ss_name.stdout }} -n {{ keycloak_namespace.stdout }} --patch '{{ _patch | to_json }}' - vars: - _patch: - spec: - template: - spec: - containers: - - name: "{{ keycloak_ss_name.stdout }}" - image: "{{ image_registry_address }}/{{ keycloak_repository.stdout }}" - when: - - not keycloak_namespace.stdout == "" - - not image_registry_address in keycloak_repository.stdout +- name: upgrade-master | Patch keycloak's statefulset + include_tasks: utils/patch-statefulset.yml + vars: + image_regexp: 'jboss/keycloak:' diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-rabbitmq-app.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-rabbitmq-app.yml index 7675c0e61f..eaeb211c98 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-rabbitmq-app.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-rabbitmq-app.yml @@ -1,56 +1,5 @@ --- -- name: Change rabbitmq stateful set to use {{ image_registry_address }} - run_once: true - environment: - KUBECONFIG: /etc/kubernetes/admin.conf - block: - - name: upgrade-master | Get rabbitmq statefulset name - shell: |- - kubectl get statefulsets.apps --all-namespaces -o=jsonpath={{ _jsonpath }} | - grep -i rabbitmq | - awk '{print $1}' - vars: - _jsonpath: >- - '{range .items[*]}{.metadata.name}{"\t"}{.spec.template.spec.containers[].image}{"\n"}{end}' - changed_when: false - register: rabbit_mq_ss_name - args: &args - executable: /bin/bash - - - name: upgrade-master | Get rabbitmq namespace - shell: |- - kubectl get statefulsets.apps --all-namespaces -o=jsonpath={{ _jsonpath }} | - grep -i rabbitmq | - awk '{print $1}' - vars: - _jsonpath: >- - '{range .items[*]}{.metadata.namespace}{"\t"}{.spec.template.spec.containers[].image}{"\n"}{end}' - changed_when: false - register: rabbit_mq_namespace - args: *args - - - name: upgrade-master | Get rabbitmq image - shell: |- - kubectl get statefulsets.apps {{ rabbit_mq_ss_name.stdout }} -n {{ rabbit_mq_namespace.stdout }} -o=jsonpath={{ _jsonpath }} - vars: - _jsonpath: >- - '{$.spec.template.spec.containers[:1].image}' - changed_when: false - register: rabbit_mq_repository - when: - - not rabbit_mq_namespace.stdout == "" - - - name: upgrade-master | Patch rabbitmq to use {{ image_registry_address }} - shell: |- - kubectl patch statefulsets.apps {{ rabbit_mq_ss_name.stdout }} -n {{ rabbit_mq_namespace.stdout }} --patch '{{ _patch | to_json }}' - vars: - _patch: - spec: - template: - spec: - containers: - - name: "{{ rabbit_mq_ss_name.stdout }}" - image: "{{ image_registry_address }}/{{ rabbit_mq_repository.stdout }}" - when: - - not rabbit_mq_namespace.stdout == "" - - not image_registry_address in rabbit_mq_repository.stdout +- name: upgrade-master | Patch rabbitmq's statefulset + include_tasks: utils/patch-statefulset.yml + vars: + image_regexp: 'rabbitmq:' diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset-step.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset-step.yml new file mode 100644 index 0000000000..396ee0701a --- /dev/null +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset-step.yml @@ -0,0 +1,65 @@ +--- +# Expected vars: +# - image_registry_address +# - image_regexp +# - _namespace +# - _statefulset +# - 
_documents + +- name: upgrade-k8s | Patch statefulset + when: + - _names_and_images_updated != _names_and_images + - _containers_updated | length > 0 + + block: + - name: upgrade-k8s | Patch statefulset {{ _statefulset }} (step) + environment: + KUBECONFIG: /etc/kubernetes/admin.conf + command: | + kubectl patch statefulsets.apps {{ _statefulset }} \ + --namespace {{ _namespace }} \ + --patch '{{ _patch | to_json }}' + + vars: + # Select correct documents + _documents_filtered: >- + {{ _documents | selectattr('metadata.namespace', 'eq', _namespace) + | selectattr('metadata.name', 'eq', _statefulset) + | list }} + + _containers: >- + {{ _documents_filtered | map(attribute='spec.template.spec.containers') | flatten }} + + _names: >- + {{ _containers | map(attribute='name') | list }} + + _images: >- + {{ _containers | map(attribute='image') | list }} + + # Prepend image urls with the registry address + _images_updated: >- + {{ _images | map('regex_replace', '^' ~ image_registry_address ~ '/', '') + | map('regex_replace', '^', image_registry_address ~ '/') + | list }} + + _names_and_images: >- + {{ _names | zip(_images) | list }} + + _names_and_images_updated: >- + {{ _names | zip(_images_updated) | list }} + + # Update containers (yields list of dictionaries) + _containers_updated: >- + {%- set output = [] -%} + {%- for name, image in _names_and_images_updated -%} + {%- if image | regex_search(image_regexp) -%} + {{- output.append(dict(name=name, image=image)) -}} + {%- endif -%} + {%- endfor -%} + {{- output -}} + + _patch: + spec: + template: + spec: + containers: "{{ _containers_updated }}"
diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset.yml new file mode 100644 index 0000000000..5cfaae97e9 --- /dev/null +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset.yml @@ -0,0 +1,37 @@ +--- +# Expected vars: +# - image_registry_address +# - image_regexp + +- name: upgrade-k8s | Get all statefulsets + environment: + KUBECONFIG: /etc/kubernetes/admin.conf + command: | + kubectl get statefulsets.apps \ + --all-namespaces \ + --output json + register: command_statefulsets + changed_when: false + +- name: upgrade-k8s | Patch all statefulsets + block: + - name: upgrade-k8s | Patch statefulset {{ _statefulset }} + include_tasks: utils/patch-statefulset-step.yml + vars: + _namespace: "{{ item.0 }}" + _statefulset: "{{ item.1 }}" + loop: >- + {{ _namespaces | zip(_statefulsets) | list }} + + vars: + # Parse output from kubectl + _documents: >- + {{ (command_statefulsets.stdout | from_json)['items'] }} + + # Extract namespaces from all documents + _namespaces: >- + {{ _documents | map(attribute='metadata.namespace') | list }} + + # Extract statefulset names from all documents + _statefulsets: >- + {{ _documents | map(attribute='metadata.name') | list }}
From b698b40ba15233fff9a5373bab4e776a4daa72ce Mon Sep 17 00:00:00 2001 From: Michal Opala Date: Thu, 16 Jul 2020 21:08:54 +0200 Subject: [PATCH 07/19] upgrade: cleanups and refactor for logs --- .../roles/upgrade/tasks/kubernetes.yml | 30 ++++++------- .../tasks/kubernetes/Debian/hold-packages.yml | 2 +- .../kubernetes/Debian/install-kubeadm.yml | 7 ++- .../install-packages-cni-in-kubelet.yml | 10 ++--- .../kubernetes/Debian/install-packages.yml | 10 ++--- .../kubernetes/RedHat/install-kubeadm.yml | 2 +-
.../install-packages-cni-in-kubelet.yml | 2 +- .../kubernetes/RedHat/install-packages.yml | 2 +- .../tasks/kubernetes/downgrade-coredns.yml | 8 ++-- .../tasks/kubernetes/get-cluster-version.yml | 2 +- .../patch-kubeadm-etcd-encryption.yml | 6 +-- .../reconfigure-auth-service-app.yml | 2 +- .../kubernetes/reconfigure-rabbitmq-app.yml | 2 +- .../update-kubeadm-image-repository.yml | 6 +-- .../tasks/kubernetes/upgrade-coredns.yml | 2 +- .../upgrade-kubernetes-dashboard.yml | 16 +++---- .../tasks/kubernetes/upgrade-master0.yml | 44 +++++++++---------- .../tasks/kubernetes/upgrade-masterN.yml | 24 +++++----- .../kubernetes/upgrade-network-components.yml | 20 ++++----- .../upgrade/tasks/kubernetes/upgrade-node.yml | 22 +++++----- .../upgrade/tasks/kubernetes/utils/drain.yml | 24 ++++++---- .../utils/patch-statefulset-step.yml | 6 +-- .../kubernetes/utils/patch-statefulset.yml | 8 ++-- .../utils/reload-kubelet-and-docker.yml | 10 ++--- .../tasks/kubernetes/utils/uncordon.yml | 27 ++++++------ .../utils/wait-for-kube-apiserver.yml | 21 +++++---- .../upgrade/tasks/kubernetes/utils/wait.yml | 20 +++++---- .../tasks/kubernetes/verify-upgrade.yml | 28 ++++++------ 28 files changed, 185 insertions(+), 178 deletions(-) diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes.yml index 55c4e156aa..756e33312f 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes.yml @@ -1,47 +1,45 @@ --- -- name: Wait for kube-apiserver then get cluster and kubelet version +- name: k8s | Wait for kube-apiserver then get cluster and kubelet version delegate_to: "{{ groups.kubernetes_master[0] }}" block: - - name: Include wait-for-kube-apiserver.yml + - name: k8s | Include wait-for-kube-apiserver.yml import_tasks: kubernetes/utils/wait-for-kube-apiserver.yml - - name: Include get-cluster-version.yml + - name: k8s | Include get-cluster-version.yml import_tasks: kubernetes/get-cluster-version.yml # sets cluster_version -- name: Check if upgrade from current K8s version is supported +- name: k8s | Check if upgrade from current K8s version is supported assert: that: cluster_version is version('v1.14.6', '>=') fail_msg: Your Kubernetes version ({{ cluster_version }}) is not supported by this version of Epiphany which requires at least version 1.14.6 (Epiphany v0.4.4). For more information, refer to the documentation. 
quiet: true -- name: Include get-kubelet-version.yml +- name: k8s | Include get-kubelet-version.yml import_tasks: kubernetes/get-kubelet-version.yml # sets kubelet_version delegate_to: "{{ groups['kubernetes_master'][0] }}" -- vars: +- name: k8s | Upgrade masters then nodes + vars: version: "{{ ver }}" cni_version: "{{ cni_ver }}" block: - - when: cluster_version is version('v' + version, '<=') + - name: k8s | Upgrade masters + when: cluster_version is version('v' + version, '<=') block: - - name: Upgrade master0 to v{{ version }} + - name: k8s | Upgrade first master to v{{ version }} include_tasks: kubernetes/upgrade-master0.yml when: - inventory_hostname == groups.kubernetes_master[0] - - name: Upgrade masterN to v{{ version }} + - name: k8s | Upgrade next master to v{{ version }} include_tasks: kubernetes/upgrade-masterN.yml when: - inventory_hostname in groups.kubernetes_master[1:] - - name: upgrade-master | Verify cluster version - include_tasks: kubernetes/verify-upgrade.yml - when: - - inventory_hostname == groups.kubernetes_master[0] - - - when: kubelet_version is version('v' + version, '<=') + - name: k8s | Upgrade nodes + when: kubelet_version is version('v' + version, '<=') block: - - name: Upgrade node to v{{ version }} + - name: k8s | Upgrade node to v{{ version }} include_tasks: kubernetes/upgrade-node.yml when: - groups.kubernetes_node is defined diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/hold-packages.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/hold-packages.yml index 406236d788..fee994bbf6 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/hold-packages.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/hold-packages.yml @@ -1,5 +1,5 @@ --- -- name: Hold packages: {{ packages | join( ', ' ) }} +- name: k8s/install | Hold packages {{ packages | join( ', ' ) }} command: >- apt-mark hold {{ packages | join( ' ' ) }} vars: diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-kubeadm.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-kubeadm.yml index 2f8c6b80c3..145a3dea60 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-kubeadm.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-kubeadm.yml @@ -1,15 +1,14 @@ --- -- name: Unhold kubeadm package +- name: k8s/install | Unhold kubeadm package command: >- apt-mark unhold kubeadm -- name: >- - Install kubeadm {{ version }} package for Debian family +- name: k8s/install | Install kubeadm {{ version }} package for Debian family apt: name: kubeadm={{ version }}-00 update_cache: true state: present -- name: Hold kubeadm package +- name: k8s/install | Hold kubeadm package command: >- apt-mark hold kubeadm diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-packages-cni-in-kubelet.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-packages-cni-in-kubelet.yml index 5ecc31a6d7..6b17dee48f 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-packages-cni-in-kubelet.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-packages-cni-in-kubelet.yml @@ -1,11 
+1,11 @@ --- -- name: install-packages | Get information about installed packages as facts +- name: k8s/install | Get information about installed packages as facts package_facts: manager: auto changed_when: false # Unhold before removing to avoid error -- name: "install-packages | Unhold packages: {{ packages | join( ', ' ) }}" +- name: k8s/install | Unhold packages {{ packages | join( ', ' ) }} command: >- apt-mark unhold {{ packages | join( ' ' ) }} vars: @@ -16,7 +16,7 @@ ['kubelet', 'kubectl'] {%- endif -%} -- name: install-packages | Remove newer Debian packages installed as dependencies if they exist # as there is no allow_downgrade parameter in ansible apt module +- name: k8s/install | Remove newer Debian packages installed as dependencies if they exist # as there is no allow_downgrade parameter in ansible apt module apt: name: - kubelet @@ -25,7 +25,7 @@ when: ansible_facts.packages['kubelet'][0].version is version(version + '-00', '>') or ansible_facts.packages['kubectl'][0].version is version(version + '-00', '>') -- name: "install-packages | Install kubelet {{ version }} and kubectl {{ version }} packages for Debian family" +- name: k8s/install | Install kubelet {{ version }} and kubectl {{ version }} packages for Debian family apt: name: - kubelet={{ version }}-00 # removes (replaces) kubernetes-cni when full version is 1.17.7-00 but not when 1.17.7-01 @@ -33,6 +33,6 @@ update_cache: yes state: present -- name: install-packages | Hold kubelet and kubectl +- name: k8s/install | Hold kubelet and kubectl command: >- apt-mark hold kubelet kubectl diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-packages.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-packages.yml index 6e40ec6584..32ff1d3c6e 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-packages.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-packages.yml @@ -1,15 +1,15 @@ --- -- name: install-packages | Get information about installed packages as facts +- name: k8s/install | Get information about installed packages as facts package_facts: manager: auto changed_when: false # Unhold before removing to avoid error -- name: install-packages | Unhold kubelet, kubectl and kubernetes-cni +- name: k8s/install | Unhold kubelet, kubectl and kubernetes-cni command: >- apt-mark unhold kubelet kubectl kubernetes-cni -- name: install-packages | Remove newer Debian packages installed as dependencies if they exist # as there is no allow_downgrade parameter in ansible apt module +- name: k8s/install | Remove newer Debian packages installed as dependencies if they exist # as there is no allow_downgrade parameter in ansible apt module apt: name: kubernetes-cni kubelet kubectl state: absent @@ -17,7 +17,7 @@ or ansible_facts.packages['kubelet'][0].version is version (version + '-00', '>') or ansible_facts.packages['kubectl'][0].version is version (version + '-00', '>') -- name: "install-packages | Install kubernetes-cni {{ cni_version }}, kubelet {{ version }}, kubectl {{ version }} packages for Debian family" +- name: k8s/install | Install kubernetes-cni {{ cni_version }}, kubelet {{ version }}, kubectl {{ version }} packages for Debian family apt: name: - kubernetes-cni={{ cni_version }}-00 @@ -26,6 +26,6 @@ update_cache: yes state: present -- name: install-packages | Hold kubelet, kubectl and kubernetes-cni +- name: k8s/install | 
Hold kubelet, kubectl and kubernetes-cni command: >- apt-mark hold kubelet kubectl kubernetes-cni diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-kubeadm.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-kubeadm.yml index 8a1f0b1c17..9f74207868 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-kubeadm.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-kubeadm.yml @@ -1,5 +1,5 @@ --- -- name: Install kubeadm-{{ version }} package for RedHat family +- name: k8s/install | Install kubeadm-{{ version }} package for RedHat family yum: name: kubeadm-{{ version }}-0 update_cache: true diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages-cni-in-kubelet.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages-cni-in-kubelet.yml index 2c53a02504..24c2a8a985 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages-cni-in-kubelet.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages-cni-in-kubelet.yml @@ -1,5 +1,5 @@ --- -- name: "install-packages | Install kubelet-{{ version }} and kubectl-{{ version }} for RedHat family" +- name: k8s/install | Install kubelet-{{ version }} and kubectl-{{ version }} for RedHat family yum: name: - kubelet-{{ version }}-0 # removes (replaces) kubernetes-cni when full version is 1.17.7-0 but not when 1.17.7-1 diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages.yml index 4f88ce4ce8..28226ee0e7 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/RedHat/install-packages.yml @@ -1,5 +1,5 @@ --- -- name: "install-packages | Install kubernetes-cni-{{ cni_version }}, kubelet-{{ version }} and kubectl-{{ version }} for RedHat family" +- name: k8s/install | Install kubernetes-cni-{{ cni_version }}, kubelet-{{ version }} and kubectl-{{ version }} for RedHat family yum: name: - kubernetes-cni-{{ cni_version }}-0 diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/downgrade-coredns.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/downgrade-coredns.yml index 123e08e1ab..897ee7e4b7 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/downgrade-coredns.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/downgrade-coredns.yml @@ -12,7 +12,7 @@ #+-----------------------------------------------------------------+ # Source: look for 'CoreDNSVersion' at https://github.com/kubernetes/kubernetes/blob/$TAG/cmd/kubeadm/app/constants/constants.go -- name: upgrade-master | Create directory /etc/epiphany/manifests +- name: k8s/master | Create directory /etc/epiphany/manifests become: true file: path: /etc/epiphany/manifests @@ -21,11 +21,11 @@ group: root mode: u=rwx,go=r -- name: Upload and apply template +- name: k8s/master | Upload and apply template vars: file_name: coredns-config-for-k8s-below-1.16.yml block: - 
- name: upgrade-master | Upload {{ file_name }} file + - name: k8s/master | Upload {{ file_name }} file template: src: kubernetes/{{ file_name }}.j2 dest: /etc/epiphany/manifests/{{ file_name }} @@ -33,7 +33,7 @@ group: "{{ admin_user.name }}" mode: u=rw,go=r - - name: "upgrade-master | Apply /etc/epiphany/manifests/{{ file_name }} file" + - name: k8s/master | Apply /etc/epiphany/manifests/{{ file_name }} file environment: KUBECONFIG: /etc/kubernetes/admin.conf command: | diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-cluster-version.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-cluster-version.yml index 5e4fe6e23a..74af495de4 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-cluster-version.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-cluster-version.yml @@ -6,9 +6,9 @@ set -o pipefail && kubectl version --short -o json | jq --raw-output '.serverVersion.gitVersion' register: cluster_version - changed_when: false args: executable: /bin/bash + changed_when: false - name: Set cluster version as fact set_fact: diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/patch-kubeadm-etcd-encryption.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/patch-kubeadm-etcd-encryption.yml index 8bb7386f62..bcc329d77c 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/patch-kubeadm-etcd-encryption.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/patch-kubeadm-etcd-encryption.yml @@ -4,14 +4,14 @@ # kube-apiserver uses --encryption-provider-config parameter to control how data is encrypted in etcd. # If this parameter is absent the encryption is not enabled. 
-- name: upgrade-master | Check if encryption of secret data is enabled +- name: k8s/master | Check if encryption of secret data is enabled command: >- grep -- '--encryption-provider-config' /etc/kubernetes/manifests/kube-apiserver.yaml register: command_grep_encryption_flag changed_when: false failed_when: command_grep_encryption_flag.rc > 1 -- name: upgrade-master | Patch kubeadm-config ConfigMap if needed +- name: k8s/master | Patch kubeadm-config ConfigMap if needed when: - command_grep_encryption_flag.rc == 0 # encryption enabled run_once: true # makes no sense to execute it more than once (would be redundant) @@ -27,7 +27,7 @@ changed_when: false # The following procedure ensures that etcd encryption is always enabled during subsequent kubeadm executions - - name: upgrade-master | Patch kubeadm-config configmap (patch-kubeadm-etcd-encryption.yml) + - name: k8s/master | Patch kubeadm-config configmap (patch-kubeadm-etcd-encryption.yml) when: - _kubeadm_api_server_extra_args['encryption-provider-config'] is undefined command: | diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml index 1b6ef33be5..3cec04246e 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml @@ -1,5 +1,5 @@ --- -- name: upgrade-master | Patch keycloak's statefulset +- name: k8s/master | Patch keycloak's statefulset include_tasks: utils/patch-statefulset.yml vars: image_regexp: 'jboss/keycloak:' diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-rabbitmq-app.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-rabbitmq-app.yml index eaeb211c98..d57b717fd3 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-rabbitmq-app.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-rabbitmq-app.yml @@ -1,5 +1,5 @@ --- -- name: upgrade-master | Patch rabbitmq's statefulset +- name: k8s/master | Patch rabbitmq's statefulset include_tasks: utils/patch-statefulset.yml vars: image_regexp: 'rabbitmq:' diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/update-kubeadm-image-repository.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/update-kubeadm-image-repository.yml index 18e4a17386..77c13debdb 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/update-kubeadm-image-repository.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/update-kubeadm-image-repository.yml @@ -1,10 +1,10 @@ --- -- name: upgrade-master | Patch imageRepository in kubeadm-config ConfigMap +- name: k8s/master | Patch imageRepository in kubeadm-config ConfigMap run_once: true environment: KUBECONFIG: /etc/kubernetes/admin.conf block: - - name: upgrade-master | Get kubeadm-config configmap + - name: k8s/master | Get kubeadm-config configmap command: | kubectl get configmap kubeadm-config \ --namespace kube-system \ @@ -12,7 +12,7 @@ register: command_kubeadm_configmap changed_when: false - - name: upgrade-master | Patch kubeadm-config configmap 
(update-kubeadm-image-repository.yml) + - name: k8s/master | Patch kubeadm-config configmap (update-kubeadm-image-repository.yml) when: - _image_repository_updated != _image_repository # skip the task if nothing changed command: | diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-coredns.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-coredns.yml index 5beda7a980..e220305f7e 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-coredns.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-coredns.yml @@ -1,5 +1,5 @@ --- -- name: Apply latest coredns +- name: k8s/master | Apply latest coredns import_role: name: kubernetes_master tasks_from: apply-coredns diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-kubernetes-dashboard.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-kubernetes-dashboard.yml index 3bd31c4171..b979b71cd0 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-kubernetes-dashboard.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-kubernetes-dashboard.yml @@ -1,24 +1,24 @@ --- # Delete old kubernetes-dashboard from kube-system, new dashboard has its own namespace - TODO remove this block in 0.7.0 -- name: uprgade-kubernetes-dashboard | Delete old kubernetes dashboard +- name: k8s/master | Delete old kubernetes dashboard run_once: true environment: KUBECONFIG: /etc/kubernetes/admin.conf block: - - name: Check if any resource with label 'k8s-app=kubernetes-dashboard' exists in kube-system + - name: k8s/master | Check if any resource with label 'k8s/app=kubernetes-dashboard' exists in kube-system command: | - kubectl get all -l k8s-app=kubernetes-dashboard -n kube-system + kubectl get all -l k8s/app=kubernetes-dashboard -n kube-system register: result changed_when: false - - name: Delete all resources with label 'k8s-app=kubernetes-dashboard' from kube-system + - name: k8s/master | Delete all resources with label 'k8s/app=kubernetes-dashboard' from kube-system command: | - kubectl delete all -l k8s-app=kubernetes-dashboard -n kube-system + kubectl delete all -l k8s/app=kubernetes-dashboard -n kube-system when: - not 'No resources found' in result.stderr - - name: Check if 'kubernetes-dashboard-minimal' Role or RoleBinding exists in kube-system + - name: k8s/master | Check if 'kubernetes-dashboard-minimal' Role or RoleBinding exists in kube-system command: | kubectl get Role,RoleBinding kubernetes-dashboard-minimal -n kube-system register: result @@ -27,7 +27,7 @@ - result.rc != 0 - not 'not found' in result.stderr - - name: Delete 'kubernetes-dashboard-minimal' Role and RoleBinding from kube-system + - name: k8s/master | Delete 'kubernetes-dashboard-minimal' Role and RoleBinding from kube-system command: | kubectl delete Role,RoleBinding kubernetes-dashboard-minimal -n kube-system when: @@ -35,7 +35,7 @@ # Deploy new version of kubernetes-dashboard -- name: Apply Kubernetes Dashboard +- name: k8s/master | Apply Kubernetes Dashboard import_role: name: kubernetes_master tasks_from: apply-dashboard diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml index 70735d40ce..e8045c7847 
100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml @@ -1,5 +1,5 @@ --- -- name: upgrade-master | Wait for cluster's readiness +- name: k8s/master0 | Wait for cluster's readiness include_tasks: utils/wait.yml # This resolves issues (related to the etcd encryption) causing upgrades to hang. @@ -7,34 +7,34 @@ # If config is incomplete, kubeadm rewrites the kube-apiserver.yaml manifest file without the etcd feature enabled. # In turn, this causes Kuberentes components such as the controller-manager to lose ability to read internal (kube-system) secrets, then # any upgrade attempt freezes and the cluster at hand becomes unusable. -- name: upgrade-master | Make sure the etcd encryption feature is properly configured (if enabled) +- name: k8s/master0 | Make sure the etcd encryption feature is properly configured (if enabled) include_tasks: patch-kubeadm-etcd-encryption.yml -- name: upgrade-master | Update imageRepository in kubeadm-config ConfigMap +- name: k8s/master0 | Update imageRepository in kubeadm-config ConfigMap include_tasks: update-kubeadm-image-repository.yml -- name: upgrade-master | Reconfigure rabbitmq application +- name: k8s/master0 | Reconfigure rabbitmq application include_tasks: reconfigure-rabbitmq-app.yml -- name: upgrade-master | Reconfigure keycloak application +- name: k8s/master0 | Reconfigure keycloak application include_tasks: reconfigure-auth-service-app.yml -- name: upgrade-master | Upgrade kubernetes-dashboard +- name: k8s/master0 | Upgrade kubernetes-dashboard include_tasks: upgrade-kubernetes-dashboard.yml -- name: upgrade-master | Drain master in preparation for maintenance +- name: k8s/master0 | Drain master in preparation for maintenance include_tasks: utils/drain.yml -- name: upgrade-master | Upgrade, configure packages +- name: k8s/master0 | Upgrade, configure packages block: - - name: upgrade-master | Hold packages for Debian family + - name: k8s/master0 | Hold packages for Debian family include_tasks: Debian/hold-packages.yml when: ansible_os_family == "Debian" - - name: upgrade-master | Install kubeadm + - name: k8s/master0 | Install kubeadm include_tasks: "{{ ansible_os_family }}/install-kubeadm.yml" - - name: upgrade-master | Upgrade network components for Kubernetes >= v1.16.x + - name: k8s/master0 | Upgrade network components for Kubernetes >= v1.16.x include_tasks: upgrade-network-components.yml when: - version is version('1.16.7', '>=') @@ -42,16 +42,16 @@ # CoreDNS is overwritten (upgraded or downgraded) by 'kubeadm upgrade apply', see CoreDNS version matrix in downgrade-coredns.yml. # kubeadm upgrade is not able to downgrade coredns ConfigMap properly (at least when upgrade from 1.14.6 to 1.15.10) # which may cause the update to hang. 
- - name: upgrade-master | Downgrade CoreDNS to K8s built-in version + - name: k8s/master0 | Downgrade CoreDNS to K8s built-in version include_tasks: downgrade-coredns.yml when: - version is version('1.16.0', '<') - - name: upgrade-master | Wait for cluster's readiness + - name: k8s/master0 | Wait for cluster's readiness include_tasks: utils/wait.yml # Note: Usage of the --config flag for reconfiguring the cluster during upgrade is not recommended since v1.16 - - name: upgrade-master | Validate whether cluster is upgradeable + - name: k8s/master0 | Validate whether cluster is upgradeable # Ignore CoreDNSUnsupportedPlugins error since coredns-migration does not support 'hosts' plugin. # This issue is fixed in K8s v1.18, see https://github.com/kubernetes/kubernetes/pull/88482 command: >- @@ -68,7 +68,7 @@ changed_when: false # Note: Usage of the --config flag for reconfiguring the cluster during upgrade is not recommended since v1.16 - - name: upgrade-master | Upgrade K8s cluster to v{{ version }} + - name: k8s/master0 | Upgrade K8s cluster to v{{ version }} # Ignore CoreDNSUnsupportedPlugins error since coredns-migration does not support 'hosts' plugin. # This issue is fixed in K8s v1.18, see https://github.com/kubernetes/kubernetes/pull/88482 command: >- @@ -96,28 +96,28 @@ when: - result is succeeded -- name: upgrade-master | Wait for cluster's readiness +- name: k8s/master0 | Wait for cluster's readiness include_tasks: utils/wait.yml # 'kubeadm upgrade apply' overwrites Epiphany's customized CoreDNS deployment so we restore it. # This task should be run each time K8s is upgraded to the latest version. # Keep Epiphany's CoreDNS version in synch with K8s CoreDNS version. -- name: upgrade-master | Deploy customized CoreDNS for latest Kubernetes (1.17.7) +- name: k8s/master0 | Deploy customized CoreDNS for latest Kubernetes (1.17.7) include_tasks: upgrade-coredns.yml when: - version is version('1.17.7', '==') -- name: upgrade-master | Upgrade kubeadm-config.yml if exists +- name: k8s/master0 | Upgrade kubeadm-config.yml if exists include_tasks: upgrade-kubeadm-config.yml -- name: upgrade-master | Upgrade Docker # this may restart Docker daemon +- name: k8s/master0 | Upgrade Docker # this may restart Docker daemon include_tasks: docker.yml -- name: upgrade-master | Reload kubelet and docker +- name: k8s/master0 | Reload kubelet and docker include_tasks: utils/reload-kubelet-and-docker.yml -- name: upgrade-master | Uncordon master - mark master as schedulable +- name: k8s/master0 | Uncordon master - mark master as schedulable include_tasks: utils/uncordon.yml -- name: upgrade-master | Verify component versions and node status +- name: k8s/master0 | Verify component versions and node status include_tasks: kubernetes/verify-upgrade.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-masterN.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-masterN.yml index 7a6d18af60..1d5feb23ee 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-masterN.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-masterN.yml @@ -1,17 +1,17 @@ --- -- name: upgrade-masterN | Drain master in preparation for maintenance +- name: k8s/masterN | Drain master in preparation for maintenance include_tasks: utils/drain.yml -- name: Upgrade, configure packages +- name: k8s/masterN | Upgrade, configure packages block: - - name: upgrade-masterN | Hold 
packages for Debian family + - name: k8s/masterN | Hold packages for Debian family include_tasks: Debian/hold-packages.yml when: ansible_os_family == "Debian" - - name: upgrade-masterN | Install kubeadm + - name: k8s/masterN | Install kubeadm include_tasks: "{{ ansible_os_family }}/install-kubeadm.yml" - - name: upgrade-masterN | Upgrade master {{ inventory_hostname }} + - name: k8s/masterN | Upgrade master {{ inventory_hostname }} command: >- kubeadm upgrade node register: result @@ -21,7 +21,7 @@ delay: 30 changed_when: false - - name: name: upgrade-masterN | Install kubelet and kubectl for {{ version }} + - name: k8s/masterN | Install kubelet and kubectl for {{ version }} include_tasks: >- {%- if cni_in_kubelet is undefined or not cni_in_kubelet -%} {{ ansible_os_family }}/install-packages.yml @@ -31,20 +31,20 @@ when: - result is succeeded -- name: upgrade-masterN | Wait for cluster's readiness +- name: k8s/masterN | Wait for cluster's readiness include_tasks: utils/wait.yml -- name: upgrade-masterN | Upgrade kubeadm-config.yml if exists +- name: k8s/masterN | Upgrade kubeadm-config.yml if exists include_tasks: upgrade-kubeadm-config.yml -- name: upgrade-masterN | Upgrade Docker # this may restart Docker daemon +- name: k8s/masterN | Upgrade Docker # this may restart Docker daemon include_tasks: docker.yml -- name: upgrade-masterN | Reload kubelet and docker +- name: k8s/masterN | Reload kubelet and docker include_tasks: utils/reload-kubelet-and-docker.yml -- name: upgrade-masterN | Uncordon master - mark master as schedulable +- name: k8s/masterN | Uncordon master - mark master as schedulable include_tasks: utils/uncordon.yml -- name: upgrade-masterN | Verify component versions and node status +- name: k8s/masterN | Verify component versions and node status include_tasks: kubernetes/verify-upgrade.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-network-components.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-network-components.yml index 248a7a01c2..5189238b64 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-network-components.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-network-components.yml @@ -1,14 +1,14 @@ --- -- name: upgrade-master | Determine which network plugin is used +- name: k8s/master | Determine which network plugin is used run_once: true environment: KUBECONFIG: /etc/kubernetes/admin.conf block: - - name: upgrade-master | Set default cni plugin - flannel + - name: k8s/master | Set default cni plugin - flannel set_fact: plugin: "flannel" - - name: upgrade-master | Wait for API resources to propagate + - name: k8s/master | Wait for API resources to propagate shell: >- kubectl api-resources --cached=false && kubectl -n kube-system get daemonsets register: daemonsets_query_result @@ -18,31 +18,31 @@ delay: 30 changed_when: false - - name: upgrade-master | If canal is installed on the cluster + - name: k8s/master | If canal is installed on the cluster command: >- - kubectl -n kube-system get daemonsets -l k8s-app=canal + kubectl -n kube-system get daemonsets -l k8s/app=canal register: canal_query_result changed_when: false - - name: upgrade-master | Set network plugin variable to canal + - name: k8s/master | Set network plugin variable to canal set_fact: plugin: "canal" when: - '"canal" in canal_query_result.stdout' - - name: upgrade-master | If calico is installed on the cluster 
+ - name: k8s/master | If calico is installed on the cluster command: >- - kubectl -n kube-system get daemonsets -l k8s-app=calico-node + kubectl -n kube-system get daemonsets -l k8s/app=calico-node register: calico_query_result changed_when: false - - name: upgrade-master | Set network plugin variable to calico + - name: k8s/master | Set network plugin variable to calico set_fact: plugin: "calico" when: - '"calico" in calico_query_result.stdout' -- name: "Apply network plugin configured by user {{ plugin }}" +- name: k8s/master | Apply network plugin configured by user {{ plugin }} import_role: name: kubernetes_master tasks_from: apply-network-components diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-node.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-node.yml index ccedaed2e0..2037f983fe 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-node.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-node.yml @@ -1,20 +1,20 @@ --- -- name: upgrade-node | Drain node in preparation for maintenance +- name: k8s/node | Drain node in preparation for maintenance include_tasks: utils/drain.yml -- name: upgrade-node | Upgrade Docker # this may restart Docker daemon +- name: k8s/node | Upgrade Docker # this may restart Docker daemon include_tasks: docker.yml -- name: upgrade-node | Upgrade, configure packages +- name: k8s/node | Upgrade, configure packages block: - - name: upgrade-node | Hold packages for Debian family + - name: k8s/node | Hold packages for Debian family include_tasks: "Debian/hold-packages.yml" when: ansible_os_family == "Debian" - - name: upgrade-node | Install kubeadm + - name: k8s/node | Install kubeadm include_tasks: "{{ ansible_os_family }}/install-kubeadm.yml" - - name: upgrade-node | Upgrade node config + - name: k8s/node | Upgrade node config command: >- kubeadm upgrade node config --kubelet-version v{{ version }} register: result @@ -23,7 +23,7 @@ retries: 20 delay: 30 - - name: upgrade-node | Install packages + - name: k8s/node | Install packages include_tasks: >- {%- if cni_in_kubelet is undefined or not cni_in_kubelet -%} {{ ansible_os_family }}/install-packages.yml @@ -31,17 +31,17 @@ {{ ansible_os_family }}/install-packages-cni-in-kubelet.yml {%- endif -%} -- name: upgrade-node | Restart kubelet +- name: k8s/node | Restart kubelet systemd: state: restarted daemon_reload: true name: kubelet -- name: upgrade-node | Wait for cluster's readiness +- name: k8s/node | Wait for cluster's readiness include_tasks: utils/wait.yml -- name: upgrade-node | Uncordon node - mark node as schedulable +- name: k8s/node | Uncordon node - mark node as schedulable include_tasks: utils/uncordon.yml -- name: upgrade-node | Verify component versions and node status +- name: k8s/node | Verify component versions and node status include_tasks: verify-upgrade.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/drain.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/drain.yml index a6e5f8df37..2efd31f5cf 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/drain.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/drain.yml @@ -1,19 +1,27 @@ --- -- name: upgrade | Wait for cluster's readiness +- name: k8s/utils | Wait for cluster's readiness 
include_tasks: kubernetes/utils/wait.yml -- when: +- name: k8s/utils | Drain master or node + when: - groups.kubernetes_node is defined - groups.kubernetes_node | length > 0 # drain only if there is at least one worker node + block: - - delegate_to: "{{ groups.kubernetes_master[0] }}" + - name: k8s/utils | Drain master or node in preparation for maintenance + delegate_to: "{{ groups.kubernetes_master[0] }}" run_once: true environment: KUBECONFIG: /etc/kubernetes/admin.conf - block: - - name: upgrade | Drain master or node in preparation for maintenance - command: >- - kubectl drain {{ inventory_hostname }} --ignore-daemonsets --delete-local-data + command: | + kubectl drain {{ inventory_hostname }} \ + --ignore-daemonsets \ + --delete-local-data + register: result + until: + - result is succeeded + retries: 20 + delay: 5 - - name: upgrade | Wait for cluster's readiness + - name: k8s/utils | Wait for cluster's readiness include_tasks: kubernetes/utils/wait.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset-step.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset-step.yml index 396ee0701a..8504f6263f 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset-step.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset-step.yml @@ -6,16 +6,16 @@ # - _statefulset # - _documents -- name: upgrade-k8s | Patch statefulset +- name: k8s/utils | Patch statefulset (step) when: - _names_and_images_updated != _names_and_images - _containers_updated | length > 0 block: - - name: upgrade-k8s | Patch statefulset {{ _statefulset }} (step) + - name: k8s/utils | Patch statefulset {{ _statefulset }} (step) environment: KUBECONFIG: /etc/kubernetes/admin.conf - connand: | + command: | kubectl patch statefulsets.apps {{ _statefulset }} \ --namespace {{ _namespace }} \ --patch '{{ _patch | to_json }}' diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset.yml index 5cfaae97e9..369ac21184 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset.yml @@ -3,7 +3,7 @@ # - image_registry_address # - image_regexp -- name: upgrade-k8s | Get all statefulsets +- name: k8s/utils | Get all statefulsets environment: KUBECONFIG: /etc/kubernetes/admin.conf command: | @@ -13,10 +13,10 @@ register: command_statefulsets changed_when: false -- name: upgrade-k8s | Patch all statefulsets +- name: k8s/utils | Patch all statefulsets block: - - name: upgrade-k8s | Patch statefulset {{ _statefulset }} - include_tasks: utils/patch-statefulset-step.yml + - name: k8s/utils | Patch statefulset {{ _statefulset }} + include_tasks: patch-statefulset-step.yml vars: _namespace: "{{ item.0 }}" _statefulset: "{{ item.1 }}" diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/reload-kubelet-and-docker.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/reload-kubelet-and-docker.yml index 31b31c38e1..f5c7731f84 100644 --- 
a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/reload-kubelet-and-docker.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/reload-kubelet-and-docker.yml @@ -1,25 +1,25 @@ --- -- name: upgrade | Stop Kubelet +- name: k8s/utils | Stop Kubelet systemd: state: stopped name: kubelet -- name: upgrade | Stop Docker +- name: k8s/utils | Stop Docker systemd: state: stopped name: docker -- name: upgrade | Reload daemon +- name: k8s/utils | Reload daemon systemd: daemon_reload: true -- name: upgrade | Start Docker +- name: k8s/utils | Start Docker systemd: name: docker state: started enabled: true -- name: upgrade | Start Kubelet +- name: k8s/utils | Start Kubelet systemd: name: kubelet state: started diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/uncordon.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/uncordon.yml index c2b32274c4..81173e732c 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/uncordon.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/uncordon.yml @@ -1,24 +1,25 @@ --- -- name: upgrade | Wait for cluster's readiness +- name: k8s/utils | Wait for cluster's readiness include_tasks: kubernetes/utils/wait.yml -- when: +- name: k8s/utils | Uncordon master or node + when: - groups.kubernetes_node is defined - groups.kubernetes_node | length > 0 # master or node is drained only if there is at least one worker node + block: - - delegate_to: "{{ groups.kubernetes_master[0] }}" + - name: k8s/utils | Uncordon master or node - mark it as schedulable + delegate_to: "{{ groups.kubernetes_master[0] }}" run_once: true environment: KUBECONFIG: /etc/kubernetes/admin.conf - block: - - name: upgrade | Uncordon master or node - mark it as schedulable - command: >- - kubectl uncordon {{ inventory_hostname }} - register: result - until: - - result is succeeded - retries: 20 - delay: 5 + command: >- + kubectl uncordon {{ inventory_hostname }} + register: result + until: + - result is succeeded + retries: 20 + delay: 5 - - name: upgrade | Wait for cluster's readiness + - name: k8s/utils | Wait for cluster's readiness include_tasks: kubernetes/utils/wait.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait-for-kube-apiserver.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait-for-kube-apiserver.yml index c0c6ae543e..fc9dd05f3f 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait-for-kube-apiserver.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait-for-kube-apiserver.yml @@ -1,15 +1,14 @@ --- -- delegate_to: "{{ groups.kubernetes_master[0] }}" +- name: k8s/utils | Wait for kubectl to access K8s cluster + delegate_to: "{{ groups.kubernetes_master[0] }}" run_once: true environment: KUBECONFIG: /etc/kubernetes/admin.conf - block: - - name: Wait for kubectl to access K8s cluster - command: >- - kubectl cluster-info - register: result - until: - - result is succeeded and "running" in result.stdout - retries: 60 # 1min - delay: 1 - changed_when: false + command: >- + kubectl cluster-info + register: result + until: + - result is succeeded and "running" in result.stdout + retries: 60 # 1min + delay: 1 + changed_when: false diff --git 
a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait.yml index 10a07e48ae..e1b0f9e850 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/wait.yml @@ -1,15 +1,16 @@ --- -- delegate_to: "{{ groups.kubernetes_master[0] }}" +- name: k8s/utils | Wait for nodes and pods to be ready + delegate_to: "{{ groups.kubernetes_master[0] }}" run_once: true environment: KUBECONFIG: /etc/kubernetes/admin.conf block: - - name: Wait for kubectl to find and access K8s cluster + - name: k8s/utils | Wait for kubectl to find and access K8s cluster include_tasks: kubernetes/utils/wait-for-kube-apiserver.yml - - name: Wait for all nodes to be ready + - name: k8s/utils | Wait for nodes command: >- - kubectl get nodes -o json + kubectl get nodes --output json register: result until: - result is succeeded @@ -18,11 +19,12 @@ delay: 2 changed_when: false - - when: wait_for_pods | default(false) | bool + - name: k8s/utils | Wait for pods + when: wait_for_pods | default(false) | bool block: - - name: Wait for all pods to be running + - name: k8s/utils | Wait for all pods to be running command: >- - kubectl get pods --all-namespaces -o json + kubectl get pods --all-namespaces --output json register: result until: - result is succeeded @@ -31,9 +33,9 @@ delay: 2 changed_when: false - - name: Wait for all pods to be ready + - name: k8s/utils | Wait for all pods to be ready command: >- - kubectl get pods --all-namespaces -o json + kubectl get pods --all-namespaces --output json register: result until: - result is succeeded diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/verify-upgrade.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/verify-upgrade.yml index 0d6f2e2707..959c5fb145 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/verify-upgrade.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/verify-upgrade.yml @@ -1,25 +1,25 @@ --- -- name: verify-upgrade | Verify cluster versions +- name: k8s/verify | Verify cluster versions environment: - KUBECONFIG: /etc/kubernetes/admin.conf + KUBECONFIG: /etc/kubernetes/kubelet.conf block: - - name: Verify cluster version + - name: k8s/verify | Verify cluster version when: - inventory_hostname in groups.kubernetes_master block: - - name: verify-upgrade | Include wait-for-kube-apiserver.yml + - name: k8s/verify | Include wait-for-kube-apiserver.yml include_tasks: utils/wait-for-kube-apiserver.yml - - name: verify-upgrade | Include get-cluster-version.yml + - name: k8s/verify | Include get-cluster-version.yml include_tasks: get-cluster-version.yml # sets cluster_version - - name: verify-upgrade | Verify cluster version + - name: k8s/verify | Verify cluster version assert: that: version in cluster_version - - name: Verify kubectl version + - name: k8s/verify | Verify kubectl version block: - - name: verify-upgrade | Get kubectl version + - name: k8s/verify | Get kubectl version shell: |- set -o pipefail && kubectl version --client --short -o json | jq --raw-output '.clientVersion.gitVersion' @@ -28,23 +28,23 @@ executable: /bin/bash changed_when: false - - name: verify-upgrade | Verify kubectl version + - name: k8s/verify | Verify kubectl version assert: that: version 
in kubectl_version.stdout - - name: Verify kubeadm version + - name: k8s/verify | Verify kubeadm version block: - - name: verify-upgrade | Get kubeadm version + - name: k8s/verify | Get kubeadm version command: >- kubeadm version -o short register: kubeadm_version changed_when: false - - name: verify-upgrade | Verify kubeadm version + - name: k8s/verify | Verify kubeadm version assert: that: version in kubeadm_version.stdout - - name: verify-upgrade | Verify kubelet version from API server and get node status + - name: k8s/verify | Verify kubelet version from API server and get node status run_once: true shell: |- set -o pipefail && @@ -60,6 +60,6 @@ args: *args changed_when: false - - name: verify-upgrade | Verify node status + - name: k8s/verify | Verify node status assert: that: "'Ready' in node_status_and_version.stdout" From d3bf97383930da7037fb160aaeb2b5f9260bf56f Mon Sep 17 00:00:00 2001 From: to-bar <46519524+to-bar@users.noreply.github.com> Date: Fri, 10 Jul 2020 20:08:40 +0200 Subject: [PATCH 08/19] Make deployment manifest tasks more generic --- .../roles/kubernetes_master/defaults/main.yml | 2 ++ .../kubernetes_master/tasks/deployments/apply-file.yml | 9 +++------ .../tasks/deployments/deploy-file.yml | 10 ++++++---- .../tasks/deployments/deploy-template.yml | 8 +++++--- 4 files changed, 16 insertions(+), 13 deletions(-) diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/defaults/main.yml b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/defaults/main.yml index 06f674d130..f7050c77c4 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/defaults/main.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/defaults/main.yml @@ -16,3 +16,5 @@ kubelet_custom_config: systemReserved: cpu: 50m memory: 768Mi # based on RedHat 7.5 on Standard_DS1_v2 Azure VM with =~ 30 pods + +epiphany_manifests_dir: /etc/epiphany/manifests diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/deployments/apply-file.yml b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/deployments/apply-file.yml index 5a762ffd48..1aef96a3b0 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/deployments/apply-file.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/deployments/apply-file.yml @@ -1,9 +1,6 @@ --- -- name: "Apply /etc/epiphany/manifests/{{ file_name }} file" +- name: Apply {{ file_path }} file environment: KUBECONFIG: /etc/kubernetes/admin.conf - shell: | - kubectl apply \ - -f /etc/epiphany/manifests/{{ file_name }} - args: - executable: /bin/bash + shell: >- + kubectl apply -f {{ file_path }} diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/deployments/deploy-file.yml b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/deployments/deploy-file.yml index a915d9d591..c20b14da5a 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/deployments/deploy-file.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/deployments/deploy-file.yml @@ -2,20 +2,22 @@ - name: Create directory for files become: true file: - path: /etc/epiphany/manifests + path: "{{ epiphany_manifests_dir }}" state: directory owner: root group: root - mode: u=rw,go=r + mode: u=rwx,go=r -- name: "Copy {{ file_name }}" +- name: Upload {{ file_name }} file become: true copy: 
src: "{{ file_name }}" - dest: "/etc/epiphany/manifests/{{ file_name }}" + dest: "{{ epiphany_manifests_dir }}/{{ file_name }}" owner: "{{ admin_user.name }}" group: "{{ admin_user.name }}" mode: u=rw,go=r - name: Apply file include_tasks: apply-file.yml + vars: + file_path: "{{ epiphany_manifests_dir }}/{{ file_name }}" diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/deployments/deploy-template.yml b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/deployments/deploy-template.yml index 79f88ed63b..c469076ac2 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/deployments/deploy-template.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/deployments/deploy-template.yml @@ -2,20 +2,22 @@ - name: Create directory for files become: true file: - path: /etc/epiphany/manifests + path: "{{ epiphany_manifests_dir }}" state: directory owner: root group: root mode: u=rwx,go=r -- name: "Upload {{ file_name }} file" +- name: Upload {{ file_name }} file become: true template: src: "{{ file_name }}" - dest: "/etc/epiphany/manifests/{{ file_name }}" + dest: "{{ epiphany_manifests_dir }}/{{ file_name | regex_replace('.j2$') }}" owner: "{{ admin_user.name }}" group: "{{ admin_user.name }}" mode: u=rw,go=r - name: Apply file include_tasks: apply-file.yml + vars: + file_path: "{{ epiphany_manifests_dir }}/{{ file_name | regex_replace('.j2$') }}" From 5d1e7638e9881ab3cab475df5a8fe745a86be898 Mon Sep 17 00:00:00 2001 From: to-bar <46519524+to-bar@users.noreply.github.com> Date: Thu, 9 Jul 2020 19:53:17 +0200 Subject: [PATCH 09/19] Improve detecting CNI plugin --- .../tasks/apply-network-components.yml | 26 ++------- .../tasks/cni-plugins/wait-for-cni-plugin.yml | 21 ++++++++ .../tasks/kubernetes/get-cni-plugin.yml | 53 +++++++++++++++++++ .../tasks/kubernetes/upgrade-master0.yml | 5 ++ .../kubernetes/upgrade-network-components.yml | 49 ++--------------- 5 files changed, 86 insertions(+), 68 deletions(-) create mode 100644 core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/cni-plugins/wait-for-cni-plugin.yml create mode 100644 core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-cni-plugin.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/apply-network-components.yml b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/apply-network-components.yml index d868adc148..5f23af6d49 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/apply-network-components.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/apply-network-components.yml @@ -1,25 +1,7 @@ --- - name: Apply network plugin configured by user - include_tasks: "./cni-plugins/{{ network_plugin }}.yml" + include_tasks: cni-plugins/{{ network_plugin }}.yml -# Wait for CNI plugin become ready to prevent failure of 'Get token from master' task on node before joining master -- name: Wait for CNI plugin become ready - shell: >- - kubectl wait --for=condition=Ready pods -l {{ selectors[network_plugin] }} - --field-selector=spec.nodeName=$(hostname --long) -n kube-system --timeout=10s - args: - executable: /bin/bash - environment: - KUBECONFIG: /etc/kubernetes/admin.conf - register: wait_for_cni_plugin - until: wait_for_cni_plugin is succeeded - retries: 30 - delay: 1 - changed_when: false - vars: - selectors: - calico: k8s-app=calico-node 
- canal: k8s-app=canal - flannel: app=flannel - when: - - network_plugin in selectors.keys() +# Wait for CNI plugin to become ready to prevent failure of 'Get token from master' task on node before joining master +- name: Include wait-for-cni-plugin.yml + include_tasks: cni-plugins/wait-for-cni-plugin.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/cni-plugins/wait-for-cni-plugin.yml b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/cni-plugins/wait-for-cni-plugin.yml new file mode 100644 index 0000000000..5f4a720f65 --- /dev/null +++ b/core/src/epicli/data/common/ansible/playbooks/roles/kubernetes_master/tasks/cni-plugins/wait-for-cni-plugin.yml @@ -0,0 +1,21 @@ +--- +# This file is meant to be also used by upgrade role + +- name: Wait for CNI plugin to become ready + environment: + KUBECONFIG: /etc/kubernetes/admin.conf + shell: >- + kubectl wait --for=condition=Ready pods -l {{ selectors[network_plugin] }} + --field-selector=spec.nodeName=$(hostname --long) -n kube-system --timeout=10s + args: + executable: /bin/bash + register: wait_for_cni_plugin + until: wait_for_cni_plugin is succeeded + retries: 30 + delay: 1 + changed_when: false + vars: + selectors: + calico: k8s-app=calico-node + canal: k8s-app=canal + flannel: app=flannel diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-cni-plugin.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-cni-plugin.yml new file mode 100644 index 0000000000..6e5e60e829 --- /dev/null +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/get-cni-plugin.yml @@ -0,0 +1,53 @@ +--- +- name: k8s/master | Check which CNI plugin is installed + vars: + kubectl_get_ds_cmd: >- + kubectl get daemonsets -n kube-system -o=json | jq --raw-output + '.items[].metadata | select (.labels."k8s-app" == "canal" + or .labels."k8s-app" == "calico-node" + or .labels.app == "flannel") | .name' + environment: + KUBECONFIG: /etc/kubernetes/admin.conf + block: + - name: k8s/master | Check which CNI plugin is installed + shell: >- + {{ kubectl_get_ds_cmd }} + changed_when: false + register: shell_plugin_query + args: + executable: /bin/bash + rescue: + - name: Print caught exception info before trying rescue task + debug: + var: shell_plugin_query + + # 'kubectl api-resources --cached=false' is used to avoid errors like: + # Error from server (NotFound): Unable to find \"extensions/v1beta1, Resource=daemonsets\" that match label selector \"k8s-app=canal\" + - name: k8s/master | Check which CNI plugin is installed (rescue task) + shell: >- + kubectl api-resources --cached=false && + {{ kubectl_get_ds_cmd }} + changed_when: false + register: shell_plugin_query + args: + executable: /bin/bash + +- name: k8s/master | Set CNI plugin as fact + set_fact: + cni_plugin_name: >- + {%- if 'kube-flannel-ds' in shell_plugin_query.stdout -%} + flannel + {%- elif 'canal' in shell_plugin_query.stdout -%} + canal + {%- elif 'calico-node' in shell_plugin_query.stdout -%} + calico + {%- else -%} + unknown + {%- endif -%} + +- name: k8s/master | Assert CNI plugin was found + assert: + that: + - cni_plugin_name != 'unknown' + fail_msg: CNI plugin not found + success_msg: "CNI plugin: {{ cni_plugin_name }}" diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml index 
e8045c7847..3d338bdfa0 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml @@ -22,6 +22,11 @@ - name: k8s/master0 | Upgrade kubernetes-dashboard include_tasks: upgrade-kubernetes-dashboard.yml +- name: k8s/master0 | Get CNI plugin if undefined + include_tasks: get-cni-plugin.yml + when: + - cni_plugin_name is undefined + - name: k8s/master0 | Drain master in preparation for maintenance include_tasks: utils/drain.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-network-components.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-network-components.yml index 5189238b64..43d7550229 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-network-components.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-network-components.yml @@ -1,51 +1,8 @@ --- -- name: k8s/master | Determine which network plugin is used - run_once: true - environment: - KUBECONFIG: /etc/kubernetes/admin.conf - block: - - name: k8s/master | Set default cni plugin - flannel - set_fact: - plugin: "flannel" - - - name: k8s/master | Wait for API resources to propagate - shell: >- - kubectl api-resources --cached=false && kubectl -n kube-system get daemonsets - register: daemonsets_query_result - until: - - daemonsets_query_result is success - retries: 20 - delay: 30 - changed_when: false - - - name: k8s/master | If canal is installed on the cluster - command: >- - kubectl -n kube-system get daemonsets -l k8s/app=canal - register: canal_query_result - changed_when: false - - - name: k8s/master | Set network plugin variable to canal - set_fact: - plugin: "canal" - when: - - '"canal" in canal_query_result.stdout' - - - name: k8s/master | If calico is installed on the cluster - command: >- - kubectl -n kube-system get daemonsets -l k8s/app=calico-node - register: calico_query_result - changed_when: false - - - name: k8s/master | Set network plugin variable to calico - set_fact: - plugin: "calico" - when: - - '"calico" in calico_query_result.stdout' - -- name: k8s/master | Apply network plugin configured by user {{ plugin }} - import_role: +- name: k8s/master | Apply network plugin configured by user {{ network_plugin }} + include_role: name: kubernetes_master tasks_from: apply-network-components vars: - network_plugin: "{{ plugin }}" + network_plugin: "{{ cni_plugin_name }}" k8s_server_version: "{{ cluster_version }}" From c38eb9d580ac9e52d7de0762087d28c0afc01cc8 Mon Sep 17 00:00:00 2001 From: Michal Opala Date: Fri, 17 Jul 2020 09:15:33 +0200 Subject: [PATCH 10/19] AnsibleVarsGenerator.py: fixing regression issue introducted during upgrade refactor --- core/src/epicli/cli/engine/ansible/AnsibleVarsGenerator.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/core/src/epicli/cli/engine/ansible/AnsibleVarsGenerator.py b/core/src/epicli/cli/engine/ansible/AnsibleVarsGenerator.py index 016462b7c5..65c88b2701 100644 --- a/core/src/epicli/cli/engine/ansible/AnsibleVarsGenerator.py +++ b/core/src/epicli/cli/engine/ansible/AnsibleVarsGenerator.py @@ -93,7 +93,11 @@ def populate_group_vars(self, ansible_dir): main_vars['is_upgrade_run'] = self.is_upgrade_run main_vars['roles_with_generated_vars'] = sorted(self.roles_with_generated_vars) - shared_config_doc = 
self.inventory_upgrade.shared_config + if self.is_upgrade_run: + shared_config_doc = self.inventory_upgrade.shared_config + else: + shared_config_doc = select_first(self.config_docs, lambda x: x.kind == 'configuration/shared-config') + if shared_config_doc == None: shared_config_doc = load_yaml_obj(types.DEFAULT, 'common', 'configuration/shared-config') From b32770d8ece4876695abd5b78df7bf91d41d2c89 Mon Sep 17 00:00:00 2001 From: Michal Opala Date: Fri, 17 Jul 2020 12:57:00 +0200 Subject: [PATCH 11/19] Apply suggestions from code review Co-authored-by: to-bar <46519524+to-bar@users.noreply.github.com> --- .../tasks/kubernetes/backup-kubeadm-config.yml | 6 ++++++ .../tasks/kubernetes/upgrade-kubeadm-config.yml | 12 ------------ .../upgrade/tasks/kubernetes/upgrade-master0.yml | 4 ++-- .../upgrade/tasks/kubernetes/upgrade-masterN.yml | 4 ++-- 4 files changed, 10 insertions(+), 16 deletions(-) create mode 100644 core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/backup-kubeadm-config.yml delete mode 100644 core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-kubeadm-config.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/backup-kubeadm-config.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/backup-kubeadm-config.yml new file mode 100644 index 0000000000..5ef9bf6d1b --- /dev/null +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/backup-kubeadm-config.yml @@ -0,0 +1,6 @@ +--- +# The kubeadm-config.yml file is no longer used during upgrade process, +# but we keep it for backup and reference purposes. +- name: k8s/master | Save kubeadm-config ConfigMap to file + command: >- + kubeadm config view > /etc/kubeadm/kubeadm-config.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-kubeadm-config.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-kubeadm-config.yml deleted file mode 100644 index 8f0bbc41ae..0000000000 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-kubeadm-config.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -- name: upgrade-kubeadm-config | Check if /etc/kubeadm/kubeadm-config.yml exists - stat: - path: /etc/kubeadm/kubeadm-config.yml - register: kubeadm_config_file - changed_when: false - -- when: kubeadm_config_file.stat.exists - block: - - name: upgrade-kubeadm-config | Save kubeadm-config ConfigMap to file - command: >- - kubeadm config view > /etc/kubeadm/kubeadm-config.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml index 3d338bdfa0..55a397a1c7 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml @@ -112,8 +112,8 @@ when: - version is version('1.17.7', '==') -- name: k8s/master0 | Upgrade kubeadm-config.yml if exists - include_tasks: upgrade-kubeadm-config.yml +- name: k8s/master0 | Backup kubeadm-config.yml + include_tasks: backup-kubeadm-config.yml - name: k8s/master0 | Upgrade Docker # this may restart Docker daemon include_tasks: docker.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-masterN.yml 
b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-masterN.yml index 1d5feb23ee..9b0bdb4aeb 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-masterN.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-masterN.yml @@ -34,8 +34,8 @@ - name: k8s/masterN | Wait for cluster's readiness include_tasks: utils/wait.yml -- name: k8s/masterN | Upgrade kubeadm-config.yml if exists - include_tasks: upgrade-kubeadm-config.yml +- name: k8s/masterN | Backup kubeadm-config.yml + include_tasks: backup-kubeadm-config.yml - name: k8s/masterN | Upgrade Docker # this may restart Docker daemon include_tasks: docker.yml From 1989cc47be53ffad11facef4dbb7a0bbac06069c Mon Sep 17 00:00:00 2001 From: Michal Opala Date: Sat, 18 Jul 2020 00:27:23 +0200 Subject: [PATCH 12/19] upgrade: statefulset patching refactor - patching all containers (fix) - patching init containers also (fix) - removing include_tasks statements (speedup) --- .../reconfigure-auth-service-app.yml | 2 +- .../kubernetes/reconfigure-rabbitmq-app.yml | 2 +- .../utils/patch-statefulset-step.yml | 65 ----------- .../kubernetes/utils/patch-statefulset.yml | 103 ++++++++++++++++-- 4 files changed, 93 insertions(+), 79 deletions(-) delete mode 100644 core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset-step.yml diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml index 3cec04246e..35f513db9d 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-auth-service-app.yml @@ -2,4 +2,4 @@ - name: k8s/master | Patch keycloak's statefulset include_tasks: utils/patch-statefulset.yml vars: - image_regexp: 'jboss/keycloak:' + image_regexp: 'jboss/keycloak:.*' diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-rabbitmq-app.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-rabbitmq-app.yml index d57b717fd3..17cc3d10a9 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-rabbitmq-app.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/reconfigure-rabbitmq-app.yml @@ -2,4 +2,4 @@ - name: k8s/master | Patch rabbitmq's statefulset include_tasks: utils/patch-statefulset.yml vars: - image_regexp: 'rabbitmq:' + image_regexp: 'rabbitmq:.*' diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset-step.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset-step.yml deleted file mode 100644 index 8504f6263f..0000000000 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset-step.yml +++ /dev/null @@ -1,65 +0,0 @@ ---- -# Expected vars: -# - image_registry_address -# - image_regexp -# - _namespace -# - _statefulset -# - _documents - -- name: k8s/utils | Patch statefulset (step) - when: - - _names_and_images_updated != _names_and_images - - _containers_updated | length > 0 - - block: - - name: k8s/utils | Patch statefulset {{ _statefulset 
}} (step) - environment: - KUBECONFIG: /etc/kubernetes/admin.conf - command: | - kubectl patch statefulsets.apps {{ _statefulset }} \ - --namespace {{ _namespace }} \ - --patch '{{ _patch | to_json }}' - - vars: - # Select correct documents - _documents_filtered: >- - {{ _documents | selectattr('metadata.namespace', 'eq', _namespace) - | selectattr('metadata.name', 'eq', _statefulset) - | list }} - - _containers: >- - {{ _documents_filtered | map(attribute='spec.template.spec.containers') | flatten }} - - _names: >- - {{ _containers | map(attribute='name') | list }} - - _images: >- - {{ _containers | map(attribute='image') | list }} - - # Prepend image urls with the registry address - _images_updated: >- - {{ _images | map('regex_replace', '^' ~ image_registry_address ~ '/', '') - | map('regex_replace', '^', image_registry_address ~ '/') - | list }} - - _names_and_images: >- - {{ _names | zip(_images) | list }} - - _names_and_images_updated: >- - {{ _names | zip(_images_updated) | list }} - - # Update containers (yields list of dictionaries) - _containers_updated: >- - {%- set output = [] -%} - {%- for name, image in _names_and_images_updated -%} - {%- if image | regex_search(image_regexp) -%} - {{- output.append(dict(name=name, image=image)) -}} - {%- endif -%} - {%- endfor -%} - {{- output -}} - - _patch: - spec: - template: - spec: - containers: "{{ _containers_updated }}" diff --git a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset.yml b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset.yml index 369ac21184..6a8ad17be7 100644 --- a/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset.yml +++ b/core/src/epicli/data/common/ansible/playbooks/roles/upgrade/tasks/kubernetes/utils/patch-statefulset.yml @@ -5,7 +5,7 @@ - name: k8s/utils | Get all statefulsets environment: - KUBECONFIG: /etc/kubernetes/admin.conf + KUBECONFIG: &KUBECONFIG /etc/kubernetes/admin.conf command: | kubectl get statefulsets.apps \ --all-namespaces \ @@ -14,24 +14,103 @@ changed_when: false - name: k8s/utils | Patch all statefulsets + environment: + KUBECONFIG: *KUBECONFIG block: - - name: k8s/utils | Patch statefulset {{ _statefulset }} - include_tasks: patch-statefulset-step.yml + - name: k8s/utils | Patch statefulset (containers) + when: + - _item.changed + command: | + kubectl patch statefulsets.apps {{ _item.metadata.name }} \ + --namespace {{ _item.metadata.namespace }} \ + --patch '{{ _patch | to_json }}' + vars: + _patch: + spec: + template: + spec: + containers: + - "{{ _item.data }}" + loop_control: + loop_var: _item + loop: >- + {{ _updates.containers }} + + - name: k8s/utils | Patch statefulset (initContainers) + when: + - _item.changed + command: | + kubectl patch statefulsets.apps {{ _item.metadata.name }} \ + --namespace {{ _item.metadata.namespace }} \ + --patch '{{ _patch | to_json }}' vars: - _namespace: "{{ item.0 }}" - _statefulset: "{{ item.1 }}" + _patch: + spec: + template: + spec: + initContainers: + - "{{ _item.data }}" + loop_control: + loop_var: _item loop: >- - {{ _namespaces | zip(_statefulsets) | list }} + {{ _updates.initContainers }} vars: # Parse output from kubeadm _documents: >- {{ (command_statefulsets.stdout | from_json)['items'] }} - # Extract namespaces from all documents - _namespaces: >- - {{ _documents | map(attribute='metadata.namespace') | list }} + _updates: >- + {%- set ns = namespace() -%} + {%- set ns.containers = [] -%} 
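+      {#- ns is a namespace() object: attributes assigned on it inside the for-loops below persist across iterations, unlike plain 'set' variables (Jinja2 loop scoping) -#}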
+ {%- set ns.initContainers = [] -%} + + {%- for document in _documents -%} + {%- set ns.images = document.spec.template.spec.containers | map(attribute='image') | list -%} + + {%- if ns.images | map('regex_search', image_regexp) | select | list -%} + {%- set ns.prefixes = (ns.images | map('regex_replace', image_regexp) | unique | list) + [image_registry_address ~ '/'] -%} + {%- set ns.lengths = ns.prefixes | map('length') | list -%} + {%- set ns.prefixes_sorted = ns.prefixes | zip(ns.lengths) | sort(attribute=1, reverse=true) | list -%} + + {%- for container in document.spec.template.spec.containers -%} + {%- set ns.image = container.image -%} + + {%- for prefix, _ in ns.prefixes_sorted -%} + {%- set ns.image = ns.image | regex_replace('^' ~ prefix) -%} + {%- endfor -%} + + {%- set ns.image = image_registry_address ~ '/' ~ ns.image -%} + + {{- + ns.containers.append({ + "metadata": { "name": document.metadata.name, "namespace": document.metadata.namespace }, + "data": { "name": container.name, "image": ns.image }, + "changed": ns.image != container.image, + }) + -}} + {%- endfor -%} + + {%- for container in document.spec.template.spec.initContainers | default([]) -%} + {%- set ns.image = container.image -%} + + {%- for prefix, _ in ns.prefixes_sorted -%} + {%- set ns.image = ns.image | regex_replace('^' ~ prefix) -%} + {%- endfor -%} + + {%- set ns.image = image_registry_address ~ '/' ~ ns.image -%} + + {{- + ns.initContainers.append({ + "metadata": { "name": document.metadata.name, "namespace": document.metadata.namespace }, + "data": { "name": container.name, "image": ns.image }, + "changed": ns.image != container.image, + }) + -}} + {%- endfor -%} + + {%- endif -%} + + {%- endfor -%} - # Extract statefulset names from all documents - _statefulsets: >- - {{ _documents | map(attribute='metadata.name') | list }} + {{- dict(containers=ns.containers, initContainers=ns.initContainers) -}} From 5c9cdb6743087ee59ccf2517adae947b920b5780 Mon Sep 17 00:00:00 2001 From: to-bar <46519524+to-bar@users.noreply.github.com> Date: Sat, 18 Jul 2020 11:27:15 +0200 Subject: [PATCH 13/19] Ensure settings for backward compatibility --- .../cli/engine/ansible/AnsibleInventoryUpgrade.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py b/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py index 17c86dc2f1..2f76d17259 100644 --- a/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py +++ b/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py @@ -82,6 +82,16 @@ def upgrade(self): manifest_docs = load_yamls_file(path_to_manifest) self.shared_config = select_single(manifest_docs, lambda x: x.kind == 'configuration/shared-config') + # Ensure settings for backward compatibility + compatibility_settings = { + 'vault_location': '', # for Epiphany v0.4 + 'use_ha_control_plane': False, # for Epiphany v0.5 + 'promote_to_ha': False # for Epiphany v0.5 + } + for key, value in compatibility_settings.items(): + if key not in self.shared_config.specification: + self.shared_config.specification[key] = value + if build_version == BUILD_LEGACY: self.logger.info(f'Upgrading Ansible inventory Epiphany < 0.3.0') From 63085d7dfd2e9d5d2bbb03e5988dd6e917f0f7c1 Mon Sep 17 00:00:00 2001 From: Michal Opala Date: Wed, 22 Jul 2020 14:24:58 +0200 Subject: [PATCH 14/19] Revert "Ensure settings for backward compatibility" This reverts commit 5c9cdb6743087ee59ccf2517adae947b920b5780. 
--- .../cli/engine/ansible/AnsibleInventoryUpgrade.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py b/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py index 2f76d17259..17c86dc2f1 100644 --- a/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py +++ b/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py @@ -82,16 +82,6 @@ def upgrade(self): manifest_docs = load_yamls_file(path_to_manifest) self.shared_config = select_single(manifest_docs, lambda x: x.kind == 'configuration/shared-config') - # Ensure settings for backward compatibility - compatibility_settings = { - 'vault_location': '', # for Epiphany v0.4 - 'use_ha_control_plane': False, # for Epiphany v0.5 - 'promote_to_ha': False # for Epiphany v0.5 - } - for key, value in compatibility_settings.items(): - if key not in self.shared_config.specification: - self.shared_config.specification[key] = value - if build_version == BUILD_LEGACY: self.logger.info(f'Upgrading Ansible inventory Epiphany < 0.3.0') From 7d37c9b44fd75d852aea1e34b9c86623df566057 Mon Sep 17 00:00:00 2001 From: Michal Opala Date: Wed, 22 Jul 2020 14:03:04 +0200 Subject: [PATCH 15/19] AnsibleInventoryUpgrade.py: merging shared-config with defaults --- .../epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py b/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py index 17c86dc2f1..1f53de779e 100644 --- a/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py +++ b/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py @@ -13,6 +13,8 @@ from cli.models.AnsibleHostModel import AnsibleHostModel from cli.models.AnsibleInventoryItem import AnsibleInventoryItem +from cli.engine.schema.DefaultMerger import DefaultMerger + class AnsibleInventoryUpgrade(Step): def __init__(self, build_dir, backup_build_dir): @@ -82,6 +84,10 @@ def upgrade(self): manifest_docs = load_yamls_file(path_to_manifest) self.shared_config = select_single(manifest_docs, lambda x: x.kind == 'configuration/shared-config') + # Merge the shared config doc with defaults + with DefaultMerger([self.shared_config]) as doc_merger: + self.shared_config = doc_merger.run()[0] + if build_version == BUILD_LEGACY: self.logger.info(f'Upgrading Ansible inventory Epiphany < 0.3.0') From 42833601e2b4b49022e268138edce72aad722edf Mon Sep 17 00:00:00 2001 From: Michal Opala Date: Wed, 22 Jul 2020 19:36:27 +0200 Subject: [PATCH 16/19] Adding changelog entry --- CHANGELOG-0.7.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG-0.7.md b/CHANGELOG-0.7.md index 12833679ef..4f95624080 100644 --- a/CHANGELOG-0.7.md +++ b/CHANGELOG-0.7.md @@ -13,6 +13,7 @@ - [#1399](https://github.com/epiphany-platform/epiphany/issues/1399) - Epicli upgrade: Kubernetes upgrade may hang - [#1398](https://github.com/epiphany-platform/epiphany/issues/1398) - Vault installation fails when using canal/calico network plugin - [#1412](https://github.com/epiphany-platform/epiphany/issues/1412) - Certificate in Vault is also generated or copied even if flag in configuration tls_disable is set to true +- [#1408](https://github.com/epiphany-platform/epiphany/issues/1408) - Epiphany does not support upgrades for Kubernetes in HA mode ### Added From a987ca2bd9276890f349c616365b82b07784120b Mon Sep 17 00:00:00 2001 From: Michal Opala Date: Wed, 22 Jul 2020 20:24:31 +0200 Subject: [PATCH 17/19] Revert 
"AnsibleVarsGenerator.py: fixing regression issue introducted during upgrade refactor" This reverts commit c38eb9d580ac9e52d7de0762087d28c0afc01cc8. --- core/src/epicli/cli/engine/ansible/AnsibleVarsGenerator.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/core/src/epicli/cli/engine/ansible/AnsibleVarsGenerator.py b/core/src/epicli/cli/engine/ansible/AnsibleVarsGenerator.py index 65c88b2701..016462b7c5 100644 --- a/core/src/epicli/cli/engine/ansible/AnsibleVarsGenerator.py +++ b/core/src/epicli/cli/engine/ansible/AnsibleVarsGenerator.py @@ -93,11 +93,7 @@ def populate_group_vars(self, ansible_dir): main_vars['is_upgrade_run'] = self.is_upgrade_run main_vars['roles_with_generated_vars'] = sorted(self.roles_with_generated_vars) - if self.is_upgrade_run: - shared_config_doc = self.inventory_upgrade.shared_config - else: - shared_config_doc = select_first(self.config_docs, lambda x: x.kind == 'configuration/shared-config') - + shared_config_doc = self.inventory_upgrade.shared_config if shared_config_doc == None: shared_config_doc = load_yaml_obj(types.DEFAULT, 'common', 'configuration/shared-config') From 5cfa13fd6f44b2854c676d09a1544958474762d6 Mon Sep 17 00:00:00 2001 From: Michal Opala Date: Wed, 22 Jul 2020 20:28:16 +0200 Subject: [PATCH 18/19] Revert "epicli/upgrade: reusing existing shared-config + cleanups" This reverts commit e5957c55d2e78c51fcddd461043d0347af256d2b. --- core/src/epicli/cli/engine/UpgradeEngine.py | 10 ++--- .../engine/ansible/AnsibleInventoryUpgrade.py | 45 +++++-------------- .../engine/ansible/AnsibleVarsGenerator.py | 6 +-- 3 files changed, 20 insertions(+), 41 deletions(-) diff --git a/core/src/epicli/cli/engine/UpgradeEngine.py b/core/src/epicli/cli/engine/UpgradeEngine.py index 9e15469d58..756d20a119 100644 --- a/core/src/epicli/cli/engine/UpgradeEngine.py +++ b/core/src/epicli/cli/engine/UpgradeEngine.py @@ -28,24 +28,24 @@ def get_backup_dirs(self): for d in os.listdir(self.build_dir): bd = os.path.join(self.build_dir, d) if os.path.isdir(bd) and re.match(r'backup_\d', d): result.append(bd) - return result + return result def backup_build(self): - # Check if there are backup dirs and if so take the latest to work with + # check if there are backup dirs and if so take the latest to work with. backup_dirs = self.get_backup_dirs() if len(backup_dirs) > 0: self.backup_build_dir = max(backup_dirs , key=os.path.getmtime) self.logger.info(f'There is already a backup present. 
Using latest for upgrade: "{self.backup_build_dir}"') return - # No backup dir so use the latest + # no backup dir so use the latest backup_dir_name = f'backup_{int(round(time.time() * 1000))}' self.backup_build_dir = os.path.join(self.build_dir, backup_dir_name ) self.logger.info(f'Backing up build dir to "{self.backup_build_dir}"') shutil.copytree(self.build_dir, self.backup_build_dir) def upgrade(self): - # Backup existing build + # backup existing build self.backup_build() # Run Ansible to upgrade infrastructure @@ -53,4 +53,4 @@ def upgrade(self): ansible_options=self.ansible_options) as ansible_runner: ansible_runner.upgrade() - return 0 + return 0 \ No newline at end of file diff --git a/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py b/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py index 1f53de779e..fabd077442 100644 --- a/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py +++ b/core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py @@ -1,19 +1,11 @@ -import os - from ansible.parsing.dataloader import DataLoader from ansible.inventory.manager import InventoryManager - from cli.helpers.Step import Step -from cli.helpers.build_saver import (get_inventory_path_for_build, check_build_output_version, BUILD_LEGACY, - save_inventory, MANIFEST_FILE_NAME) -from cli.helpers.data_loader import load_yamls_file -from cli.helpers.objdict_helpers import dict_to_objdict -from cli.helpers.doc_list_helpers import select_single - +from cli.helpers.build_saver import get_inventory_path_for_build, check_build_output_version, BUILD_LEGACY from cli.models.AnsibleHostModel import AnsibleHostModel from cli.models.AnsibleInventoryItem import AnsibleInventoryItem - -from cli.engine.schema.DefaultMerger import DefaultMerger +from cli.helpers.build_saver import save_inventory +from cli.helpers.objdict_helpers import dict_to_objdict class AnsibleInventoryUpgrade(Step): @@ -22,14 +14,13 @@ def __init__(self, build_dir, backup_build_dir): self.build_dir = build_dir self.backup_build_dir = backup_build_dir self.cluster_model = None - self.shared_config = None def __enter__(self): super().__enter__() return self def __exit__(self, exc_type, exc_value, traceback): - super().__exit__(exc_type, exc_value, traceback) + super().__exit__(exc_type, exc_value, traceback) def get_role(self, inventory, role_name): for role in inventory: @@ -41,7 +32,7 @@ def delete_role(self, inventory, role_name): for i in range(len(inventory)): if inventory[i].role == role_name: del inventory[i] - return + return def rename_role(self, inventory, role_name, new_role_name): role = self.get_role(inventory, role_name) @@ -49,13 +40,13 @@ def rename_role(self, inventory, role_name, new_role_name): role.role = new_role_name def upgrade(self): - inventory_path = get_inventory_path_for_build(self.backup_build_dir) + inventory_path = get_inventory_path_for_build(self.backup_build_dir) build_version = check_build_output_version(self.backup_build_dir) self.logger.info(f'Loading backup Ansible inventory: {inventory_path}') loaded_inventory = InventoryManager(loader = DataLoader(), sources=inventory_path) - # Move loaded inventory to templating structure + # move loaded inventory to templating structure new_inventory = [] for key in loaded_inventory.groups: if key != 'all' and key != 'ungrouped': @@ -65,7 +56,7 @@ def upgrade(self): new_hosts.append(AnsibleHostModel(host.address, host.vars['ansible_host'])) new_inventory.append(AnsibleInventoryItem(key, new_hosts)) - # Reconstruct cluster model with all 
data necessary to run required upgrade rolls + # re-constructure cluster model with all data necessary to run required upgrade rolls self.cluster_model = dict_to_objdict({ 'provider': 'any', 'specification': { @@ -76,18 +67,6 @@ def upgrade(self): } }) - # Reuse shared config from existing manifest - # Shared config contains the use_ha_control_plane flag which is required during upgrades - path_to_manifest = os.path.join(self.backup_build_dir, MANIFEST_FILE_NAME) - if not os.path.isfile(path_to_manifest): - raise Exception('No manifest.yml inside the build folder') - manifest_docs = load_yamls_file(path_to_manifest) - self.shared_config = select_single(manifest_docs, lambda x: x.kind == 'configuration/shared-config') - - # Merge the shared config doc with defaults - with DefaultMerger([self.shared_config]) as doc_merger: - self.shared_config = doc_merger.run()[0] - if build_version == BUILD_LEGACY: self.logger.info(f'Upgrading Ansible inventory Epiphany < 0.3.0') @@ -100,7 +79,7 @@ def upgrade(self): self.rename_role(new_inventory, 'kafka-exporter', 'kafka_exporter') self.rename_role(new_inventory, 'haproxy_tls_termination', 'haproxy') - # Remove linux and reboot roles if present + # remove linux and reboot roles if present self.delete_role(new_inventory, 'linux') self.delete_role(new_inventory, 'reboot') else: @@ -112,21 +91,21 @@ def upgrade(self): raise Exception('No kubernetes_master to use as repository') master_node = master.hosts[0] - # Add image_registry + # add image_registry image_registry = self.get_role(new_inventory, 'image_registry') if image_registry == None: hosts = [] hosts.append(AnsibleHostModel(master_node.name, master_node.ip)) new_inventory.append(AnsibleInventoryItem('image_registry', hosts)) - # Add repository + # add repository repository = self.get_role(new_inventory, 'repository') if repository == None: hosts = [] hosts.append(AnsibleHostModel(master_node.name, master_node.ip)) new_inventory.append(AnsibleInventoryItem('repository', hosts)) - # Save new inventory + # save new inventory save_inventory(new_inventory, self.cluster_model, self.build_dir) return 0 diff --git a/core/src/epicli/cli/engine/ansible/AnsibleVarsGenerator.py b/core/src/epicli/cli/engine/ansible/AnsibleVarsGenerator.py index 016462b7c5..f4853e5db1 100644 --- a/core/src/epicli/cli/engine/ansible/AnsibleVarsGenerator.py +++ b/core/src/epicli/cli/engine/ansible/AnsibleVarsGenerator.py @@ -93,10 +93,10 @@ def populate_group_vars(self, ansible_dir): main_vars['is_upgrade_run'] = self.is_upgrade_run main_vars['roles_with_generated_vars'] = sorted(self.roles_with_generated_vars) - shared_config_doc = self.inventory_upgrade.shared_config + shared_config_doc = select_first(self.config_docs, lambda x: x.kind == 'configuration/shared-config') if shared_config_doc == None: shared_config_doc = load_yaml_obj(types.DEFAULT, 'common', 'configuration/shared-config') - + self.set_vault_path(shared_config_doc) main_vars.update(shared_config_doc.specification) @@ -115,7 +115,7 @@ def set_vault_path(self, shared_config): shared_config.specification.vault_tmp_file_location = Config().vault_password_location cluster_name = self.get_cluster_name() shared_config.specification.vault_location = get_ansible_vault_path(cluster_name) - + def get_cluster_name(self): if 'name' in self.cluster_model.specification.keys(): return self.cluster_model.specification.name From 41ed3550426aa2239ed3f153e7ca5014f03dd4f8 Mon Sep 17 00:00:00 2001 From: Michal Opala Date: Wed, 22 Jul 2020 21:55:05 +0200 Subject: [PATCH 19/19] 
AnsibleVarsGenerator.py: adding nicer way to handle shared config --- .../engine/ansible/AnsibleVarsGenerator.py | 42 +++++++++++++++---- 1 file changed, 35 insertions(+), 7 deletions(-) diff --git a/core/src/epicli/cli/engine/ansible/AnsibleVarsGenerator.py b/core/src/epicli/cli/engine/ansible/AnsibleVarsGenerator.py index f4853e5db1..3d0686a5e9 100644 --- a/core/src/epicli/cli/engine/ansible/AnsibleVarsGenerator.py +++ b/core/src/epicli/cli/engine/ansible/AnsibleVarsGenerator.py @@ -2,13 +2,15 @@ import copy from cli.helpers.Step import Step -from cli.helpers.build_saver import get_ansible_path, get_ansible_path_for_build, get_ansible_vault_path -from cli.helpers.doc_list_helpers import select_first +from cli.helpers.build_saver import get_ansible_path, get_ansible_path_for_build, get_ansible_vault_path, MANIFEST_FILE_NAME +from cli.helpers.doc_list_helpers import select_first, select_single from cli.helpers.naming_helpers import to_feature_name, to_role_name from cli.helpers.ObjDict import ObjDict from cli.helpers.yaml_helpers import dump from cli.helpers.Config import Config -from cli.helpers.data_loader import load_yaml_obj, types, load_all_documents_from_folder +from cli.helpers.data_loader import load_yaml_obj, types, load_yamls_file, load_all_documents_from_folder + +from cli.engine.schema.DefaultMerger import DefaultMerger class AnsibleVarsGenerator(Step): @@ -93,10 +95,14 @@ def populate_group_vars(self, ansible_dir): main_vars['is_upgrade_run'] = self.is_upgrade_run main_vars['roles_with_generated_vars'] = sorted(self.roles_with_generated_vars) - shared_config_doc = select_first(self.config_docs, lambda x: x.kind == 'configuration/shared-config') - if shared_config_doc == None: + if self.is_upgrade_run: + shared_config_doc = self.get_shared_config_from_manifest() + else: + shared_config_doc = select_first(self.config_docs, lambda x: x.kind == 'configuration/shared-config') + + if shared_config_doc is None: shared_config_doc = load_yaml_obj(types.DEFAULT, 'common', 'configuration/shared-config') - + self.set_vault_path(shared_config_doc) main_vars.update(shared_config_doc.specification) @@ -115,7 +121,7 @@ def set_vault_path(self, shared_config): shared_config.specification.vault_tmp_file_location = Config().vault_password_location cluster_name = self.get_cluster_name() shared_config.specification.vault_location = get_ansible_vault_path(cluster_name) - + def get_cluster_name(self): if 'name' in self.cluster_model.specification.keys(): return self.cluster_model.specification.name @@ -128,6 +134,28 @@ def get_clean_cluster_model(self): self.clear_object(cluster_model, 'credentials') return cluster_model + def get_shared_config_from_manifest(self): + # Reuse shared config from existing manifest + # Shared config contains the use_ha_control_plane flag which is required during upgrades + + path_to_manifest = os.path.join(self.inventory_upgrade.build_dir, MANIFEST_FILE_NAME) + if not os.path.isfile(path_to_manifest): + raise Exception('No manifest.yml inside the build folder') + + manifest_docs = load_yamls_file(path_to_manifest) + + cluster_model = select_single(manifest_docs, lambda x: x.kind == 'epiphany-cluster') + + shared_config_doc = select_single(manifest_docs, lambda x: x.kind == 'configuration/shared-config') + shared_config_doc['provider'] = cluster_model['provider'] + + # Merge the shared config doc with defaults + with DefaultMerger([shared_config_doc]) as doc_merger: + shared_config_doc = doc_merger.run()[0] + del shared_config_doc['provider'] + + return shared_config_doc 
+ def clear_object(self, obj_to_clean, key_to_clean): for key, val in obj_to_clean.items(): if key == key_to_clean: