From 22bfb82ae63744d6a51269442c4da01d76791611 Mon Sep 17 00:00:00 2001
From: Anatoli Tsikhamirau
Date: Fri, 11 Feb 2022 12:23:37 +0100
Subject: [PATCH] K8s upgrade tasks refactoring (#2956)

---
 ansible/playbooks/group_vars/all.yml             |  2 -
 .../group_vars/kubernetes_master.yml             |  1 +
 .../playbooks/group_vars/kubernetes_node.yml     |  1 +
 .../roles/upgrade/tasks/kubernetes.yml           | 91 ++++++-------------
 .../tasks/kubernetes/upgrade-master0.yml         |  3 +
 docs/changelogs/CHANGELOG-2.0.md                 |  1 +
 6 files changed, 35 insertions(+), 64 deletions(-)
 create mode 100644 ansible/playbooks/group_vars/kubernetes_master.yml
 create mode 100644 ansible/playbooks/group_vars/kubernetes_node.yml

diff --git a/ansible/playbooks/group_vars/all.yml b/ansible/playbooks/group_vars/all.yml
index dd8af8d460..18d39626d4 100644
--- a/ansible/playbooks/group_vars/all.yml
+++ b/ansible/playbooks/group_vars/all.yml
@@ -15,5 +15,3 @@ yum_lock_timeout: 300
 global_architecture_alias:
   x86_64: amd64
   aarch64: arm64
-
-
diff --git a/ansible/playbooks/group_vars/kubernetes_master.yml b/ansible/playbooks/group_vars/kubernetes_master.yml
new file mode 100644
index 0000000000..766e22a034
--- /dev/null
+++ b/ansible/playbooks/group_vars/kubernetes_master.yml
@@ -0,0 +1 @@
+node_role: control-plane
diff --git a/ansible/playbooks/group_vars/kubernetes_node.yml b/ansible/playbooks/group_vars/kubernetes_node.yml
new file mode 100644
index 0000000000..30a442582d
--- /dev/null
+++ b/ansible/playbooks/group_vars/kubernetes_node.yml
@@ -0,0 +1 @@
+node_role: worker
diff --git a/ansible/playbooks/roles/upgrade/tasks/kubernetes.yml b/ansible/playbooks/roles/upgrade/tasks/kubernetes.yml
index 7ec0fdc930..6f981e4366 100644
--- a/ansible/playbooks/roles/upgrade/tasks/kubernetes.yml
+++ b/ansible/playbooks/roles/upgrade/tasks/kubernetes.yml
@@ -1,5 +1,5 @@
 ---
-- name: k8s | Wait for kube-apiserver
+- name: k8s/{{ node_role }} | Wait for kube-apiserver
   delegate_to: >-
     {{ inventory_hostname if inventory_hostname in groups.kubernetes_master else
        groups.kubernetes_master[0] }}
@@ -8,36 +8,21 @@
 # During HA control plane upgrade server address in kubeconfig is switched to local for
 # * compatibility between client and server versions
 # * identifying correct server version
-- name: k8s/master | Switch apiserver address to local
+- name: k8s/{{ node_role }} | Switch apiserver address to local
   include_tasks: kubernetes/utils/set-local-apiserver.yml # sets kubectl_context_cluster
   when:
     - groups.kubernetes_master | length > 1
     - inventory_hostname in groups.kubernetes_master
 
-- name: Get cluster version and set version facts
-  delegate_to: >-
-    {{ inventory_hostname if inventory_hostname in groups.kubernetes_master else
-       groups.kubernetes_master[0] }}
-  block:
-    - name: k8s | Include get-cluster-version.yml
-      import_tasks: kubernetes/get-cluster-version.yml
-
-    - name: k8s | Set cluster version facts
-      set_fact:
-        initial_cluster_version: "{{ _cluster_version }}"
-        cluster_version: "{{ _cluster_version }}"
-      vars:
-        _cluster_version: "{{ (kubectl_cluster_version.stdout | from_yaml).serverVersion.gitVersion }}"
-
-- name: k8s | Include get-kubelet-version.yml
+- name: k8s/{{ node_role }} | Include get-kubelet-version.yml
   import_tasks: kubernetes/get-kubelet-version.yml
   delegate_to: "{{ groups.kubernetes_master[0] }}"
 
-- name: k8s | Set kubelet version as fact
+- name: k8s/{{ node_role }} | Set kubelet version as fact
   set_fact:
     initial_kubelet_version: "{{ kubelet_version.stdout }}"
 
-- name: Check if upgrade state file exists
+- name: k8s/{{ node_role }} | Check if upgrade state file exists
   stat:
     path: "{{ kubernetes.upgrade_state_file_path }}"
     get_attributes: false
@@ -49,56 +34,38 @@
   vars:
     version: "{{ ver }}"
     cni_version: "{{ cni_ver }}"
+  when:
+    - k8s_upgrade_state_file_status.stat.exists
+      or initial_kubelet_version is version('v' + version, '<')
   block:
-    - name: Upgrade masters
-      when:
-        - inventory_hostname in groups.kubernetes_master
-        - k8s_upgrade_state_file_status.stat.exists
-          or initial_cluster_version is version('v' + version, '<')
-      block:
-        - name: Create K8s upgrade state file on master node
-          copy:
-            dest: "{{ kubernetes.upgrade_state_file_path }}"
-            content: Upgrade started
-            mode: u=rw,g=r,o=
-
-        - name: k8s | Upgrade first master to v{{ version }}
-          include_tasks: kubernetes/upgrade-master0.yml
-          when:
-            - inventory_hostname == groups.kubernetes_master[0]
+    - name: k8s/{{ node_role }} | Create K8s upgrade state file
+      copy:
+        dest: "{{ kubernetes.upgrade_state_file_path }}"
+        content: Upgrade started
+        mode: u=rw,g=r,o=
 
-        - name: k8s | Upgrade next master to v{{ version }}
-          include_tasks: kubernetes/upgrade-masterN.yml
-          when:
-            - inventory_hostname in groups.kubernetes_master[1:]
+    - name: k8s/{{ node_role }} | Upgrade first master to v{{ version }}
+      include_tasks: kubernetes/upgrade-master0.yml
+      when:
+        - inventory_hostname == groups.kubernetes_master[0]
 
-        - name: Remove K8s upgrade state file on master node
-          file:
-            path: "{{ kubernetes.upgrade_state_file_path }}"
-            state: absent
+    - name: k8s/{{ node_role }} | Upgrade next master to v{{ version }}
+      include_tasks: kubernetes/upgrade-masterN.yml
+      when:
+        - inventory_hostname in groups.kubernetes_master[1:]
 
-    - name: Upgrade nodes
+    - name: k8s/{{ node_role }} | Upgrade node to v{{ version }}
       when:
         - groups.kubernetes_node is defined
         - inventory_hostname in groups.kubernetes_node
-        - k8s_upgrade_state_file_status.stat.exists
-          or initial_kubelet_version is version('v' + version, '<')
-      block:
-        - name: Create K8s upgrade state file on node
-          copy:
-            dest: "{{ kubernetes.upgrade_state_file_path }}"
-            content: Upgrade started
-            mode: u=rw,g=r,o=
-
-        - name: k8s | Upgrade node to v{{ version }}
-          include_tasks: kubernetes/upgrade-node.yml
+      include_tasks: kubernetes/upgrade-node.yml
 
-        - name: Remove K8s upgrade state file on node
-          file:
-            path: "{{ kubernetes.upgrade_state_file_path }}"
-            state: absent
+    - name: k8s/{{ node_role }} | Remove K8s upgrade state file
+      file:
+        path: "{{ kubernetes.upgrade_state_file_path }}"
+        state: absent
 
-- name: k8s/master | Switch apiserver address to HAProxy
+- name: k8s/{{ node_role }} | Switch apiserver address to HAProxy
   command: |-
     kubectl config set-cluster {{ kubectl_context_cluster.stdout }} --server=https://localhost:3446
   when:
@@ -106,5 +73,5 @@
     - inventory_hostname in groups.kubernetes_master
   changed_when: true
 
-- name: k8s | Upgrade internal haproxy load-balancer
+- name: k8s/{{ node_role }} | Upgrade internal haproxy load-balancer
   import_tasks: kubernetes/upgrade-haproxy.yml
diff --git a/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml b/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml
index 5afc84d972..4fef7acc15 100644
--- a/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml
+++ b/ansible/playbooks/roles/upgrade/tasks/kubernetes/upgrade-master0.yml
@@ -38,6 +38,9 @@
       delay: 30
       changed_when: false
 
+  - name: k8s/master0 | Include set-cluster-version.yml
+    include_tasks: set-cluster-version.yml # sets cluster_version
+
   # Note: Usage of the --config flag for reconfiguring the cluster during upgrade is not recommended since v1.16
   - name: k8s/master0 | Upgrade K8s cluster to v{{ version }}
     command: >-
diff --git a/docs/changelogs/CHANGELOG-2.0.md b/docs/changelogs/CHANGELOG-2.0.md
index b3a791b8cd..b285d5c91e 100644
--- a/docs/changelogs/CHANGELOG-2.0.md
+++ b/docs/changelogs/CHANGELOG-2.0.md
@@ -8,6 +8,7 @@
 - [#2701](https://github.com/epiphany-platform/epiphany/issues/2701) - Epicli prepare - generate files in separate directory
 - [#2812](https://github.com/epiphany-platform/epiphany/issues/2812) - Extend K8s config validation
 - [#2950](https://github.com/epiphany-platform/epiphany/issues/2950) - CLI refactor to make it more consistant
+- [#2844](https://github.com/epiphany-platform/epiphany/issues/2844) - Refactor K8s upgrade task in order to simplify its flow
 
 ### Fixed