From bcc3065fae8c08a9ec65741bb8f3bc93b3fd8468 Mon Sep 17 00:00:00 2001
From: Vinayak Goyal
Date: Sat, 19 Jun 2021 11:51:05 -0700
Subject: [PATCH] Add e2e tests for rootless control-plane.

---
 kinder/ci/tools/update-workflows/config.yaml  |  12 +
 .../testinfra/kubeadm-kinder-rootless.yaml    |  38 +++
 .../templates/workflows/rootless-tasks.yaml   | 277 +++++++++++++++++
 .../templates/workflows/rootless.yaml         |   9 +
 kinder/ci/workflows/rootless-latest.yaml      |  10 +
 kinder/ci/workflows/rootless-tasks.yaml       | 278 ++++++++++++++++++
 kinder/cmd/kinder/do/do.go                    |   7 +
 kinder/pkg/cluster/manager/actions/actions.go |  10 +-
 .../cluster/manager/actions/kubeadm-init.go   |  14 +-
 9 files changed, 648 insertions(+), 7 deletions(-)
 create mode 100644 kinder/ci/tools/update-workflows/templates/testinfra/kubeadm-kinder-rootless.yaml
 create mode 100644 kinder/ci/tools/update-workflows/templates/workflows/rootless-tasks.yaml
 create mode 100644 kinder/ci/tools/update-workflows/templates/workflows/rootless.yaml
 create mode 100644 kinder/ci/workflows/rootless-latest.yaml
 create mode 100644 kinder/ci/workflows/rootless-tasks.yaml

diff --git a/kinder/ci/tools/update-workflows/config.yaml b/kinder/ci/tools/update-workflows/config.yaml
index 4989d855..f964395b 100644
--- a/kinder/ci/tools/update-workflows/config.yaml
+++ b/kinder/ci/tools/update-workflows/config.yaml
@@ -166,3 +166,15 @@ jobGroups:
     - ./templates/workflows/upgrade-latest-no-addon-config-maps.yaml
   jobs:
   - kubernetesVersion: latest
+
+- name: rootless
+  testInfraJobSpec:
+    targetFile: kubeadm-kinder-rootless.yaml
+    template: ./templates/testinfra/kubeadm-kinder-rootless.yaml
+  kinderWorkflowSpec:
+    targetFile: rootless-{{ .KubernetesVersion }}.yaml
+    template: ./templates/workflows/rootless.yaml
+    additionalFiles:
+    - ./templates/workflows/rootless-tasks.yaml
+  jobs:
+  - kubernetesVersion: latest
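+# The update-workflows tool renders this group into the test-infra job
+# kubeadm-kinder-rootless.yaml and, for each entry under jobs, a kinder
+# workflow rootless-<version>.yaml that imports rootless-tasks.yaml.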
diff --git a/kinder/ci/tools/update-workflows/templates/testinfra/kubeadm-kinder-rootless.yaml b/kinder/ci/tools/update-workflows/templates/testinfra/kubeadm-kinder-rootless.yaml
new file mode 100644
index 00000000..3f321b92
--- /dev/null
+++ b/kinder/ci/tools/update-workflows/templates/testinfra/kubeadm-kinder-rootless.yaml
@@ -0,0 +1,38 @@
+- name: ci-kubernetes-e2e-kubeadm-kinder-rootless-{{ dashVer .KubernetesVersion }}
+  interval: {{ .JobInterval }}
+  decorate: true
+  labels:
+    preset-dind-enabled: "true"
+    preset-kind-volume-mounts: "true"
+  annotations:
+    testgrid-dashboards: sig-cluster-lifecycle-kubeadm
+    testgrid-tab-name: kubeadm-kinder-rootless-{{ dashVer .KubernetesVersion }}
+    testgrid-alert-email: sig-cluster-lifecycle-kubeadm-alerts@kubernetes.io
+    description: "OWNER: sig-cluster-lifecycle (kinder); Uses kubeadm/kinder to create a cluster with a rootless control plane and run kubeadm-e2e and the conformance suite"
+    testgrid-num-columns-recent: "20"
+{{ .AlertAnnotations }}
+  decoration_config:
+    timeout: 60m
+  extra_refs:
+  - org: kubernetes
+    repo: kubernetes
+    base_ref: {{ branchFor .KubernetesVersion }}
+    path_alias: k8s.io/kubernetes
+  - org: kubernetes
+    repo: kubeadm
+    base_ref: master
+    path_alias: k8s.io/kubeadm
+  spec:
+    containers:
+    - image: gcr.io/k8s-testimages/kubekins-e2e:{{ .TestInfraImage }}-{{ imageVer .KubernetesVersion }}
+      command:
+      - runner.sh
+      - "../kubeadm/kinder/ci/kinder-run.sh"
+      args:
+      - {{ .WorkflowFile }}
+      securityContext:
+        privileged: true
+      resources:
+        requests:
+          memory: "9000Mi"
+          cpu: 2000m
diff --git a/kinder/ci/tools/update-workflows/templates/workflows/rootless-tasks.yaml b/kinder/ci/tools/update-workflows/templates/workflows/rootless-tasks.yaml
new file mode 100644
index 00000000..19a1be7e
--- /dev/null
+++ b/kinder/ci/tools/update-workflows/templates/workflows/rootless-tasks.yaml
@@ -0,0 +1,277 @@
+# IMPORTANT! this workflow is imported by the rootless-* workflows.
+version: 1
+summary: |
+  This workflow implements a sequence of tasks used to test the proper functioning
+  of kubeadm version X with Kubernetes version X, with a rootless control plane.
+vars:
+  # vars defines default values for the variables used by tasks in this workflow;
+  # those values might be overridden when importing this file.
+  kubernetesVersion: v1.13.5
+  controlPlaneNodes: 3
+  workerNodes: 2
+  baseImage: kindest/base:v20191105-ee880e9b # has containerd
+  image: kindest/node:test
+  clusterName: kinder-rootless
+  kubeadmVerbosity: 6
+tasks:
+- name: pull-base-image
+  description: |
+    pulls kindest/base image with docker in docker and all the prerequisites necessary for running kind(er)
+  cmd: docker
+  args:
+    - pull
+    - "{{ .vars.baseImage }}"
+- name: add-kubernetes-versions
+  description: |
+    creates a node-image-variant by adding a Kubernetes version
+  cmd: kinder
+  args:
+    - build
+    - node-image-variant
+    - --base-image={{ .vars.baseImage }}
+    - --image={{ .vars.image }}
+    - --with-init-artifacts={{ .vars.kubernetesVersion }}
+    - --loglevel=debug
+  timeout: 15m
+- name: create-cluster
+  description: |
+    create a set of nodes ready for hosting the Kubernetes cluster
+  cmd: kinder
+  args:
+    - create
+    - cluster
+    - --name={{ .vars.clusterName }}
+    - --image={{ .vars.image }}
+    - --control-plane-nodes={{ .vars.controlPlaneNodes }}
+    - --worker-nodes={{ .vars.workerNodes }}
+    - --loglevel=debug
+  timeout: 5m
+- name: prepare verify-rootless.sh script
+  cmd: /bin/sh
+  args:
+    - -c
+    - |
+      cat <<EOF >/tmp/verify-rootless.sh
+      #!/usr/bin/env bash
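+      # Verifies that kubeadm created the per-component system users and groups on the
+      # node and that every control-plane component (kube-apiserver, kube-controller-manager,
+      # kube-scheduler, etcd) actually runs as its dedicated user and group, with the
+      # service-account key readers group attached as a supplemental group where expected.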
+      res=0
+      users=("kubeadm-kas" "kubeadm-ks" "kubeadm-kcm" "kubeadm-etcd")
+      for d in \${users[@]}; do
+        if grep -q "\$d" /etc/passwd ; then
+          echo "/etc/passwd has user \$d!"
+        else
+          echo "ERROR: /etc/passwd does not have user \$d"
+          res=1
+        fi
+      done
+
+      groups=("kubeadm-kas" "kubeadm-ks" "kubeadm-kcm" "kubeadm-etcd" "kubeadm-sa-key-readers")
+      for d in \${groups[@]}; do
+        if grep -q "\$d" /etc/group ; then
+          echo "/etc/group has group \$d!"
+        else
+          echo "ERROR: /etc/group does not have group \$d"
+          res=1
+        fi
+      done
+
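+      # For each component, inspect the user, group and supplementary-group columns of
+      # ps for the PID(s) found via pgrep.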
+ echo "" + exit 1 + fi + EOF + + chmod +x /tmp/verify-rootless.sh +- name: copy verify-rootless.sh on controlplane nodes + cmd: kinder + args: + - cp + - --name={{ .vars.clusterName }} + - /tmp/verify-rootless.sh + - "@cp*:/kinder/verify-rootless.sh" + - --loglevel=debug +- name: init + description: | + Initializes the Kubernetes cluster with version "initVersion" + by starting the boostrap control-plane nodes + cmd: kinder + args: + - do + - kubeadm-init + - --name={{ .vars.clusterName }} + - --loglevel=debug + - --kubeadm-verbosity={{ .vars.kubeadmVerbosity }} + - --feature-gates="RootlessControlPlane=true" + timeout: 5m +- name: join + description: | + Join the other nodes to the Kubernetes cluster + cmd: kinder + args: + - do + - kubeadm-join + - --name={{ .vars.clusterName }} + - --loglevel=debug + - --kubeadm-verbosity={{ .vars.kubeadmVerbosity }} + timeout: 10m +- name: run verify-rootless.sh on controlplane nodes before upgrades + cmd: kinder + args: + - exec + - --name={{ .vars.clusterName }} + - "@cp*" + - /kinder/verify-rootless.sh + - --loglevel=debug +- name: e2e-kubeadm + description: | + Runs kubeadm e2e tests + cmd: kinder + args: + - test + - e2e-kubeadm + - --test-flags=--report-dir={{ .env.ARTIFACTS }} --report-prefix=e2e-kubeadm + - --name={{ .vars.clusterName }} + - --loglevel=debug + timeout: 10m +- name: e2e + description: | + Runs Kubernetes e2e test (conformance) + cmd: kinder + args: + - test + - e2e + - --test-flags=--report-dir={{ .env.ARTIFACTS }} --report-prefix=e2e + - --parallel + - --name={{ .vars.clusterName }} + - --loglevel=debug + timeout: 35m +- name: upgrade + description: | + upgrades the cluster to Kubernetes "upgradeVersion" + cmd: kinder + args: + - do + - kubeadm-upgrade + - --upgrade-version={{ .vars.kubernetesVersion }} + - --name={{ .vars.clusterName }} + - --loglevel=debug + - --kubeadm-verbosity={{ .vars.kubeadmVerbosity }} + timeout: 15m +- name: run verify-rootless.sh on controlplane nodes before upgrades + cmd: kinder + args: + - exec + - --name={{ .vars.clusterName }} + - "@cp*" + - /kinder/verify-rootless.sh + - --loglevel=debug +- name: get-logs + description: | + Collects all the test logs + cmd: kinder + args: + - export + - logs + - --loglevel=debug + - --name={{ .vars.clusterName }} + - "{{ .env.ARTIFACTS }}" + force: true + timeout: 5m + # kind export log is know to be flaky, so we are temporary ignoring errors in order + # to make the test pass in case everything else passed + # see https://github.com/kubernetes-sigs/kind/issues/456 + ignoreError: true +- name: reset + description: | + Exec kubeadm reset + cmd: kinder + args: + - do + - kubeadm-reset + - --name={{ .vars.clusterName }} + - --loglevel=debug + - --kubeadm-verbosity={{ .vars.kubeadmVerbosity }} + force: true +- name: delete + description: | + Deletes the cluster + cmd: kinder + args: + - delete + - cluster + - --name={{ .vars.clusterName }} + - --loglevel=debug + force: true diff --git a/kinder/ci/tools/update-workflows/templates/workflows/rootless.yaml b/kinder/ci/tools/update-workflows/templates/workflows/rootless.yaml new file mode 100644 index 00000000..dfa343fc --- /dev/null +++ b/kinder/ci/tools/update-workflows/templates/workflows/rootless.yaml @@ -0,0 +1,9 @@ +version: 1 +summary: | + This workflow tests the proper functioning of the {{ .KubernetesVersion }} version of both kubeadm and Kubernetes + test grid > https://testgrid.k8s.io/sig-cluster-lifecycle-kubeadm#kubeadm-kinder-rootless{{ dashVer .KubernetesVersion }} + config > 
+- name: init
+  description: |
+    Initializes the Kubernetes cluster with version "kubernetesVersion"
+    by starting the bootstrap control-plane nodes
+  cmd: kinder
+  args:
+    - do
+    - kubeadm-init
+    - --name={{ .vars.clusterName }}
+    - --loglevel=debug
+    - --kubeadm-verbosity={{ .vars.kubeadmVerbosity }}
+    - --kubeadm-feature-gates="RootlessControlPlane=true"
+  timeout: 5m
+- name: join
+  description: |
+    Join the other nodes to the Kubernetes cluster
+  cmd: kinder
+  args:
+    - do
+    - kubeadm-join
+    - --name={{ .vars.clusterName }}
+    - --loglevel=debug
+    - --kubeadm-verbosity={{ .vars.kubeadmVerbosity }}
+  timeout: 10m
+- name: run verify-rootless.sh on controlplane nodes before upgrades
+  cmd: kinder
+  args:
+    - exec
+    - --name={{ .vars.clusterName }}
+    - "@cp*"
+    - /kinder/verify-rootless.sh
+    - --loglevel=debug
+- name: e2e-kubeadm
+  description: |
+    Runs kubeadm e2e tests
+  cmd: kinder
+  args:
+    - test
+    - e2e-kubeadm
+    - --test-flags=--report-dir={{ .env.ARTIFACTS }} --report-prefix=e2e-kubeadm
+    - --name={{ .vars.clusterName }}
+    - --loglevel=debug
+  timeout: 10m
+- name: e2e
+  description: |
+    Runs Kubernetes e2e test (conformance)
+  cmd: kinder
+  args:
+    - test
+    - e2e
+    - --test-flags=--report-dir={{ .env.ARTIFACTS }} --report-prefix=e2e
+    - --parallel
+    - --name={{ .vars.clusterName }}
+    - --loglevel=debug
+  timeout: 35m
+- name: upgrade
+  description: |
+    upgrades the cluster to Kubernetes "kubernetesVersion"
+  cmd: kinder
+  args:
+    - do
+    - kubeadm-upgrade
+    - --upgrade-version={{ .vars.kubernetesVersion }}
+    - --name={{ .vars.clusterName }}
+    - --loglevel=debug
+    - --kubeadm-verbosity={{ .vars.kubeadmVerbosity }}
+  timeout: 15m
+- name: run verify-rootless.sh on controlplane nodes after upgrades
+  cmd: kinder
+  args:
+    - exec
+    - --name={{ .vars.clusterName }}
+    - "@cp*"
+    - /kinder/verify-rootless.sh
+    - --loglevel=debug
+- name: get-logs
+  description: |
+    Collects all the test logs
+  cmd: kinder
+  args:
+    - export
+    - logs
+    - --loglevel=debug
+    - --name={{ .vars.clusterName }}
+    - "{{ .env.ARTIFACTS }}"
+  force: true
+  timeout: 5m
+  # kind export log is known to be flaky, so we are temporarily ignoring errors in order
+  # to make the test pass in case everything else passed
+  # see https://github.com/kubernetes-sigs/kind/issues/456
+  ignoreError: true
+- name: reset
+  description: |
+    Exec kubeadm reset
+  cmd: kinder
+  args:
+    - do
+    - kubeadm-reset
+    - --name={{ .vars.clusterName }}
+    - --loglevel=debug
+    - --kubeadm-verbosity={{ .vars.kubeadmVerbosity }}
+  force: true
+- name: delete
+  description: |
+    Deletes the cluster
+  cmd: kinder
+  args:
+    - delete
+    - cluster
+    - --name={{ .vars.clusterName }}
+    - --loglevel=debug
+  force: true
diff --git a/kinder/ci/tools/update-workflows/templates/workflows/rootless.yaml b/kinder/ci/tools/update-workflows/templates/workflows/rootless.yaml
new file mode 100644
index 00000000..dfa343fc
--- /dev/null
+++ b/kinder/ci/tools/update-workflows/templates/workflows/rootless.yaml
@@ -0,0 +1,9 @@
+version: 1
+summary: |
+  This workflow tests the proper functioning of the {{ .KubernetesVersion }} version of both kubeadm and Kubernetes with a rootless control plane
+  test grid > https://testgrid.k8s.io/sig-cluster-lifecycle-kubeadm#kubeadm-kinder-rootless-{{ dashVer .KubernetesVersion }}
+  config > https://git.k8s.io/test-infra/config/jobs/kubernetes/sig-cluster-lifecycle/{{ .TargetFile }}
+vars:
+  kubernetesVersion: "\{\{ resolve `ci/{{ ciLabelFor .KubernetesVersion }}` \}\}"
+tasks:
+- import: rootless-tasks.yaml
diff --git a/kinder/ci/workflows/rootless-latest.yaml b/kinder/ci/workflows/rootless-latest.yaml
new file mode 100644
index 00000000..ed361fdd
--- /dev/null
+++ b/kinder/ci/workflows/rootless-latest.yaml
@@ -0,0 +1,10 @@
+# AUTOGENERATED by https://git.k8s.io/kubeadm/kinder/ci/tools/update-workflows
+version: 1
+summary: |
+  This workflow tests the proper functioning of the latest version of both kubeadm and Kubernetes with a rootless control plane
+  test grid > https://testgrid.k8s.io/sig-cluster-lifecycle-kubeadm#kubeadm-kinder-rootless-latest
+  config > https://git.k8s.io/test-infra/config/jobs/kubernetes/sig-cluster-lifecycle/kubeadm-kinder-rootless.yaml
+vars:
+  kubernetesVersion: "{{ resolve `ci/latest` }}"
+tasks:
+- import: rootless-tasks.yaml
diff --git a/kinder/ci/workflows/rootless-tasks.yaml b/kinder/ci/workflows/rootless-tasks.yaml
new file mode 100644
index 00000000..e9bdc1b6
--- /dev/null
+++ b/kinder/ci/workflows/rootless-tasks.yaml
@@ -0,0 +1,278 @@
+# AUTOGENERATED by https://git.k8s.io/kubeadm/kinder/ci/tools/update-workflows
+# IMPORTANT! this workflow is imported by the rootless-* workflows.
+version: 1
+summary: |
+  This workflow implements a sequence of tasks used to test the proper functioning
+  of kubeadm version X with Kubernetes version X, with a rootless control plane.
+vars:
+  # vars defines default values for the variables used by tasks in this workflow;
+  # those values might be overridden when importing this file.
+  kubernetesVersion: v1.13.5
+  controlPlaneNodes: 3
+  workerNodes: 2
+  baseImage: kindest/base:v20191105-ee880e9b # has containerd
+  image: kindest/node:test
+  clusterName: kinder-rootless
+  kubeadmVerbosity: 6
+tasks:
+- name: pull-base-image
+  description: |
+    pulls kindest/base image with docker in docker and all the prerequisites necessary for running kind(er)
+  cmd: docker
+  args:
+    - pull
+    - "{{ .vars.baseImage }}"
+- name: add-kubernetes-versions
+  description: |
+    creates a node-image-variant by adding a Kubernetes version
+  cmd: kinder
+  args:
+    - build
+    - node-image-variant
+    - --base-image={{ .vars.baseImage }}
+    - --image={{ .vars.image }}
+    - --with-init-artifacts={{ .vars.kubernetesVersion }}
+    - --loglevel=debug
+  timeout: 15m
+- name: create-cluster
+  description: |
+    create a set of nodes ready for hosting the Kubernetes cluster
+  cmd: kinder
+  args:
+    - create
+    - cluster
+    - --name={{ .vars.clusterName }}
+    - --image={{ .vars.image }}
+    - --control-plane-nodes={{ .vars.controlPlaneNodes }}
+    - --worker-nodes={{ .vars.workerNodes }}
+    - --loglevel=debug
+  timeout: 5m
+- name: prepare verify-rootless.sh script
+  cmd: /bin/sh
+  args:
+    - -c
+    - |
+      cat <<EOF >/tmp/verify-rootless.sh
+      #!/usr/bin/env bash
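+      # Verifies that kubeadm created the per-component system users and groups on the
+      # node and that every control-plane component (kube-apiserver, kube-controller-manager,
+      # kube-scheduler, etcd) actually runs as its dedicated user and group, with the
+      # service-account key readers group attached as a supplemental group where expected.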
+      res=0
+      users=("kubeadm-kas" "kubeadm-ks" "kubeadm-kcm" "kubeadm-etcd")
+      for d in \${users[@]}; do
+        if grep -q "\$d" /etc/passwd ; then
+          echo "/etc/passwd has user \$d!"
+        else
+          echo "ERROR: /etc/passwd does not have user \$d"
+          res=1
+        fi
+      done
+
+      groups=("kubeadm-kas" "kubeadm-ks" "kubeadm-kcm" "kubeadm-etcd" "kubeadm-sa-key-readers")
+      for d in \${groups[@]}; do
+        if grep -q "\$d" /etc/group ; then
+          echo "/etc/group has group \$d!"
+        else
+          echo "ERROR: /etc/group does not have group \$d"
+          res=1
+        fi
+      done
+
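+      # For each component, inspect the user, group and supplementary-group columns of
+      # ps for the PID(s) found via pgrep.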
+ echo "" + exit 1 + fi + EOF + + chmod +x /tmp/verify-rootless.sh +- name: copy verify-rootless.sh on controlplane nodes + cmd: kinder + args: + - cp + - --name={{ .vars.clusterName }} + - /tmp/verify-rootless.sh + - "@cp*:/kinder/verify-rootless.sh" + - --loglevel=debug +- name: init + description: | + Initializes the Kubernetes cluster with version "initVersion" + by starting the boostrap control-plane nodes + cmd: kinder + args: + - do + - kubeadm-init + - --name={{ .vars.clusterName }} + - --loglevel=debug + - --kubeadm-verbosity={{ .vars.kubeadmVerbosity }} + - --feature-gates="RootlessControlPlane=true" + timeout: 5m +- name: join + description: | + Join the other nodes to the Kubernetes cluster + cmd: kinder + args: + - do + - kubeadm-join + - --name={{ .vars.clusterName }} + - --loglevel=debug + - --kubeadm-verbosity={{ .vars.kubeadmVerbosity }} + timeout: 10m +- name: run verify-rootless.sh on controlplane nodes before upgrades + cmd: kinder + args: + - exec + - --name={{ .vars.clusterName }} + - "@cp*" + - /kinder/verify-rootless.sh + - --loglevel=debug +- name: e2e-kubeadm + description: | + Runs kubeadm e2e tests + cmd: kinder + args: + - test + - e2e-kubeadm + - --test-flags=--report-dir={{ .env.ARTIFACTS }} --report-prefix=e2e-kubeadm + - --name={{ .vars.clusterName }} + - --loglevel=debug + timeout: 10m +- name: e2e + description: | + Runs Kubernetes e2e test (conformance) + cmd: kinder + args: + - test + - e2e + - --test-flags=--report-dir={{ .env.ARTIFACTS }} --report-prefix=e2e + - --parallel + - --name={{ .vars.clusterName }} + - --loglevel=debug + timeout: 35m +- name: upgrade + description: | + upgrades the cluster to Kubernetes "upgradeVersion" + cmd: kinder + args: + - do + - kubeadm-upgrade + - --upgrade-version={{ .vars.kubernetesVersion }} + - --name={{ .vars.clusterName }} + - --loglevel=debug + - --kubeadm-verbosity={{ .vars.kubeadmVerbosity }} + timeout: 15m +- name: run verify-rootless.sh on controlplane nodes before upgrades + cmd: kinder + args: + - exec + - --name={{ .vars.clusterName }} + - "@cp*" + - /kinder/verify-rootless.sh + - --loglevel=debug +- name: get-logs + description: | + Collects all the test logs + cmd: kinder + args: + - export + - logs + - --loglevel=debug + - --name={{ .vars.clusterName }} + - "{{ .env.ARTIFACTS }}" + force: true + timeout: 5m + # kind export log is know to be flaky, so we are temporary ignoring errors in order + # to make the test pass in case everything else passed + # see https://github.com/kubernetes-sigs/kind/issues/456 + ignoreError: true +- name: reset + description: | + Exec kubeadm reset + cmd: kinder + args: + - do + - kubeadm-reset + - --name={{ .vars.clusterName }} + - --loglevel=debug + - --kubeadm-verbosity={{ .vars.kubeadmVerbosity }} + force: true +- name: delete + description: | + Deletes the cluster + cmd: kinder + args: + - delete + - cluster + - --name={{ .vars.clusterName }} + - --loglevel=debug + force: true diff --git a/kinder/cmd/kinder/do/do.go b/kinder/cmd/kinder/do/do.go index b849d091..5f8fe774 100644 --- a/kinder/cmd/kinder/do/do.go +++ b/kinder/cmd/kinder/do/do.go @@ -43,6 +43,7 @@ type flagpole struct { Wait time.Duration IgnorePreflightErrors string KubeadmConfigVersion string + FeatureGates string } // NewCommand returns a new cobra.Command for exec @@ -122,6 +123,11 @@ func NewCommand() *cobra.Command { "If not set, kubeadm will automatically choose the kubeadm config version "+ "according to the Kubernetes version in use", ) + cmd.Flags().StringVar( + &flags.FeatureGates, + 
"kubeadm-feature-gates", flags.FeatureGates, + "the kubeadm feature-gates to be used for init, join and upgrade", + ) return cmd } @@ -175,6 +181,7 @@ func runE(flags *flagpole, cmd *cobra.Command, args []string) (err error) { actions.PatchesDir(flags.PatchesDir), actions.IgnorePreflightErrors(flags.IgnorePreflightErrors), actions.KubeadmConfigVersion(flags.KubeadmConfigVersion), + actions.FeatureGates(flags.FeatureGates), ) if err != nil { return errors.Wrapf(err, "failed to exec action %s", action) diff --git a/kinder/pkg/cluster/manager/actions/actions.go b/kinder/pkg/cluster/manager/actions/actions.go index 6238d897..8159eed4 100644 --- a/kinder/pkg/cluster/manager/actions/actions.go +++ b/kinder/pkg/cluster/manager/actions/actions.go @@ -42,7 +42,7 @@ var actionRegistry = map[string]func(*status.Cluster, *RunOptions) error{ return KubeadmConfig(c, flags.kubeadmConfigVersion, flags.copyCertsMode, flags.discoveryMode, c.K8sNodes().EligibleForActions()...) }, "kubeadm-init": func(c *status.Cluster, flags *RunOptions) error { - return KubeadmInit(c, flags.usePhases, flags.copyCertsMode, flags.kubeadmConfigVersion, flags.patchesDir, flags.ignorePreflightErrors, flags.wait, flags.vLevel) + return KubeadmInit(c, flags.usePhases, flags.copyCertsMode, flags.kubeadmConfigVersion, flags.patchesDir, flags.ignorePreflightErrors, flags.featureGates, flags.wait, flags.vLevel) }, "kubeadm-join": func(c *status.Cluster, flags *RunOptions) error { return KubeadmJoin(c, flags.usePhases, flags.copyCertsMode, flags.discoveryMode, flags.kubeadmConfigVersion, flags.patchesDir, flags.ignorePreflightErrors, flags.wait, flags.vLevel) @@ -145,6 +145,13 @@ func KubeadmConfigVersion(kubeadmConfigVersion string) Option { } } +// FeatureGates option sets the kubeadm feature-gates for the kubeadm commands +func FeatureGates(featureGates string) Option { + return func(r *RunOptions) { + r.featureGates = featureGates + } +} + // RunOptions holds options supplied to actions.Run type RunOptions struct { usePhases bool @@ -156,6 +163,7 @@ type RunOptions struct { patchesDir string ignorePreflightErrors string kubeadmConfigVersion string + featureGates string } // DiscoveryMode defines discovery mode supported by kubeadm join diff --git a/kinder/pkg/cluster/manager/actions/kubeadm-init.go b/kinder/pkg/cluster/manager/actions/kubeadm-init.go index 12e8ed20..1975d919 100644 --- a/kinder/pkg/cluster/manager/actions/kubeadm-init.go +++ b/kinder/pkg/cluster/manager/actions/kubeadm-init.go @@ -37,7 +37,7 @@ import ( // KubeadmInit executes the kubeadm init workflow including also post init task // like installing the CNI network plugin -func KubeadmInit(c *status.Cluster, usePhases bool, copyCertsMode CopyCertsMode, kubeadmConfigVersion, patchesDir, ignorePreflightErrors string, wait time.Duration, vLevel int) (err error) { +func KubeadmInit(c *status.Cluster, usePhases bool, copyCertsMode CopyCertsMode, kubeadmConfigVersion, patchesDir, ignorePreflightErrors, featureGates string, wait time.Duration, vLevel int) (err error) { cp1 := c.BootstrapControlPlane() // if patcheDir is defined, copy the patches to the node @@ -72,9 +72,9 @@ func KubeadmInit(c *status.Cluster, usePhases bool, copyCertsMode CopyCertsMode, // execs the kubeadm init workflow if usePhases { - err = kubeadmInitWithPhases(cp1, copyCertsMode, patchesDir, ignorePreflightErrors, vLevel) + err = kubeadmInitWithPhases(cp1, copyCertsMode, patchesDir, ignorePreflightErrors, featureGates, vLevel) } else { - err = kubeadmInit(cp1, copyCertsMode, patchesDir, 
+	cmd.Flags().StringVar(
+		&flags.FeatureGates,
+		"kubeadm-feature-gates", flags.FeatureGates,
+		"the kubeadm feature-gates to be used for init, join and upgrade",
+	)
 	return cmd
 }
 
@@ -175,6 +181,7 @@ func runE(flags *flagpole, cmd *cobra.Command, args []string) (err error) {
 		actions.PatchesDir(flags.PatchesDir),
 		actions.IgnorePreflightErrors(flags.IgnorePreflightErrors),
 		actions.KubeadmConfigVersion(flags.KubeadmConfigVersion),
+		actions.FeatureGates(flags.FeatureGates),
 	)
 	if err != nil {
 		return errors.Wrapf(err, "failed to exec action %s", action)
diff --git a/kinder/pkg/cluster/manager/actions/actions.go b/kinder/pkg/cluster/manager/actions/actions.go
index 6238d897..8159eed4 100644
--- a/kinder/pkg/cluster/manager/actions/actions.go
+++ b/kinder/pkg/cluster/manager/actions/actions.go
@@ -42,7 +42,7 @@ var actionRegistry = map[string]func(*status.Cluster, *RunOptions) error{
 		return KubeadmConfig(c, flags.kubeadmConfigVersion, flags.copyCertsMode, flags.discoveryMode, c.K8sNodes().EligibleForActions()...)
 	},
 	"kubeadm-init": func(c *status.Cluster, flags *RunOptions) error {
-		return KubeadmInit(c, flags.usePhases, flags.copyCertsMode, flags.kubeadmConfigVersion, flags.patchesDir, flags.ignorePreflightErrors, flags.wait, flags.vLevel)
+		return KubeadmInit(c, flags.usePhases, flags.copyCertsMode, flags.kubeadmConfigVersion, flags.patchesDir, flags.ignorePreflightErrors, flags.featureGates, flags.wait, flags.vLevel)
 	},
 	"kubeadm-join": func(c *status.Cluster, flags *RunOptions) error {
 		return KubeadmJoin(c, flags.usePhases, flags.copyCertsMode, flags.discoveryMode, flags.kubeadmConfigVersion, flags.patchesDir, flags.ignorePreflightErrors, flags.wait, flags.vLevel)
@@ -145,6 +145,13 @@ func KubeadmConfigVersion(kubeadmConfigVersion string) Option {
 	}
 }
 
+// FeatureGates option sets the kubeadm feature-gates for the kubeadm commands
+func FeatureGates(featureGates string) Option {
+	return func(r *RunOptions) {
+		r.featureGates = featureGates
+	}
+}
+
 // RunOptions holds options supplied to actions.Run
 type RunOptions struct {
 	usePhases             bool
@@ -156,6 +163,7 @@ type RunOptions struct {
 	patchesDir            string
 	ignorePreflightErrors string
 	kubeadmConfigVersion  string
+	featureGates          string
 }
 
 // DiscoveryMode defines discovery mode supported by kubeadm join
diff --git a/kinder/pkg/cluster/manager/actions/kubeadm-init.go b/kinder/pkg/cluster/manager/actions/kubeadm-init.go
index 12e8ed20..1975d919 100644
--- a/kinder/pkg/cluster/manager/actions/kubeadm-init.go
+++ b/kinder/pkg/cluster/manager/actions/kubeadm-init.go
@@ -37,7 +37,7 @@ import (
 
 // KubeadmInit executes the kubeadm init workflow including also post init task
 // like installing the CNI network plugin
-func KubeadmInit(c *status.Cluster, usePhases bool, copyCertsMode CopyCertsMode, kubeadmConfigVersion, patchesDir, ignorePreflightErrors string, wait time.Duration, vLevel int) (err error) {
+func KubeadmInit(c *status.Cluster, usePhases bool, copyCertsMode CopyCertsMode, kubeadmConfigVersion, patchesDir, ignorePreflightErrors, featureGates string, wait time.Duration, vLevel int) (err error) {
 	cp1 := c.BootstrapControlPlane()
 
 	// if patcheDir is defined, copy the patches to the node
@@ -72,9 +72,9 @@ func KubeadmInit(c *status.Cluster, usePhases bool, copyCertsMode CopyCertsMode,
 
 	// execs the kubeadm init workflow
 	if usePhases {
-		err = kubeadmInitWithPhases(cp1, copyCertsMode, patchesDir, ignorePreflightErrors, vLevel)
+		err = kubeadmInitWithPhases(cp1, copyCertsMode, patchesDir, ignorePreflightErrors, featureGates, vLevel)
 	} else {
-		err = kubeadmInit(cp1, copyCertsMode, patchesDir, ignorePreflightErrors, vLevel)
+		err = kubeadmInit(cp1, copyCertsMode, patchesDir, ignorePreflightErrors, featureGates, vLevel)
 	}
 	if err != nil {
 		return err
@@ -88,12 +88,13 @@ func KubeadmInit(c *status.Cluster, usePhases bool, copyCertsMode CopyCertsMode,
 	return nil
 }
 
-func kubeadmInit(cp1 *status.Node, copyCertsMode CopyCertsMode, patchesDir, ignorePreflightErrors string, vLevel int) error {
+func kubeadmInit(cp1 *status.Node, copyCertsMode CopyCertsMode, patchesDir, ignorePreflightErrors, featureGates string, vLevel int) error {
 	initArgs := []string{
 		"init",
 		fmt.Sprintf("--ignore-preflight-errors=%s", ignorePreflightErrors),
 		fmt.Sprintf("--config=%s", constants.KubeadmConfigPath),
 		fmt.Sprintf("--v=%d", vLevel),
 	}
+	// forward the kubeadm feature gates only when explicitly set
+	if featureGates != "" {
+		initArgs = append(initArgs, fmt.Sprintf("--feature-gates=%s", featureGates))
+	}
 	if copyCertsMode == CopyCertsModeAuto {
 		initArgs = append(initArgs,
@@ -114,7 +115,7 @@ func kubeadmInit(cp1 *status.Node, copyCertsMode CopyCertsMode, patchesDir, igno
 	return nil
 }
 
-func kubeadmInitWithPhases(cp1 *status.Node, copyCertsMode CopyCertsMode, patchesDir, ignorePreflightErrors string, vLevel int) error {
+func kubeadmInitWithPhases(cp1 *status.Node, copyCertsMode CopyCertsMode, patchesDir, ignorePreflightErrors, featureGates string, vLevel int) error {
 	if err := cp1.Command(
 		"kubeadm", "init", "phase", "preflight", fmt.Sprintf("--config=%s", constants.KubeadmConfigPath), fmt.Sprintf("--v=%d", vLevel),
 		fmt.Sprintf("--ignore-preflight-errors=%s", ignorePreflightErrors),
@@ -142,6 +143,7 @@ func kubeadmInitWithPhases(cp1 *status.Node, copyCertsMode CopyCertsMode, patche
 
 	controlplaneArgs := []string{
 		"init", "phase", "control-plane", "all", fmt.Sprintf("--config=%s", constants.KubeadmConfigPath), fmt.Sprintf("--v=%d", vLevel),
 	}
+	if featureGates != "" {
+		controlplaneArgs = append(controlplaneArgs, fmt.Sprintf("--feature-gates=%s", featureGates))
+	}
 	if patchesDir != "" {
 		controlplaneArgs = append(controlplaneArgs, "--experimental-patches", constants.PatchesDir)
@@ -153,7 +155,7 @@
 	}
 
 	etcdArgs := []string{
 		"init", "phase", "etcd", "local", fmt.Sprintf("--config=%s", constants.KubeadmConfigPath), fmt.Sprintf("--v=%d", vLevel),
 	}
+	if featureGates != "" {
+		etcdArgs = append(etcdArgs, fmt.Sprintf("--feature-gates=%s", featureGates))
+	}
 	if patchesDir != "" {
 		etcdArgs = append(etcdArgs, "--experimental-patches", constants.PatchesDir)