From 5c19ea1f6099e68539146aa40a7631bda618c6c1 Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Tue, 11 May 2021 14:28:59 +0200 Subject: [PATCH] Make KCP using embedded kubeadm types while manipulating the kubeadm-config ConfigMap --- .../controllers/kubeadmconfig_controller.go | 25 +- bootstrap/kubeadm/types/utils.go | 28 +- bootstrap/kubeadm/types/utils_test.go | 73 +- .../kubeadm/controllers/controller.go | 17 +- .../kubeadm/controllers/controller_test.go | 13 +- .../kubeadm/controllers/fakes_test.go | 6 +- .../kubeadm/controllers/remediation.go | 8 +- .../kubeadm/controllers/remediation_test.go | 3 + controlplane/kubeadm/controllers/scale.go | 8 +- .../kubeadm/controllers/scale_test.go | 12 +- controlplane/kubeadm/controllers/upgrade.go | 10 +- .../kubeadm/internal/kubeadm_config_map.go | 295 ------ .../internal/kubeadm_config_map_test.go | 978 ------------------ .../kubeadm/internal/workload_cluster.go | 226 ++-- .../internal/workload_cluster_coredns.go | 23 +- .../internal/workload_cluster_coredns_test.go | 150 ++- .../kubeadm/internal/workload_cluster_etcd.go | 30 +- .../internal/workload_cluster_etcd_test.go | 200 ++-- .../kubeadm/internal/workload_cluster_test.go | 870 +++++++++------- 19 files changed, 973 insertions(+), 2002 deletions(-) delete mode 100644 controlplane/kubeadm/internal/kubeadm_config_map.go delete mode 100644 controlplane/kubeadm/internal/kubeadm_config_map_test.go diff --git a/bootstrap/kubeadm/controllers/kubeadmconfig_controller.go b/bootstrap/kubeadm/controllers/kubeadmconfig_controller.go index e205d502d833..2260b2ba620f 100644 --- a/bootstrap/kubeadm/controllers/kubeadmconfig_controller.go +++ b/bootstrap/kubeadm/controllers/kubeadmconfig_controller.go @@ -22,6 +22,7 @@ import ( "strconv" "time" + "github.com/blang/semver" "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -367,6 +368,10 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex // should not handle special cases. 
kubernetesVersion := scope.ConfigOwner.KubernetesVersion() + parsedVersion, err := semver.ParseTolerant(kubernetesVersion) + if err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", kubernetesVersion) + } if scope.Config.Spec.InitConfiguration == nil { scope.Config.Spec.InitConfiguration = &bootstrapv1.InitConfiguration{ @@ -376,7 +381,7 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex }, } } - initdata, err := kubeadmtypes.MarshalInitConfigurationForVersion(scope.Config.Spec.InitConfiguration, kubernetesVersion) + initdata, err := kubeadmtypes.MarshalInitConfigurationForVersion(scope.Config.Spec.InitConfiguration, parsedVersion) if err != nil { scope.Error(err, "Failed to marshal init configuration") return ctrl.Result{}, err @@ -394,7 +399,7 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex // injects into config.ClusterConfiguration values from top level object r.reconcileTopLevelObjectSettings(ctx, scope.Cluster, machine, scope.Config) - clusterdata, err := kubeadmtypes.MarshalClusterConfigurationForVersion(scope.Config.Spec.ClusterConfiguration, kubernetesVersion) + clusterdata, err := kubeadmtypes.MarshalClusterConfigurationForVersion(scope.Config.Spec.ClusterConfiguration, parsedVersion) if err != nil { scope.Error(err, "Failed to marshal cluster configuration") return ctrl.Result{}, err @@ -476,7 +481,13 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) return res, nil } - joinData, err := kubeadmtypes.MarshalJoinConfigurationForVersion(scope.Config.Spec.JoinConfiguration, scope.ConfigOwner.KubernetesVersion()) + kubernetesVersion := scope.ConfigOwner.KubernetesVersion() + parsedVersion, err := semver.ParseTolerant(kubernetesVersion) + if err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", kubernetesVersion) + } + + joinData, err := kubeadmtypes.MarshalJoinConfigurationForVersion(scope.Config.Spec.JoinConfiguration, parsedVersion) if err != nil { scope.Error(err, "Failed to marshal join configuration") return ctrl.Result{}, err @@ -557,7 +568,13 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S return res, nil } - joinData, err := kubeadmtypes.MarshalJoinConfigurationForVersion(scope.Config.Spec.JoinConfiguration, scope.ConfigOwner.KubernetesVersion()) + kubernetesVersion := scope.ConfigOwner.KubernetesVersion() + parsedVersion, err := semver.ParseTolerant(kubernetesVersion) + if err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", kubernetesVersion) + } + + joinData, err := kubeadmtypes.MarshalJoinConfigurationForVersion(scope.Config.Spec.JoinConfiguration, parsedVersion) if err != nil { scope.Error(err, "Failed to marshal join configuration") return ctrl.Result{}, err diff --git a/bootstrap/kubeadm/types/utils.go b/bootstrap/kubeadm/types/utils.go index 93e4baf2368f..ef1b55c73872 100644 --- a/bootstrap/kubeadm/types/utils.go +++ b/bootstrap/kubeadm/types/utils.go @@ -17,11 +17,11 @@ limitations under the License. 
package utils import ( + "github.com/blang/semver" "github.com/pkg/errors" runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" - versionutil "k8s.io/apimachinery/pkg/util/version" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/v1beta1" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/v1beta2" @@ -30,6 +30,9 @@ import ( ) var ( + v1beta1KubeadmVersion = semver.MustParse("1.13.0") + v1beta2KubeadmVersion = semver.MustParse("1.15.0") + clusterConfigurationVersionTypeMap = map[schema.GroupVersion]conversion.Convertible{ v1beta2.GroupVersion: &v1beta2.ClusterConfiguration{}, v1beta1.GroupVersion: &v1beta1.ClusterConfiguration{}, @@ -51,18 +54,11 @@ var ( } ) -func KubeVersionToKubeadmAPIGroupVersion(version string) (schema.GroupVersion, error) { - if version == "" { - return schema.GroupVersion{}, errors.New("version cannot be empty") - } - semVersion, err := versionutil.ParseSemantic(version) - if err != nil { - return schema.GroupVersion{}, errors.Wrap(err, "error parsing the Kubernetes version") - } +func KubeVersionToKubeadmAPIGroupVersion(version semver.Version) (schema.GroupVersion, error) { switch { - case semVersion.LessThan(versionutil.MustParseSemantic("v1.13.0")): + case version.LT(v1beta1KubeadmVersion): return schema.GroupVersion{}, errors.New("the bootstrap provider for kubeadm doesn't support Kubernetes version lower than v1.13.0") - case semVersion.LessThan(versionutil.MustParseSemantic("v1.15.0")): + case version.LT(v1beta2KubeadmVersion): // NOTE: All the Kubernetes version >= v1.13 and < v1.15 should use the kubeadm API version v1beta1 return v1beta1.GroupVersion, nil default: @@ -79,32 +75,32 @@ func KubeVersionToKubeadmAPIGroupVersion(version string) (schema.GroupVersion, e // MarshalClusterConfigurationForVersion converts a Cluster API ClusterConfiguration type to the kubeadm API type // for the given Kubernetes Version. // NOTE: This assumes Kubernetes Version equals to kubeadm version. -func MarshalClusterConfigurationForVersion(obj *bootstrapv1.ClusterConfiguration, version string) (string, error) { +func MarshalClusterConfigurationForVersion(obj *bootstrapv1.ClusterConfiguration, version semver.Version) (string, error) { return marshalForVersion(obj, version, clusterConfigurationVersionTypeMap) } // MarshalClusterStatusForVersion converts a Cluster API ClusterStatus type to the kubeadm API type // for the given Kubernetes Version. // NOTE: This assumes Kubernetes Version equals to kubeadm version. -func MarshalClusterStatusForVersion(obj *bootstrapv1.ClusterStatus, version string) (string, error) { +func MarshalClusterStatusForVersion(obj *bootstrapv1.ClusterStatus, version semver.Version) (string, error) { return marshalForVersion(obj, version, clusterStatusVersionTypeMap) } // MarshalInitConfigurationForVersion converts a Cluster API InitConfiguration type to the kubeadm API type // for the given Kubernetes Version. // NOTE: This assumes Kubernetes Version equals to kubeadm version. -func MarshalInitConfigurationForVersion(obj *bootstrapv1.InitConfiguration, version string) (string, error) { +func MarshalInitConfigurationForVersion(obj *bootstrapv1.InitConfiguration, version semver.Version) (string, error) { return marshalForVersion(obj, version, initConfigurationVersionTypeMap) } // MarshalJoinConfigurationForVersion converts a Cluster API JoinConfiguration type to the kubeadm API type // for the given Kubernetes Version. 
// NOTE: This assumes Kubernetes Version equals to kubeadm version. -func MarshalJoinConfigurationForVersion(obj *bootstrapv1.JoinConfiguration, version string) (string, error) { +func MarshalJoinConfigurationForVersion(obj *bootstrapv1.JoinConfiguration, version semver.Version) (string, error) { return marshalForVersion(obj, version, joinConfigurationVersionTypeMap) } -func marshalForVersion(obj conversion.Hub, version string, kubeadmObjVersionTypeMap map[schema.GroupVersion]conversion.Convertible) (string, error) { +func marshalForVersion(obj conversion.Hub, version semver.Version, kubeadmObjVersionTypeMap map[schema.GroupVersion]conversion.Convertible) (string, error) { kubeadmAPIGroupVersion, err := KubeVersionToKubeadmAPIGroupVersion(version) if err != nil { return "", err diff --git a/bootstrap/kubeadm/types/utils_test.go b/bootstrap/kubeadm/types/utils_test.go index 1fa6cf2f8104..9127eb1309a1 100644 --- a/bootstrap/kubeadm/types/utils_test.go +++ b/bootstrap/kubeadm/types/utils_test.go @@ -17,6 +17,7 @@ limitations under the License. package utils import ( + "github.com/blang/semver" "github.com/google/go-cmp/cmp" . "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/runtime/schema" @@ -29,7 +30,7 @@ import ( func TestKubeVersionToKubeadmAPIGroupVersion(t *testing.T) { type args struct { - k8sVersion string + version semver.Version } tests := []struct { name string @@ -40,7 +41,7 @@ func TestKubeVersionToKubeadmAPIGroupVersion(t *testing.T) { { name: "fails when kubernetes version is too old", args: args{ - k8sVersion: "v1.12.0", + version: semver.MustParse("1.12.0"), }, want: schema.GroupVersion{}, wantErr: true, @@ -48,7 +49,7 @@ func TestKubeVersionToKubeadmAPIGroupVersion(t *testing.T) { { name: "pass with minimum kubernetes version for kubeadm API v1beta1", args: args{ - k8sVersion: "v1.13.0", + version: semver.MustParse("1.13.0"), }, want: v1beta1.GroupVersion, wantErr: false, @@ -56,7 +57,7 @@ func TestKubeVersionToKubeadmAPIGroupVersion(t *testing.T) { { name: "pass with kubernetes version for kubeadm API v1beta1", args: args{ - k8sVersion: "v1.14.99", + version: semver.MustParse("1.14.99"), }, want: v1beta1.GroupVersion, wantErr: false, @@ -64,7 +65,7 @@ func TestKubeVersionToKubeadmAPIGroupVersion(t *testing.T) { { name: "pass with minimum kubernetes version for kubeadm API v1beta2", args: args{ - k8sVersion: "v1.15.0", + version: semver.MustParse("1.15.0"), }, want: v1beta2.GroupVersion, wantErr: false, @@ -72,7 +73,7 @@ func TestKubeVersionToKubeadmAPIGroupVersion(t *testing.T) { { name: "pass with kubernetes version for kubeadm API v1beta2", args: args{ - k8sVersion: "v1.20.99", + version: semver.MustParse("1.20.99"), }, want: v1beta2.GroupVersion, wantErr: false, @@ -80,7 +81,7 @@ func TestKubeVersionToKubeadmAPIGroupVersion(t *testing.T) { { name: "pass with future kubernetes version", args: args{ - k8sVersion: "v99.99.99", + version: semver.MustParse("99.99.99"), }, want: v1beta2.GroupVersion, wantErr: false, @@ -90,7 +91,7 @@ func TestKubeVersionToKubeadmAPIGroupVersion(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got, err := KubeVersionToKubeadmAPIGroupVersion(tt.args.k8sVersion) + got, err := KubeVersionToKubeadmAPIGroupVersion(tt.args.version) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -103,8 +104,8 @@ func TestKubeVersionToKubeadmAPIGroupVersion(t *testing.T) { func TestMarshalClusterConfigurationForVersion(t *testing.T) { type args struct { - capiObj *bootstrapv1.ClusterConfiguration - k8sVersion string + capiObj 
*bootstrapv1.ClusterConfiguration + version semver.Version } tests := []struct { name string @@ -115,8 +116,8 @@ func TestMarshalClusterConfigurationForVersion(t *testing.T) { { name: "Generates a v1beta1 kubeadm configuration", args: args{ - capiObj: &bootstrapv1.ClusterConfiguration{}, - k8sVersion: "v1.14.9", + capiObj: &bootstrapv1.ClusterConfiguration{}, + version: semver.MustParse("1.14.9"), }, want: "apiServer: {}\n" + "apiVersion: kubeadm.k8s.io/v1beta1\n" + "" + @@ -131,8 +132,8 @@ func TestMarshalClusterConfigurationForVersion(t *testing.T) { { name: "Generates a v1beta2 kubeadm configuration", args: args{ - capiObj: &bootstrapv1.ClusterConfiguration{}, - k8sVersion: "v1.15.0", + capiObj: &bootstrapv1.ClusterConfiguration{}, + version: semver.MustParse("1.15.0"), }, want: "apiServer: {}\n" + "apiVersion: kubeadm.k8s.io/v1beta2\n" + "" + @@ -149,7 +150,7 @@ func TestMarshalClusterConfigurationForVersion(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got, err := MarshalClusterConfigurationForVersion(tt.args.capiObj, tt.args.k8sVersion) + got, err := MarshalClusterConfigurationForVersion(tt.args.capiObj, tt.args.version) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -162,8 +163,8 @@ func TestMarshalClusterConfigurationForVersion(t *testing.T) { func TestMarshalClusterStatusForVersion(t *testing.T) { type args struct { - capiObj *bootstrapv1.ClusterStatus - k8sVersion string + capiObj *bootstrapv1.ClusterStatus + version semver.Version } tests := []struct { name string @@ -174,8 +175,8 @@ func TestMarshalClusterStatusForVersion(t *testing.T) { { name: "Generates a v1beta1 kubeadm status", args: args{ - capiObj: &bootstrapv1.ClusterStatus{}, - k8sVersion: "v1.14.9", + capiObj: &bootstrapv1.ClusterStatus{}, + version: semver.MustParse("1.14.9"), }, want: "apiEndpoints: null\n" + "apiVersion: kubeadm.k8s.io/v1beta1\n" + "" + @@ -185,8 +186,8 @@ func TestMarshalClusterStatusForVersion(t *testing.T) { { name: "Generates a v1beta2 kubeadm status", args: args{ - capiObj: &bootstrapv1.ClusterStatus{}, - k8sVersion: "v1.15.0", + capiObj: &bootstrapv1.ClusterStatus{}, + version: semver.MustParse("1.15.0"), }, want: "apiEndpoints: null\n" + "apiVersion: kubeadm.k8s.io/v1beta2\n" + "" + @@ -198,7 +199,7 @@ func TestMarshalClusterStatusForVersion(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got, err := MarshalClusterStatusForVersion(tt.args.capiObj, tt.args.k8sVersion) + got, err := MarshalClusterStatusForVersion(tt.args.capiObj, tt.args.version) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -211,8 +212,8 @@ func TestMarshalClusterStatusForVersion(t *testing.T) { func TestMarshalInitConfigurationForVersion(t *testing.T) { type args struct { - capiObj *bootstrapv1.InitConfiguration - k8sVersion string + capiObj *bootstrapv1.InitConfiguration + version semver.Version } tests := []struct { name string @@ -223,8 +224,8 @@ func TestMarshalInitConfigurationForVersion(t *testing.T) { { name: "Generates a v1beta1 kubeadm configuration", args: args{ - capiObj: &bootstrapv1.InitConfiguration{}, - k8sVersion: "v1.14.9", + capiObj: &bootstrapv1.InitConfiguration{}, + version: semver.MustParse("1.14.9"), }, want: "apiVersion: kubeadm.k8s.io/v1beta1\n" + "kind: InitConfiguration\n" + @@ -237,8 +238,8 @@ func TestMarshalInitConfigurationForVersion(t *testing.T) { { name: "Generates a v1beta2 kubeadm configuration", args: args{ - capiObj: &bootstrapv1.InitConfiguration{}, - k8sVersion: "v1.15.0", + capiObj: 
&bootstrapv1.InitConfiguration{}, + version: semver.MustParse("1.15.0"), }, want: "apiVersion: kubeadm.k8s.io/v1beta2\n" + "kind: InitConfiguration\n" + @@ -251,7 +252,7 @@ func TestMarshalInitConfigurationForVersion(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got, err := MarshalInitConfigurationForVersion(tt.args.capiObj, tt.args.k8sVersion) + got, err := MarshalInitConfigurationForVersion(tt.args.capiObj, tt.args.version) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -264,8 +265,8 @@ func TestMarshalInitConfigurationForVersion(t *testing.T) { func TestMarshalJoinConfigurationForVersion(t *testing.T) { type args struct { - capiObj *bootstrapv1.JoinConfiguration - k8sVersion string + capiObj *bootstrapv1.JoinConfiguration + version semver.Version } tests := []struct { name string @@ -276,8 +277,8 @@ func TestMarshalJoinConfigurationForVersion(t *testing.T) { { name: "Generates a v1beta1 kubeadm configuration", args: args{ - capiObj: &bootstrapv1.JoinConfiguration{}, - k8sVersion: "v1.14.9", + capiObj: &bootstrapv1.JoinConfiguration{}, + version: semver.MustParse("1.14.9"), }, want: "apiVersion: kubeadm.k8s.io/v1beta1\n" + "" + "discovery: {}\n" + @@ -288,8 +289,8 @@ func TestMarshalJoinConfigurationForVersion(t *testing.T) { { name: "Generates a v1beta2 kubeadm configuration", args: args{ - capiObj: &bootstrapv1.JoinConfiguration{}, - k8sVersion: "v1.15.0", + capiObj: &bootstrapv1.JoinConfiguration{}, + version: semver.MustParse("1.15.0"), }, want: "apiVersion: kubeadm.k8s.io/v1beta2\n" + "" + "discovery: {}\n" + @@ -302,7 +303,7 @@ func TestMarshalJoinConfigurationForVersion(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got, err := MarshalJoinConfigurationForVersion(tt.args.capiObj, tt.args.k8sVersion) + got, err := MarshalJoinConfigurationForVersion(tt.args.capiObj, tt.args.version) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git a/controlplane/kubeadm/controllers/controller.go b/controlplane/kubeadm/controllers/controller.go index d2a1f72f7d61..3a27a7c77da3 100644 --- a/controlplane/kubeadm/controllers/controller.go +++ b/controlplane/kubeadm/controllers/controller.go @@ -19,9 +19,10 @@ package controllers import ( "context" "fmt" - "sigs.k8s.io/cluster-api/util/collections" "time" + "sigs.k8s.io/cluster-api/util/collections" + "github.com/blang/semver" "github.com/pkg/errors" @@ -372,7 +373,12 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster * } // Update CoreDNS deployment. 
- if err := workloadCluster.UpdateCoreDNS(ctx, kcp); err != nil { + parsedVersion, err := semver.ParseTolerant(kcp.Spec.Version) + if err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", kcp.Spec.Version) + } + + if err := workloadCluster.UpdateCoreDNS(ctx, kcp, parsedVersion); err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to update CoreDNS deployment") } @@ -527,7 +533,12 @@ func (r *KubeadmControlPlaneReconciler) reconcileEtcdMembers(ctx context.Context return ctrl.Result{}, errors.Wrap(err, "cannot get remote client to workload cluster") } - removedMembers, err := workloadCluster.ReconcileEtcdMembers(ctx, nodeNames) + parsedVersion, err := semver.ParseTolerant(controlPlane.KCP.Spec.Version) + if err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", controlPlane.KCP.Spec.Version) + } + + removedMembers, err := workloadCluster.ReconcileEtcdMembers(ctx, nodeNames, parsedVersion) if err != nil { return ctrl.Result{}, errors.Wrap(err, "failed attempt to reconcile etcd members") } diff --git a/controlplane/kubeadm/controllers/controller_test.go b/controlplane/kubeadm/controllers/controller_test.go index 32e0b934930d..1558c2a6a0e3 100644 --- a/controlplane/kubeadm/controllers/controller_test.go +++ b/controlplane/kubeadm/controllers/controller_test.go @@ -23,6 +23,7 @@ import ( "testing" "time" + "github.com/blang/semver" . "github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" @@ -974,7 +975,7 @@ kubernetesVersion: metav1.16.1`, }, } - g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp)).To(Succeed()) + g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp, semver.MustParse("1.19.1"))).To(Succeed()) var actualCoreDNSCM corev1.ConfigMap g.Expect(fakeClient.Get(ctx, client.ObjectKey{Name: "coredns", Namespace: metav1.NamespaceSystem}, &actualCoreDNSCM)).To(Succeed()) @@ -1032,7 +1033,7 @@ kubernetesVersion: metav1.16.1`, }, } - g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp)).To(Succeed()) + g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp, semver.MustParse("1.19.1"))).To(Succeed()) }) t.Run("should not return an error when there is no CoreDNS configmap", func(t *testing.T) { @@ -1054,7 +1055,7 @@ kubernetesVersion: metav1.16.1`, }, } - g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp)).To(Succeed()) + g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp, semver.MustParse("1.19.1"))).To(Succeed()) }) t.Run("should not return an error when there is no CoreDNS deployment", func(t *testing.T) { @@ -1078,7 +1079,7 @@ kubernetesVersion: metav1.16.1`, }, } - g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp)).To(Succeed()) + g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp, semver.MustParse("1.19.1"))).To(Succeed()) }) t.Run("should not return an error when no DNS upgrade is requested", func(t *testing.T) { @@ -1103,7 +1104,7 @@ kubernetesVersion: metav1.16.1`, }, } - g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp)).To(Succeed()) + g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp, semver.MustParse("1.19.1"))).To(Succeed()) var actualCoreDNSCM corev1.ConfigMap g.Expect(fakeClient.Get(ctx, client.ObjectKey{Name: "coredns", Namespace: metav1.NamespaceSystem}, &actualCoreDNSCM)).To(Succeed()) @@ -1139,7 +1140,7 @@ kubernetesVersion: metav1.16.1`, }, } - g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp)).ToNot(Succeed()) + g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp, semver.MustParse("1.19.1"))).ToNot(Succeed()) }) } diff --git a/controlplane/kubeadm/controllers/fakes_test.go b/controlplane/kubeadm/controllers/fakes_test.go 
index dec2018cc3a6..a42f3fce2ee1 100644 --- a/controlplane/kubeadm/controllers/fakes_test.go +++ b/controlplane/kubeadm/controllers/fakes_test.go @@ -62,7 +62,7 @@ func (f fakeWorkloadCluster) ForwardEtcdLeadership(_ context.Context, _ *cluster return nil } -func (f fakeWorkloadCluster) ReconcileEtcdMembers(ctx context.Context, nodeNames []string) ([]string, error) { +func (f fakeWorkloadCluster) ReconcileEtcdMembers(ctx context.Context, nodeNames []string, version semver.Version) ([]string, error) { return nil, nil } @@ -86,7 +86,7 @@ func (f fakeWorkloadCluster) UpdateKubernetesVersionInKubeadmConfigMap(ctx conte return nil } -func (f fakeWorkloadCluster) UpdateEtcdVersionInKubeadmConfigMap(ctx context.Context, imageRepository, imageTag string) error { +func (f fakeWorkloadCluster) UpdateEtcdVersionInKubeadmConfigMap(ctx context.Context, imageRepository, imageTag string, version semver.Version) error { return nil } @@ -98,7 +98,7 @@ func (f fakeWorkloadCluster) RemoveEtcdMemberForMachine(ctx context.Context, mac return nil } -func (f fakeWorkloadCluster) RemoveMachineFromKubeadmConfigMap(ctx context.Context, machine *clusterv1.Machine) error { +func (f fakeWorkloadCluster) RemoveMachineFromKubeadmConfigMap(ctx context.Context, machine *clusterv1.Machine, version semver.Version) error { return nil } diff --git a/controlplane/kubeadm/controllers/remediation.go b/controlplane/kubeadm/controllers/remediation.go index 977137f8b99e..f8d5e996a47c 100644 --- a/controlplane/kubeadm/controllers/remediation.go +++ b/controlplane/kubeadm/controllers/remediation.go @@ -20,6 +20,7 @@ import ( "context" "fmt" + "github.com/blang/semver" "github.com/pkg/errors" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha4" @@ -135,7 +136,12 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C } } - if err := workloadCluster.RemoveMachineFromKubeadmConfigMap(ctx, machineToBeRemediated); err != nil { + parsedVersion, err := semver.ParseTolerant(controlPlane.KCP.Spec.Version) + if err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", controlPlane.KCP.Spec.Version) + } + + if err := workloadCluster.RemoveMachineFromKubeadmConfigMap(ctx, machineToBeRemediated, parsedVersion); err != nil { log.Error(err, "Failed to remove machine from kubeadm ConfigMap") return ctrl.Result{}, err } diff --git a/controlplane/kubeadm/controllers/remediation_test.go b/controlplane/kubeadm/controllers/remediation_test.go index 805b58aee8d1..c10290bed075 100644 --- a/controlplane/kubeadm/controllers/remediation_test.go +++ b/controlplane/kubeadm/controllers/remediation_test.go @@ -228,6 +228,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ Replicas: utilpointer.Int32Ptr(2), + Version: "v1.19.1", }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1, m2), @@ -276,6 +277,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ Replicas: utilpointer.Int32Ptr(3), + Version: "v1.19.1", }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1, m2, m3), @@ -325,6 +327,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: 
controlplanev1.KubeadmControlPlaneSpec{ Replicas: utilpointer.Int32Ptr(4), + Version: "v1.19.1", }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1, m2, m3, m4), diff --git a/controlplane/kubeadm/controllers/scale.go b/controlplane/kubeadm/controllers/scale.go index 8a1319d0447c..b5791639ba64 100644 --- a/controlplane/kubeadm/controllers/scale.go +++ b/controlplane/kubeadm/controllers/scale.go @@ -18,6 +18,7 @@ package controllers import ( "context" + "github.com/blang/semver" "sigs.k8s.io/cluster-api/util/collections" "strings" @@ -128,7 +129,12 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane( } } - if err := workloadCluster.RemoveMachineFromKubeadmConfigMap(ctx, machineToDelete); err != nil { + parsedVersion, err := semver.ParseTolerant(kcp.Spec.Version) + if err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", kcp.Spec.Version) + } + + if err := workloadCluster.RemoveMachineFromKubeadmConfigMap(ctx, machineToDelete, parsedVersion); err != nil { logger.Error(err, "Failed to remove machine from kubeadm ConfigMap") return ctrl.Result{}, err } diff --git a/controlplane/kubeadm/controllers/scale_test.go b/controlplane/kubeadm/controllers/scale_test.go index 780d708e1b3a..810b7bfc832f 100644 --- a/controlplane/kubeadm/controllers/scale_test.go +++ b/controlplane/kubeadm/controllers/scale_test.go @@ -192,7 +192,11 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing. } cluster := &clusterv1.Cluster{} - kcp := &controlplanev1.KubeadmControlPlane{} + kcp := &controlplanev1.KubeadmControlPlane{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.19.1", + }, + } setKCPHealthy(kcp) controlPlane := &internal.ControlPlane{ KCP: kcp, @@ -229,7 +233,11 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing. 
} cluster := &clusterv1.Cluster{} - kcp := &controlplanev1.KubeadmControlPlane{} + kcp := &controlplanev1.KubeadmControlPlane{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.19.1", + }, + } controlPlane := &internal.ControlPlane{ KCP: kcp, Cluster: cluster, diff --git a/controlplane/kubeadm/controllers/upgrade.go b/controlplane/kubeadm/controllers/upgrade.go index 55be6292baf0..30cbd251a46b 100644 --- a/controlplane/kubeadm/controllers/upgrade.go +++ b/controlplane/kubeadm/controllers/upgrade.go @@ -75,28 +75,28 @@ func (r *KubeadmControlPlaneReconciler) upgradeControlPlane( if kcp.Spec.KubeadmConfigSpec.ClusterConfiguration != nil { imageRepository := kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.ImageRepository - if err := workloadCluster.UpdateImageRepositoryInKubeadmConfigMap(ctx, imageRepository); err != nil { + if err := workloadCluster.UpdateImageRepositoryInKubeadmConfigMap(ctx, imageRepository, parsedVersion); err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to update the image repository in the kubeadm config map") } } if kcp.Spec.KubeadmConfigSpec.ClusterConfiguration != nil && kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local != nil { meta := kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local.ImageMeta - if err := workloadCluster.UpdateEtcdVersionInKubeadmConfigMap(ctx, meta.ImageRepository, meta.ImageTag); err != nil { + if err := workloadCluster.UpdateEtcdVersionInKubeadmConfigMap(ctx, meta.ImageRepository, meta.ImageTag, parsedVersion); err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to update the etcd version in the kubeadm config map") } } if kcp.Spec.KubeadmConfigSpec.ClusterConfiguration != nil { - if err := workloadCluster.UpdateAPIServerInKubeadmConfigMap(ctx, kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.APIServer); err != nil { + if err := workloadCluster.UpdateAPIServerInKubeadmConfigMap(ctx, kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.APIServer, parsedVersion); err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to update api server in the kubeadm config map") } - if err := workloadCluster.UpdateControllerManagerInKubeadmConfigMap(ctx, kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager); err != nil { + if err := workloadCluster.UpdateControllerManagerInKubeadmConfigMap(ctx, kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager, parsedVersion); err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to update controller manager in the kubeadm config map") } - if err := workloadCluster.UpdateSchedulerInKubeadmConfigMap(ctx, kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.Scheduler); err != nil { + if err := workloadCluster.UpdateSchedulerInKubeadmConfigMap(ctx, kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.Scheduler, parsedVersion); err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to update scheduler in the kubeadm config map") } } diff --git a/controlplane/kubeadm/internal/kubeadm_config_map.go b/controlplane/kubeadm/internal/kubeadm_config_map.go deleted file mode 100644 index 776448e45616..000000000000 --- a/controlplane/kubeadm/internal/kubeadm_config_map.go +++ /dev/null @@ -1,295 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internal - -import ( - "reflect" - "strings" - - "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" - kubeadmtypes "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types" - "sigs.k8s.io/yaml" -) - -const ( - clusterStatusKey = "ClusterStatus" - clusterConfigurationKey = "ClusterConfiguration" - apiVersionKey = "apiVersion" - statusAPIEndpointsKey = "apiEndpoints" - configVersionKey = "kubernetesVersion" - dnsKey = "dns" - dnsImageRepositoryKey = "imageRepository" - dnsImageTagKey = "imageTag" - configImageRepositoryKey = "imageRepository" - apiServerKey = "apiServer" - controllerManagerKey = "controllerManager" - schedulerKey = "scheduler" -) - -// kubeadmConfig wraps up interactions necessary for modifying the kubeadm config during an upgrade. -type kubeadmConfig struct { - ConfigMap *corev1.ConfigMap -} - -// RemoveAPIEndpoint removes an APIEndpoint fromt he kubeadm config cluster status config map. -func (k *kubeadmConfig) RemoveAPIEndpoint(endpoint string) error { - data, ok := k.ConfigMap.Data[clusterStatusKey] - if !ok { - return errors.Errorf("unable to find %q key in kubeadm ConfigMap", clusterStatusKey) - } - status, err := yamlToUnstructured([]byte(data)) - if err != nil { - return errors.Wrapf(err, "unable to decode kubeadm ConfigMap's %q to Unstructured object", clusterStatusKey) - } - endpoints, _, err := unstructured.NestedMap(status.UnstructuredContent(), statusAPIEndpointsKey) - if err != nil { - return errors.Wrapf(err, "unable to extract %q from kubeadm ConfigMap's %q", statusAPIEndpointsKey, clusterStatusKey) - } - delete(endpoints, endpoint) - if err := unstructured.SetNestedMap(status.UnstructuredContent(), endpoints, statusAPIEndpointsKey); err != nil { - return errors.Wrapf(err, "unable to update %q on kubeadm ConfigMap's %q", statusAPIEndpointsKey, clusterStatusKey) - } - updated, err := yaml.Marshal(status) - if err != nil { - return errors.Wrapf(err, "unable to encode kubeadm ConfigMap's %q to YAML", clusterStatusKey) - } - k.ConfigMap.Data[clusterStatusKey] = string(updated) - return nil -} - -// UpdateKubernetesVersion changes the kubernetes version found in the kubeadm config map. 
-func (k *kubeadmConfig) UpdateKubernetesVersion(version string) error { - if k.ConfigMap == nil { - return errors.New("unable to operate on a nil config map") - } - data, ok := k.ConfigMap.Data[clusterConfigurationKey] - if !ok { - return errors.Errorf("unable to find %q key in kubeadm ConfigMap", clusterConfigurationKey) - } - configuration, err := yamlToUnstructured([]byte(data)) - if err != nil { - return errors.Wrapf(err, "unable to decode kubeadm ConfigMap's %q to Unstructured object", clusterConfigurationKey) - } - if err := unstructured.SetNestedField(configuration.UnstructuredContent(), version, configVersionKey); err != nil { - return errors.Wrapf(err, "unable to update %q on kubeadm ConfigMap's %q", configVersionKey, clusterConfigurationKey) - } - - // Fix the ClusterConfiguration according to the target Kubernetes version - // IMPORTANT: This is a stop-gap explicitly designed for back-porting on the v1alpha3 branch. - // This allows to unblock removal of the v1beta1 API in kubeadm by making Cluster API to use the v1beta2 kubeadm API - // under the assumption that the serialized version of the two APIs is equal as discussed; see - // "Insulate users from kubeadm API version changes" CAEP for more details. - // NOTE: This solution will stop to work when kubeadm will drop then v1beta2 kubeadm API, but this gives - // enough time (9/12 months from the deprecation date, not yet announced) for the users to migrate to - // the v1alpha4 release of Cluster API, where a proper conversion mechanism is going to be supported. - gv, err := kubeadmtypes.KubeVersionToKubeadmAPIGroupVersion(version) - if err != nil { - return err - } - if err := unstructured.SetNestedField(configuration.UnstructuredContent(), gv.String(), apiVersionKey); err != nil { - return errors.Wrapf(err, "unable to update %q on kubeadm ConfigMap's %q", apiVersionKey, clusterConfigurationKey) - } - - updated, err := yaml.Marshal(configuration) - if err != nil { - return errors.Wrapf(err, "unable to encode kubeadm ConfigMap's %q to YAML", clusterConfigurationKey) - } - k.ConfigMap.Data[clusterConfigurationKey] = string(updated) - return nil -} - -// UpdateImageRepository changes the image repository found in the kubeadm config map. -func (k *kubeadmConfig) UpdateImageRepository(imageRepository string) error { - if imageRepository == "" { - return nil - } - data, ok := k.ConfigMap.Data[clusterConfigurationKey] - if !ok { - return errors.Errorf("unable to find %q key in kubeadm ConfigMap", clusterConfigurationKey) - } - configuration, err := yamlToUnstructured([]byte(data)) - if err != nil { - return errors.Wrapf(err, "unable to decode kubeadm ConfigMap's %q to Unstructured object", clusterConfigurationKey) - } - if err := unstructured.SetNestedField(configuration.UnstructuredContent(), imageRepository, configImageRepositoryKey); err != nil { - return errors.Wrapf(err, "unable to update %q on kubeadm ConfigMap's %q", imageRepository, clusterConfigurationKey) - } - updated, err := yaml.Marshal(configuration) - if err != nil { - return errors.Wrapf(err, "unable to encode kubeadm ConfigMap's %q to YAML", clusterConfigurationKey) - } - k.ConfigMap.Data[clusterConfigurationKey] = string(updated) - return nil -} - -// UpdateEtcdMeta sets the local etcd's configuration's image repository and image tag. 
-func (k *kubeadmConfig) UpdateEtcdMeta(imageRepository, imageTag string) (bool, error) { - data, ok := k.ConfigMap.Data[clusterConfigurationKey] - if !ok { - return false, errors.Errorf("unable to find %q in kubeadm ConfigMap", clusterConfigurationKey) - } - configuration, err := yamlToUnstructured([]byte(data)) - if err != nil { - return false, errors.Wrapf(err, "unable to decode kubeadm ConfigMap's %q to Unstructured object", clusterConfigurationKey) - } - - var changed bool - - // Handle etcd.local.imageRepository. - imageRepositoryPath := []string{"etcd", "local", "imageRepository"} - currentImageRepository, _, err := unstructured.NestedString(configuration.UnstructuredContent(), imageRepositoryPath...) - if err != nil { - return false, errors.Wrapf(err, "unable to retrieve %q from kubeadm ConfigMap", strings.Join(imageRepositoryPath, ".")) - } - if currentImageRepository != imageRepository { - if err := unstructured.SetNestedField(configuration.UnstructuredContent(), imageRepository, imageRepositoryPath...); err != nil { - return false, errors.Wrapf(err, "unable to update %q on kubeadm ConfigMap", strings.Join(imageRepositoryPath, ".")) - } - changed = true - } - - // Handle etcd.local.imageTag. - imageTagPath := []string{"etcd", "local", "imageTag"} - currentImageTag, _, err := unstructured.NestedString(configuration.UnstructuredContent(), imageTagPath...) - if err != nil { - return false, errors.Wrapf(err, "unable to retrieve %q from kubeadm ConfigMap", strings.Join(imageTagPath, ".")) - } - if currentImageTag != imageTag { - if err := unstructured.SetNestedField(configuration.UnstructuredContent(), imageTag, imageTagPath...); err != nil { - return false, errors.Wrapf(err, "unable to update %q on kubeadm ConfigMap", strings.Join(imageTagPath, ".")) - } - changed = true - } - - // Return early if no changes have been performed. - if !changed { - return changed, nil - } - - updated, err := yaml.Marshal(configuration) - if err != nil { - return false, errors.Wrapf(err, "unable to encode kubeadm ConfigMap's %q to YAML", clusterConfigurationKey) - } - k.ConfigMap.Data[clusterConfigurationKey] = string(updated) - return changed, nil -} - -// UpdateCoreDNSImageInfo changes the dns.ImageTag and dns.ImageRepository -// found in the kubeadm config map. -func (k *kubeadmConfig) UpdateCoreDNSImageInfo(repository, tag string) error { - data, ok := k.ConfigMap.Data[clusterConfigurationKey] - if !ok { - return errors.Errorf("unable to find %q in kubeadm ConfigMap", clusterConfigurationKey) - } - configuration, err := yamlToUnstructured([]byte(data)) - if err != nil { - return errors.Wrapf(err, "unable to decode kubeadm ConfigMap's %q to Unstructured object", clusterConfigurationKey) - } - dnsMap := map[string]string{ - dnsImageRepositoryKey: repository, - dnsImageTagKey: tag, - } - if err := unstructured.SetNestedStringMap(configuration.UnstructuredContent(), dnsMap, dnsKey); err != nil { - return errors.Wrapf(err, "unable to update %q on kubeadm ConfigMap", dnsKey) - } - updated, err := yaml.Marshal(configuration) - if err != nil { - return errors.Wrapf(err, "unable to encode kubeadm ConfigMap's %q to YAML", clusterConfigurationKey) - } - k.ConfigMap.Data[clusterConfigurationKey] = string(updated) - return nil -} - -// UpdateAPIServer sets the api server configuration to values set in `apiServer` in kubeadm config map. 
-func (k *kubeadmConfig) UpdateAPIServer(apiServer bootstrapv1.APIServer) (bool, error) { - changed, err := k.updateClusterConfiguration(apiServer, apiServerKey) - if err != nil { - return false, errors.Wrap(err, "unable to update api server configuration in kubeadm config map") - } - return changed, nil -} - -// UpdateControllerManager sets the controller manager configuration to values set in `controllerManager` in kubeadm config map. -func (k *kubeadmConfig) UpdateControllerManager(controllerManager bootstrapv1.ControlPlaneComponent) (bool, error) { - changed, err := k.updateClusterConfiguration(controllerManager, controllerManagerKey) - if err != nil { - return false, errors.Wrap(err, "unable to update controller manager configuration in kubeadm config map") - } - return changed, nil -} - -// UpdateScheduler sets the scheduler configuration to values set in `scheduler` in kubeadm config map. -func (k *kubeadmConfig) UpdateScheduler(scheduler bootstrapv1.ControlPlaneComponent) (bool, error) { - changed, err := k.updateClusterConfiguration(scheduler, schedulerKey) - if err != nil { - return false, errors.Wrap(err, "unable to update scheduler configuration in kubeadm config map") - } - return changed, nil -} - -// updateClusterConfiguration is a generic method to update any kubeadm ClusterConfiguration spec with custom types in the specified path. -func (k *kubeadmConfig) updateClusterConfiguration(config interface{}, path ...string) (bool, error) { - data, ok := k.ConfigMap.Data[clusterConfigurationKey] - if !ok { - return false, errors.Errorf("unable to find %q in kubeadm ConfigMap", clusterConfigurationKey) - } - - configuration, err := yamlToUnstructured([]byte(data)) - if err != nil { - return false, errors.Wrapf(err, "unable to decode kubeadm ConfigMap's %q to Unstructured object", clusterConfigurationKey) - } - - currentConfig, _, err := unstructured.NestedFieldCopy(configuration.UnstructuredContent(), path...) - if err != nil { - return false, errors.Wrapf(err, "unable to retrieve %q from kubeadm ConfigMap", strings.Join(path, ".")) - } - - // convert config to map[string]interface because unstructured.SetNestedField does not accept custom structs. - newConfig, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&config) - if err != nil { - return false, errors.Wrap(err, "unable to convert config to unstructured") - } - - // if there are no changes, return early. - if reflect.DeepEqual(newConfig, currentConfig) { - return false, nil - } - - if err := unstructured.SetNestedField(configuration.UnstructuredContent(), newConfig, path...); err != nil { - return false, errors.Wrapf(err, "unable to update %q on kubeadm ConfigMap", strings.Join(path, ".")) - } - - updated, err := yaml.Marshal(configuration) - if err != nil { - return false, errors.Wrapf(err, "unable to encode kubeadm ConfigMap's %q to YAML", clusterConfigurationKey) - } - - k.ConfigMap.Data[clusterConfigurationKey] = string(updated) - return true, nil -} - -// yamlToUnstructured looks inside a config map for a specific key and extracts the embedded YAML into an -// *unstructured.Unstructured. 
-func yamlToUnstructured(rawYAML []byte) (*unstructured.Unstructured, error) { - unst := &unstructured.Unstructured{} - err := yaml.Unmarshal(rawYAML, unst) - return unst, err -} diff --git a/controlplane/kubeadm/internal/kubeadm_config_map_test.go b/controlplane/kubeadm/internal/kubeadm_config_map_test.go deleted file mode 100644 index ac4c024e743d..000000000000 --- a/controlplane/kubeadm/internal/kubeadm_config_map_test.go +++ /dev/null @@ -1,978 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internal - -import ( - "errors" - "testing" - "time" - - . "github.com/onsi/gomega" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" - "sigs.k8s.io/yaml" -) - -func TestUpdateKubernetesVersion(t *testing.T) { - kconfv1beta1 := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "kubeadmconfig", - Namespace: metav1.NamespaceSystem, - }, - Data: map[string]string{ - clusterConfigurationKey: ` -apiVersion: kubeadm.k8s.io/v1beta1 -kind: ClusterConfiguration -kubernetesVersion: v1.16.1 -`, - }, - } - - kconfv1beta2 := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "kubeadmconfig", - Namespace: metav1.NamespaceSystem, - }, - Data: map[string]string{ - clusterConfigurationKey: ` -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -kubernetesVersion: v1.16.1 -`, - }, - } - - kubeadmConfigNoKey := kconfv1beta2.DeepCopy() - delete(kubeadmConfigNoKey.Data, clusterConfigurationKey) - - kubeadmConfigBadData := kconfv1beta2.DeepCopy() - kubeadmConfigBadData.Data[clusterConfigurationKey] = `something` - - tests := []struct { - name string - version string - config *corev1.ConfigMap - expectErr bool - }{ - { - name: "updates the config map and changes the kubeadm API version", - version: "v1.17.2", - config: kconfv1beta1, - expectErr: false, - }, - { - name: "updates the config map and preserves the kubeadm API version", - version: "v1.17.2", - config: kconfv1beta2, - expectErr: false, - }, - { - name: "returns error if cannot find config map", - expectErr: true, - }, - { - name: "returns error if config has bad data", - config: kubeadmConfigBadData, - expectErr: true, - }, - { - name: "returns error if config doesn't have cluster config key", - config: kubeadmConfigNoKey, - expectErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - conf := tt.config.DeepCopy() - k := kubeadmConfig{ - ConfigMap: conf, - } - err := k.UpdateKubernetesVersion(tt.version) - if tt.expectErr { - g.Expect(err).To(HaveOccurred()) - return - } - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(conf.Data[clusterConfigurationKey]).To(ContainSubstring("kubernetesVersion: v1.17.2")) - g.Expect(conf.Data[clusterConfigurationKey]).To(ContainSubstring("apiVersion: kubeadm.k8s.io/v1beta2")) - }) - } -} - -func Test_kubeadmConfig_RemoveAPIEndpoint(t *testing.T) { - g := NewWithT(t) - original := &corev1.ConfigMap{ - 
Data: map[string]string{ - "ClusterStatus": `apiEndpoints: - ip-10-0-0-1.ec2.internal: - advertiseAddress: 10.0.0.1 - bindPort: 6443 - ip-10-0-0-2.ec2.internal: - advertiseAddress: 10.0.0.2 - bindPort: 6443 - someFieldThatIsAddedInTheFuture: bar - ip-10-0-0-3.ec2.internal: - advertiseAddress: 10.0.0.3 - bindPort: 6443 - someFieldThatIsAddedInTheFuture: baz - ip-10-0-0-4.ec2.internal: - advertiseAddress: 10.0.0.4 - bindPort: 6443 - someFieldThatIsAddedInTheFuture: fizzbuzz -apiVersion: kubeadm.k8s.io/vNbetaM -kind: ClusterStatus`, - }, - } - kc := kubeadmConfig{ConfigMap: original} - g.Expect(kc.RemoveAPIEndpoint("ip-10-0-0-3.ec2.internal")).ToNot(HaveOccurred()) - g.Expect(kc.ConfigMap.Data).To(HaveKey("ClusterStatus")) - var status struct { - APIEndpoints map[string]interface{} `yaml:"apiEndpoints"` - APIVersion string `yaml:"apiVersion"` - Kind string `yaml:"kind"` - - Extra map[string]interface{} `yaml:",inline"` - } - g.Expect(yaml.UnmarshalStrict([]byte(kc.ConfigMap.Data["ClusterStatus"]), &status)).To(Succeed()) - g.Expect(status.Extra).To(BeEmpty()) - - g.Expect(status.APIEndpoints).To(SatisfyAll( - HaveLen(3), - HaveKey("ip-10-0-0-1.ec2.internal"), - HaveKey("ip-10-0-0-2.ec2.internal"), - HaveKey("ip-10-0-0-4.ec2.internal"), - WithTransform(func(ep map[string]interface{}) interface{} { - return ep["ip-10-0-0-4.ec2.internal"] - }, SatisfyAll( - HaveKeyWithValue("advertiseAddress", "10.0.0.4"), - HaveKey("bindPort"), - HaveKey("someFieldThatIsAddedInTheFuture"), - )), - )) -} - -func TestUpdateEtcdMeta(t *testing.T) { - tests := []struct { - name string - clusterConfigurationValue string - imageRepository string - imageTag string - expectChanged bool - expectErr error - }{ - { - name: "it should set the values, if they were empty", - clusterConfigurationValue: ` -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -etcd: - local: - dataDir: /var/lib/etcd -`, - imageRepository: "gcr.io/k8s/etcd", - imageTag: "0.10.9", - expectChanged: true, - }, - { - name: "it should return false with no error, if there are no changes", - clusterConfigurationValue: ` -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -etcd: - local: - dataDir: /var/lib/etcd - imageRepository: "gcr.io/k8s/etcd" - imageTag: "0.10.9" -`, - imageRepository: "gcr.io/k8s/etcd", - imageTag: "0.10.9", - expectChanged: false, - }, - { - name: "it shouldn't write empty strings", - clusterConfigurationValue: ` -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -etcd: - local: - dataDir: /var/lib/etcd -`, - imageRepository: "", - imageTag: "", - expectChanged: false, - }, - { - name: "it should overwrite imageTag", - clusterConfigurationValue: ` -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -etcd: - local: - imageTag: 0.10.8 - dataDir: /var/lib/etcd -`, - imageTag: "0.10.9", - expectChanged: true, - }, - { - name: "it should overwrite imageRepository", - clusterConfigurationValue: ` -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -etcd: - local: - imageRepository: another-custom-repo - dataDir: /var/lib/etcd -`, - imageRepository: "gcr.io/k8s/etcd", - expectChanged: true, - }, - { - name: "it should error if it's not a valid k8s object", - clusterConfigurationValue: ` -etcd: - local: - imageRepository: another-custom-repo - dataDir: /var/lib/etcd -`, - expectErr: errors.New("Object 'Kind' is missing"), - }, - { - name: "it should error if the current value is a type we don't expect", - clusterConfigurationValue: ` -apiVersion: kubeadm.k8s.io/v1beta2 -kind: 
ClusterConfiguration -etcd: - local: - imageRepository: true - dataDir: /var/lib/etcd -`, - expectErr: errors.New(".etcd.local.imageRepository accessor error"), - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - g := NewWithT(t) - - kconfig := &kubeadmConfig{ - ConfigMap: &corev1.ConfigMap{ - Data: map[string]string{ - clusterConfigurationKey: test.clusterConfigurationValue, - }, - }, - } - - changed, err := kconfig.UpdateEtcdMeta(test.imageRepository, test.imageTag) - if test.expectErr == nil { - g.Expect(err).ToNot(HaveOccurred()) - } else { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring(test.expectErr.Error())) - } - - g.Expect(changed).To(Equal(test.expectChanged)) - if changed { - if test.imageRepository != "" { - g.Expect(kconfig.ConfigMap.Data[clusterConfigurationKey]).To(ContainSubstring(test.imageRepository)) - } - if test.imageTag != "" { - g.Expect(kconfig.ConfigMap.Data[clusterConfigurationKey]).To(ContainSubstring(test.imageTag)) - } - } - }) - } -} - -func Test_kubeadmConfig_UpdateCoreDNSImageInfo(t *testing.T) { - cm := &corev1.ConfigMap{ - Data: map[string]string{ - "ClusterConfiguration": `apiServer: - extraArgs: - authorization-mode: Node,RBAC - cloud-provider: aws - timeoutForControlPlane: 4m0s -apiVersion: kubeadm.k8s.io/v1beta2 -certificatesDir: /etc/kubernetes/pki -clusterName: foobar -controlPlaneEndpoint: foobar.us-east-2.elb.amazonaws.com -controllerManager: - extraArgs: - cloud-provider: aws -dns: - type: CoreDNS -etcd: - local: - dataDir: /var/lib/etcd -imageRepository: k8s.gcr.io -kind: ClusterConfiguration -kubernetesVersion: v1.16.1 -networking: - dnsDomain: cluster.local - podSubnet: 192.168.0.0/16 - serviceSubnet: 10.96.0.0/12 -scheduler: {}`, - }, - } - - badcm := &corev1.ConfigMap{ - Data: map[string]string{ - "ClusterConfiguration": `apiServer: - extraArgs: - authorization-mode: Node,RBAC - ...`, - }, - } - - tests := []struct { - name string - cm *corev1.ConfigMap - expectErr bool - }{ - { - name: "sets the image repository and tag", - cm: cm, - expectErr: false, - }, - { - name: "returns error if unable to convert yaml", - cm: badcm, - expectErr: true, - }, - { - name: "returns error if cannot find cluster config key", - cm: &corev1.ConfigMap{Data: map[string]string{}}, - expectErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - imageRepository := "gcr.io/example" - imageTag := "v1.0.1-sometag" - kc := kubeadmConfig{ConfigMap: tt.cm} - - if tt.expectErr { - g.Expect(kc.UpdateCoreDNSImageInfo(imageRepository, imageTag)).ToNot(Succeed()) - return - } - g.Expect(kc.UpdateCoreDNSImageInfo(imageRepository, imageTag)).To(Succeed()) - g.Expect(kc.ConfigMap.Data).To(HaveKey(clusterConfigurationKey)) - - type dns struct { - Type string `yaml:"type"` - ImageRepository string `yaml:"imageRepository"` - ImageTag string `yaml:"imageTag"` - } - var actualClusterConfig struct { - DNS dns `yaml:"dns"` - } - - g.Expect(yaml.Unmarshal([]byte(kc.ConfigMap.Data[clusterConfigurationKey]), &actualClusterConfig)).To(Succeed()) - actualDNS := actualClusterConfig.DNS - g.Expect(actualDNS.ImageRepository).To(Equal(imageRepository)) - g.Expect(actualDNS.ImageTag).To(Equal(imageTag)) - }) - } -} - -func TestUpdateImageRepository(t *testing.T) { - tests := []struct { - name string - data map[string]string - imageRepository string - expected string - expectErr error - }{ - { - name: "it should set the values, if they were empty", - data: map[string]string{ - 
clusterConfigurationKey: ` -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -imageRepository: k8s.gcr.io -`}, - imageRepository: "example.com/k8s", - expected: "example.com/k8s", - }, - { - name: "it shouldn't write empty strings", - data: map[string]string{ - clusterConfigurationKey: ` -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -imageRepository: k8s.gcr.io -`}, - imageRepository: "", - expected: "k8s.gcr.io", - }, - { - name: "it should error if it's not a valid k8s object", - data: map[string]string{ - clusterConfigurationKey: ` -imageRepository: "cool" -`}, - imageRepository: "example.com/k8s", - expectErr: errors.New("Object 'Kind' is missing"), - }, - { - name: "returns an error if config map doesn't have the cluster config data key", - data: map[string]string{}, - imageRepository: "example.com/k8s", - expectErr: errors.New("unable to find \"ClusterConfiguration\" key in kubeadm ConfigMap"), - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - g := NewWithT(t) - - kconfig := &kubeadmConfig{ - ConfigMap: &corev1.ConfigMap{ - Data: test.data, - }, - } - - err := kconfig.UpdateImageRepository(test.imageRepository) - if test.expectErr == nil { - g.Expect(err).ToNot(HaveOccurred()) - } else { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring(test.expectErr.Error())) - } - - g.Expect(kconfig.ConfigMap.Data[clusterConfigurationKey]).To(ContainSubstring(test.expected)) - }) - } -} - -func TestApiServer(t *testing.T) { - tests := []struct { - name string - data map[string]string - newAPIServer bootstrapv1.APIServer - expected string - expectErr error - changed bool - }{ - { - name: "it should set the values when no api server config is present", - data: map[string]string{ - clusterConfigurationKey: `apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -`}, - newAPIServer: bootstrapv1.APIServer{ - ControlPlaneComponent: bootstrapv1.ControlPlaneComponent{ - ExtraArgs: map[string]string{ - "foo": "bar", - }, - }, - CertSANs: []string{"foo", "bar"}, - }, - expected: `apiServer: - certSANs: - - foo - - bar - extraArgs: - foo: bar -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -`, - changed: true, - }, - { - name: "it should override existing config with the values set in spec", - data: map[string]string{ - clusterConfigurationKey: `apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -apiServer: - certSANs: - - foo - - bar - extraArgs: - foo: bar - extraVolumes: - - name: mount1 - hostPath: /foo/bar - mountPath: /bar/baz - timeoutForControlPlane: 4m0s -`}, - newAPIServer: bootstrapv1.APIServer{ - ControlPlaneComponent: bootstrapv1.ControlPlaneComponent{ - ExtraArgs: map[string]string{ - "bar": "baz", - "someKey": "someVal", - }, - ExtraVolumes: []bootstrapv1.HostPathMount{ - { - Name: "mount2", - HostPath: "/bar/baz", - MountPath: "/foo/bar", - }, - { - Name: "anotherMount", - HostPath: "/a/b", - MountPath: "/c/d", - }, - }, - }, - CertSANs: []string{"foo", "bar", "baz"}, - TimeoutForControlPlane: &metav1.Duration{Duration: 5 * time.Minute}, - }, - expected: `apiServer: - certSANs: - - foo - - bar - - baz - extraArgs: - bar: baz - someKey: someVal - extraVolumes: - - hostPath: /bar/baz - mountPath: /foo/bar - name: mount2 - - hostPath: /a/b - mountPath: /c/d - name: anotherMount - timeoutForControlPlane: 5m0s -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -`, - changed: true, - }, - { - name: "it should not do anything if there are no changes", - 
data: map[string]string{ - clusterConfigurationKey: `apiServer: - certSANs: - - foo - - bar - extraArgs: - foo: bar - bar: baz - extraVolumes: - - hostPath: /foo/bar - mountPath: /bar/baz - name: mount1 - - hostPath: /a/b - mountPath: /c/d - name: mount2 - timeoutForControlPlane: 3m0s -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -`}, - newAPIServer: bootstrapv1.APIServer{ - ControlPlaneComponent: bootstrapv1.ControlPlaneComponent{ - ExtraArgs: map[string]string{"foo": "bar", "bar": "baz"}, - ExtraVolumes: []bootstrapv1.HostPathMount{{ - Name: "mount1", - HostPath: "/foo/bar", - MountPath: "/bar/baz", - }, - { - Name: "mount2", - HostPath: "/a/b", - MountPath: "/c/d", - }, - }, - }, - CertSANs: []string{"foo", "bar"}, - TimeoutForControlPlane: &metav1.Duration{Duration: 3 * time.Minute}, - }, - expected: `apiServer: - certSANs: - - foo - - bar - extraArgs: - foo: bar - bar: baz - extraVolumes: - - hostPath: /foo/bar - mountPath: /bar/baz - name: mount1 - - hostPath: /a/b - mountPath: /c/d - name: mount2 - timeoutForControlPlane: 3m0s -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -`, - changed: false, - }, - { - name: "it should return error when the config is invalid", - data: map[string]string{ - clusterConfigurationKey: `apiServer: invalidJson`}, - newAPIServer: bootstrapv1.APIServer{ - CertSANs: []string{"foo", "bar"}, - }, - expectErr: errors.New(""), - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - g := NewWithT(t) - - kconfig := &kubeadmConfig{ - ConfigMap: &corev1.ConfigMap{ - Data: test.data, - }, - } - - changed, err := kconfig.UpdateAPIServer(test.newAPIServer) - if test.expectErr == nil { - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(changed).Should(Equal(test.changed)) - g.Expect(kconfig.ConfigMap.Data[clusterConfigurationKey]).Should(Equal(test.expected)) - } else { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring(test.expectErr.Error())) - g.Expect(changed).Should(Equal(false)) - } - }) - } -} - -func TestControllerManager(t *testing.T) { - tests := []struct { - name string - data map[string]string - newControllerManager bootstrapv1.ControlPlaneComponent - expected string - expectErr error - changed bool - }{ - { - name: "it should set the values when no controller manager config is present", - data: map[string]string{ - clusterConfigurationKey: `apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -`}, - newControllerManager: bootstrapv1.ControlPlaneComponent{ - ExtraArgs: map[string]string{ - "foo": "bar", - }, - ExtraVolumes: []bootstrapv1.HostPathMount{{Name: "mount1", HostPath: "/foo", MountPath: "/bar"}}, - }, - expected: `apiVersion: kubeadm.k8s.io/v1beta2 -controllerManager: - extraArgs: - foo: bar - extraVolumes: - - hostPath: /foo - mountPath: /bar - name: mount1 -kind: ClusterConfiguration -`, - changed: true, - }, - { - name: "it should override existing config with the values set in spec", - data: map[string]string{ - clusterConfigurationKey: `apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -controllerManager: - extraArgs: - foo: bar - extraVolumes: - - name: mount1 - hostPath: /foo/bar - mountPath: /bar/baz -`}, - newControllerManager: bootstrapv1.ControlPlaneComponent{ - ExtraArgs: map[string]string{ - "bar": "baz", - "someKey": "someVal", - }, - ExtraVolumes: []bootstrapv1.HostPathMount{ - { - Name: "mount2", - HostPath: "/bar/baz", - MountPath: "/foo/bar", - }, - { - Name: "anotherMount", - HostPath: "/a/b", - MountPath: "/c/d", - 
}, - }, - }, - expected: `apiVersion: kubeadm.k8s.io/v1beta2 -controllerManager: - extraArgs: - bar: baz - someKey: someVal - extraVolumes: - - hostPath: /bar/baz - mountPath: /foo/bar - name: mount2 - - hostPath: /a/b - mountPath: /c/d - name: anotherMount -kind: ClusterConfiguration -`, - changed: true, - }, - { - name: "it should not do anything if there are no changes", - data: map[string]string{ - clusterConfigurationKey: `controllerManager: - extraArgs: - foo: bar - bar: baz - extraVolumes: - - hostPath: /foo/bar - mountPath: /bar/baz - name: mount1 - - hostPath: /a/b - mountPath: /c/d - name: mount2 -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -`}, - newControllerManager: bootstrapv1.ControlPlaneComponent{ - ExtraArgs: map[string]string{"foo": "bar", "bar": "baz"}, - ExtraVolumes: []bootstrapv1.HostPathMount{{ - Name: "mount1", - HostPath: "/foo/bar", - MountPath: "/bar/baz", - }, - { - Name: "mount2", - HostPath: "/a/b", - MountPath: "/c/d", - }, - }, - }, - expected: `controllerManager: - extraArgs: - foo: bar - bar: baz - extraVolumes: - - hostPath: /foo/bar - mountPath: /bar/baz - name: mount1 - - hostPath: /a/b - mountPath: /c/d - name: mount2 -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -`, - changed: false, - }, - { - name: "it should return error when the config is invalid", - data: map[string]string{ - clusterConfigurationKey: `controllerManager: invalidJson`}, - newControllerManager: bootstrapv1.ControlPlaneComponent{ - ExtraArgs: map[string]string{"foo": "bar", "bar": "baz"}, - }, - expectErr: errors.New(""), - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - g := NewWithT(t) - - kconfig := &kubeadmConfig{ - ConfigMap: &corev1.ConfigMap{ - Data: test.data, - }, - } - - changed, err := kconfig.UpdateControllerManager(test.newControllerManager) - if test.expectErr == nil { - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(changed).Should(Equal(test.changed)) - g.Expect(kconfig.ConfigMap.Data[clusterConfigurationKey]).Should(Equal(test.expected)) - } else { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring(test.expectErr.Error())) - g.Expect(changed).Should(Equal(false)) - } - }) - } -} - -func TestScheduler(t *testing.T) { - tests := []struct { - name string - data map[string]string - newScheduler bootstrapv1.ControlPlaneComponent - expected string - expectErr error - changed bool - }{ - { - name: "it should set the values when no scheduler config is present", - data: map[string]string{ - clusterConfigurationKey: `apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -`}, - newScheduler: bootstrapv1.ControlPlaneComponent{ - ExtraArgs: map[string]string{ - "foo": "bar", - }, - ExtraVolumes: []bootstrapv1.HostPathMount{{Name: "mount1", HostPath: "/foo", MountPath: "/bar"}}, - }, - expected: `apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -scheduler: - extraArgs: - foo: bar - extraVolumes: - - hostPath: /foo - mountPath: /bar - name: mount1 -`, - changed: true, - }, - { - name: "it should override existing config with the values set in spec", - data: map[string]string{ - clusterConfigurationKey: `apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -scheduler: - extraArgs: - foo: bar - extraVolumes: - - name: mount1 - hostPath: /foo/bar - mountPath: /bar/baz -`}, - newScheduler: bootstrapv1.ControlPlaneComponent{ - ExtraArgs: map[string]string{ - "bar": "baz", - "someKey": "someVal", - }, - ExtraVolumes: []bootstrapv1.HostPathMount{ - { - Name: 
"mount2", - HostPath: "/bar/baz", - MountPath: "/foo/bar", - }, - { - Name: "anotherMount", - HostPath: "/a/b", - MountPath: "/c/d", - }, - }, - }, - expected: `apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -scheduler: - extraArgs: - bar: baz - someKey: someVal - extraVolumes: - - hostPath: /bar/baz - mountPath: /foo/bar - name: mount2 - - hostPath: /a/b - mountPath: /c/d - name: anotherMount -`, - changed: true, - }, - { - name: "it should not do anything if there are no changes", - data: map[string]string{ - clusterConfigurationKey: `scheduler: - extraArgs: - foo: bar - bar: baz - extraVolumes: - - hostPath: /foo/bar - mountPath: /bar/baz - name: mount1 - - hostPath: /a/b - mountPath: /c/d - name: mount2 -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -`}, - newScheduler: bootstrapv1.ControlPlaneComponent{ - ExtraArgs: map[string]string{"foo": "bar", "bar": "baz"}, - ExtraVolumes: []bootstrapv1.HostPathMount{{ - Name: "mount1", - HostPath: "/foo/bar", - MountPath: "/bar/baz", - }, - { - Name: "mount2", - HostPath: "/a/b", - MountPath: "/c/d", - }, - }, - }, - expected: `scheduler: - extraArgs: - foo: bar - bar: baz - extraVolumes: - - hostPath: /foo/bar - mountPath: /bar/baz - name: mount1 - - hostPath: /a/b - mountPath: /c/d - name: mount2 -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -`, - changed: false, - }, - { - name: "it should return error when the config is invalid", - data: map[string]string{ - clusterConfigurationKey: `scheduler: invalidJson`}, - newScheduler: bootstrapv1.ControlPlaneComponent{ - ExtraArgs: map[string]string{"foo": "bar", "bar": "baz"}, - }, - expectErr: errors.New(""), - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - g := NewWithT(t) - - kconfig := &kubeadmConfig{ - ConfigMap: &corev1.ConfigMap{ - Data: test.data, - }, - } - - changed, err := kconfig.UpdateScheduler(test.newScheduler) - if test.expectErr == nil { - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(changed).Should(Equal(test.changed)) - g.Expect(kconfig.ConfigMap.Data[clusterConfigurationKey]).Should(Equal(test.expected)) - } else { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring(test.expectErr.Error())) - g.Expect(changed).Should(Equal(false)) - } - }) - } -} diff --git a/controlplane/kubeadm/internal/workload_cluster.go b/controlplane/kubeadm/internal/workload_cluster.go index c962420c8a80..2d80676fe76c 100644 --- a/controlplane/kubeadm/internal/workload_cluster.go +++ b/controlplane/kubeadm/internal/workload_cluster.go @@ -26,6 +26,7 @@ import ( "crypto/x509/pkix" "fmt" "math/big" + "reflect" "time" "github.com/blang/semver" @@ -37,6 +38,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" + kubeadmtypes "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha4" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/certs" @@ -52,6 +54,8 @@ const ( kubeletConfigKey = "kubelet" cgroupDriverKey = "cgroupDriver" labelNodeRoleControlPlane = "node-role.kubernetes.io/master" + clusterStatusKey = "ClusterStatus" + clusterConfigurationKey = "ClusterConfiguration" ) var ( @@ -73,22 +77,22 @@ type WorkloadCluster interface { ReconcileKubeletRBACBinding(ctx context.Context, version semver.Version) error ReconcileKubeletRBACRole(ctx context.Context, version semver.Version) error 
UpdateKubernetesVersionInKubeadmConfigMap(ctx context.Context, version semver.Version) error - UpdateImageRepositoryInKubeadmConfigMap(ctx context.Context, imageRepository string) error - UpdateEtcdVersionInKubeadmConfigMap(ctx context.Context, imageRepository, imageTag string) error - UpdateAPIServerInKubeadmConfigMap(ctx context.Context, apiServer bootstrapv1.APIServer) error - UpdateControllerManagerInKubeadmConfigMap(ctx context.Context, controllerManager bootstrapv1.ControlPlaneComponent) error - UpdateSchedulerInKubeadmConfigMap(ctx context.Context, scheduler bootstrapv1.ControlPlaneComponent) error + UpdateImageRepositoryInKubeadmConfigMap(ctx context.Context, imageRepository string, version semver.Version) error + UpdateEtcdVersionInKubeadmConfigMap(ctx context.Context, imageRepository, imageTag string, version semver.Version) error + UpdateAPIServerInKubeadmConfigMap(ctx context.Context, apiServer bootstrapv1.APIServer, version semver.Version) error + UpdateControllerManagerInKubeadmConfigMap(ctx context.Context, controllerManager bootstrapv1.ControlPlaneComponent, version semver.Version) error + UpdateSchedulerInKubeadmConfigMap(ctx context.Context, scheduler bootstrapv1.ControlPlaneComponent, version semver.Version) error UpdateKubeletConfigMap(ctx context.Context, version semver.Version) error UpdateKubeProxyImageInfo(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane) error - UpdateCoreDNS(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane) error + UpdateCoreDNS(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, version semver.Version) error RemoveEtcdMemberForMachine(ctx context.Context, machine *clusterv1.Machine) error - RemoveMachineFromKubeadmConfigMap(ctx context.Context, machine *clusterv1.Machine) error - RemoveNodeFromKubeadmConfigMap(ctx context.Context, nodeName string) error + RemoveMachineFromKubeadmConfigMap(ctx context.Context, machine *clusterv1.Machine, version semver.Version) error + RemoveNodeFromKubeadmConfigMap(ctx context.Context, nodeName string, version semver.Version) error ForwardEtcdLeadership(ctx context.Context, machine *clusterv1.Machine, leaderCandidate *clusterv1.Machine) error AllowBootstrapTokensToGetNodes(ctx context.Context) error // State recovery tasks. - ReconcileEtcdMembers(ctx context.Context, nodeNames []string) ([]string, error) + ReconcileEtcdMembers(ctx context.Context, nodeNames []string, version semver.Version) ([]string, error) } // Workload defines operations on workload clusters. @@ -120,37 +124,20 @@ func (w *Workload) getConfigMap(ctx context.Context, configMap ctrlclient.Object } // UpdateImageRepositoryInKubeadmConfigMap updates the image repository in the kubeadm config map. 
-func (w *Workload) UpdateImageRepositoryInKubeadmConfigMap(ctx context.Context, imageRepository string) error { - configMapKey := ctrlclient.ObjectKey{Name: "kubeadm-config", Namespace: metav1.NamespaceSystem} - kubeadmConfigMap, err := w.getConfigMap(ctx, configMapKey) - if err != nil { - return err - } - config := &kubeadmConfig{ConfigMap: kubeadmConfigMap} - if err := config.UpdateImageRepository(imageRepository); err != nil { - return err - } - if err := w.Client.Update(ctx, config.ConfigMap); err != nil { - return errors.Wrap(err, "error updating kubeadm ConfigMap") - } - return nil +func (w *Workload) UpdateImageRepositoryInKubeadmConfigMap(ctx context.Context, imageRepository string, version semver.Version) error { + return w.updateClusterConfiguration(ctx, func(c *bootstrapv1.ClusterConfiguration) { + if imageRepository == "" { + return + } + c.ImageRepository = imageRepository + }, version) } // UpdateKubernetesVersionInKubeadmConfigMap updates the kubernetes version in the kubeadm config map. func (w *Workload) UpdateKubernetesVersionInKubeadmConfigMap(ctx context.Context, version semver.Version) error { - configMapKey := ctrlclient.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem} - kubeadmConfigMap, err := w.getConfigMap(ctx, configMapKey) - if err != nil { - return err - } - config := &kubeadmConfig{ConfigMap: kubeadmConfigMap} - if err := config.UpdateKubernetesVersion(fmt.Sprintf("v%s", version)); err != nil { - return err - } - if err := w.Client.Update(ctx, config.ConfigMap); err != nil { - return errors.Wrap(err, "error updating kubeadm ConfigMap") - } - return nil + return w.updateClusterConfiguration(ctx, func(c *bootstrapv1.ClusterConfiguration) { + c.KubernetesVersion = fmt.Sprintf("v%s", version.String()) + }, version) } // UpdateKubeletConfigMap will create a new kubelet-config-1.x config map for a new version of the kubelet. @@ -182,7 +169,7 @@ func (w *Workload) UpdateKubeletConfigMap(ctx context.Context, version semver.Ve // In order to avoid using two cgroup drivers on the same machine, // (cgroupfs and systemd cgroup drivers), starting from // 1.21 image builder is going to configure containerd for using the - // systemd driver, and the Kubelet configuration must + // systemd driver, and the Kubelet configuration must be aligned to this change. // NOTE: It is considered safe to update the kubelet-config-1.21 ConfigMap // because only new nodes using v1.21 images will pick up the change during // kubeadm join. @@ -227,103 +214,120 @@ func (w *Workload) UpdateKubeletConfigMap(ctx context.Context, version semver.Ve } // UpdateAPIServerInKubeadmConfigMap updates api server configuration in kubeadm config map. -func (w *Workload) UpdateAPIServerInKubeadmConfigMap(ctx context.Context, apiServer bootstrapv1.APIServer) error { - configMapKey := ctrlclient.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem} - kubeadmConfigMap, err := w.getConfigMap(ctx, configMapKey) - if err != nil { - return err - } - config := &kubeadmConfig{ConfigMap: kubeadmConfigMap} - changed, err := config.UpdateAPIServer(apiServer) - if err != nil { - return err - } +func (w *Workload) UpdateAPIServerInKubeadmConfigMap(ctx context.Context, apiServer bootstrapv1.APIServer, version semver.Version) error { + return w.updateClusterConfiguration(ctx, func(c *bootstrapv1.ClusterConfiguration) { + c.APIServer = apiServer + }, version) +} + +// UpdateControllerManagerInKubeadmConfigMap updates controller manager configuration in kubeadm config map. 
+func (w *Workload) UpdateControllerManagerInKubeadmConfigMap(ctx context.Context, controllerManager bootstrapv1.ControlPlaneComponent, version semver.Version) error { + return w.updateClusterConfiguration(ctx, func(c *bootstrapv1.ClusterConfiguration) { + c.ControllerManager = controllerManager + }, version) +} - if !changed { +// UpdateSchedulerInKubeadmConfigMap updates scheduler configuration in kubeadm config map. +func (w *Workload) UpdateSchedulerInKubeadmConfigMap(ctx context.Context, scheduler bootstrapv1.ControlPlaneComponent, version semver.Version) error { + return w.updateClusterConfiguration(ctx, func(c *bootstrapv1.ClusterConfiguration) { + c.Scheduler = scheduler + }, version) +} + +// RemoveMachineFromKubeadmConfigMap removes the entry for the machine from the kubeadm configmap. +func (w *Workload) RemoveMachineFromKubeadmConfigMap(ctx context.Context, machine *clusterv1.Machine, version semver.Version) error { + if machine == nil || machine.Status.NodeRef == nil { + // Nothing to do, no node for Machine return nil } - if err := w.Client.Update(ctx, config.ConfigMap); err != nil { - return errors.Wrap(err, "error updating kubeadm ConfigMap") - } - return nil + return w.RemoveNodeFromKubeadmConfigMap(ctx, machine.Status.NodeRef.Name, version) } -// UpdateControllerManagerInKubeadmConfigMap updates controller manager configuration in kubeadm config map. -func (w *Workload) UpdateControllerManagerInKubeadmConfigMap(ctx context.Context, controllerManager bootstrapv1.ControlPlaneComponent) error { - configMapKey := ctrlclient.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem} - kubeadmConfigMap, err := w.getConfigMap(ctx, configMapKey) +// RemoveNodeFromKubeadmConfigMap removes the entry for the node from the kubeadm configmap. +func (w *Workload) RemoveNodeFromKubeadmConfigMap(ctx context.Context, name string, version semver.Version) error { + return util.Retry(func() (bool, error) { + if err := w.updateClusterStatus(ctx, func(s *bootstrapv1.ClusterStatus) { + delete(s.APIEndpoints, name) + }, version); err != nil { + return false, err + } + return true, nil + }, 5) +} + +// updateClusterStatus gets the ClusterStatus kubeadm-config ConfigMap, converts it to the +// Cluster API representation, and then applies a mutation func; if changes are detected, the +// data are converted back into the Kubeadm API version in use for the target Kubernetes version and the +// kubeadm-config ConfigMap updated. 
+func (w *Workload) updateClusterStatus(ctx context.Context, mutator func(status *bootstrapv1.ClusterStatus), version semver.Version) error { + key := ctrlclient.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem} + configMap, err := w.getConfigMap(ctx, key) if err != nil { - return err + return errors.Wrap(err, "failed to get kubeadmConfigMap") } - config := &kubeadmConfig{ConfigMap: kubeadmConfigMap} - changed, err := config.UpdateControllerManager(controllerManager) - if err != nil { - return err + + currentData, ok := configMap.Data[clusterStatusKey] + if !ok { + return errors.Errorf("unable to find %q in the kubeadm-config ConfigMap", clusterStatusKey) } - if !changed { - return nil + currentClusterStatus, err := kubeadmtypes.UnmarshalClusterStatus(currentData) + if err != nil { + return errors.Wrapf(err, "unable to decode %q in the kubeadm-config ConfigMap's from YAML", clusterStatusKey) } - if err := w.Client.Update(ctx, config.ConfigMap); err != nil { - return errors.Wrap(err, "error updating kubeadm ConfigMap") + updatedClusterStatus := currentClusterStatus.DeepCopy() + mutator(updatedClusterStatus) + + if !reflect.DeepEqual(currentClusterStatus, updatedClusterStatus) { + updatedData, err := kubeadmtypes.MarshalClusterStatusForVersion(updatedClusterStatus, version) + if err != nil { + return errors.Wrapf(err, "unable to encode %q kubeadm-config ConfigMap's to YAML", clusterStatusKey) + } + configMap.Data[clusterStatusKey] = updatedData + if err := w.Client.Update(ctx, configMap); err != nil { + return errors.Wrap(err, "failed to upgrade the kubeadmConfigMap") + } } return nil } -// UpdateSchedulerInKubeadmConfigMap updates scheduler configuration in kubeadm config map. -func (w *Workload) UpdateSchedulerInKubeadmConfigMap(ctx context.Context, scheduler bootstrapv1.ControlPlaneComponent) error { - configMapKey := ctrlclient.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem} - kubeadmConfigMap, err := w.getConfigMap(ctx, configMapKey) - if err != nil { - return err - } - config := &kubeadmConfig{ConfigMap: kubeadmConfigMap} - changed, err := config.UpdateScheduler(scheduler) +// updateClusterConfiguration gets the ClusterConfiguration kubeadm-config ConfigMap, converts it to the +// Cluster API representation, and then applies a mutation func; if changes are detected, the +// data are converted back into the Kubeadm API version in use for the target Kubernetes version and the +// kubeadm-config ConfigMap updated. +func (w *Workload) updateClusterConfiguration(ctx context.Context, mutator func(*bootstrapv1.ClusterConfiguration), version semver.Version) error { + key := ctrlclient.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem} + configMap, err := w.getConfigMap(ctx, key) if err != nil { - return err + return errors.Wrap(err, "failed to get kubeadmConfigMap") } - if !changed { - return nil + currentData, ok := configMap.Data[clusterConfigurationKey] + if !ok { + return errors.Errorf("unable to find %q in the kubeadm-config ConfigMap", clusterConfigurationKey) } - if err := w.Client.Update(ctx, config.ConfigMap); err != nil { - return errors.Wrap(err, "error updating kubeadm ConfigMap") - } - return nil -} - -// RemoveMachineFromKubeadmConfigMap removes the entry for the machine from the kubeadm configmap. 
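
A minimal, hypothetical sketch of the call pattern the new signatures imply: every public helper now threads the parsed target Kubernetes version down to these round-trip functions, so a caller parses the version once and reuses it. The function name, repository, and tag values below are invented for illustration and are not taken from the patch.

```go
package internal // hypothetical placement alongside the Workload helpers

import (
	"context"

	"github.com/blang/semver"
	"github.com/pkg/errors"
)

// updateKubeadmConfigMapForVersion is a hypothetical illustration of the new call
// pattern. w and ctx are assumed to come from the reconciliation that drives these
// helpers; the repository and tag are example values only.
func updateKubeadmConfigMapForVersion(ctx context.Context, w *Workload, versionStr string) error {
	parsedVersion, err := semver.ParseTolerant(versionStr) // e.g. "v1.19.1"
	if err != nil {
		return errors.Wrapf(err, "failed to parse kubernetes version %q", versionStr)
	}
	// Each helper decodes the relevant kubeadm-config key, applies its mutation, and
	// writes the ConfigMap back only when the mutation actually changed something.
	if err := w.UpdateKubernetesVersionInKubeadmConfigMap(ctx, parsedVersion); err != nil {
		return err
	}
	if err := w.UpdateImageRepositoryInKubeadmConfigMap(ctx, "registry.example.com/k8s", parsedVersion); err != nil {
		return err
	}
	return w.UpdateEtcdVersionInKubeadmConfigMap(ctx, "registry.example.com/etcd", "3.4.13-0", parsedVersion)
}
```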
-func (w *Workload) RemoveMachineFromKubeadmConfigMap(ctx context.Context, machine *clusterv1.Machine) error { - if machine == nil || machine.Status.NodeRef == nil { - // Nothing to do, no node for Machine - return nil + currentObj, err := kubeadmtypes.UnmarshalClusterConfiguration(currentData) + if err != nil { + return errors.Wrapf(err, "unable to decode %q in the kubeadm-config ConfigMap's from YAML", clusterConfigurationKey) } - return w.RemoveNodeFromKubeadmConfigMap(ctx, machine.Status.NodeRef.Name) -} + updatedObj := currentObj.DeepCopy() + mutator(updatedObj) -// RemoveNodeFromKubeadmConfigMap removes the entry for the node from the kubeadm configmap. -func (w *Workload) RemoveNodeFromKubeadmConfigMap(ctx context.Context, name string) error { - return util.Retry(func() (bool, error) { - configMapKey := ctrlclient.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem} - kubeadmConfigMap, err := w.getConfigMap(ctx, configMapKey) + if !reflect.DeepEqual(currentObj, updatedObj) { + updatedData, err := kubeadmtypes.MarshalClusterConfigurationForVersion(updatedObj, version) if err != nil { - Log.Error(err, "unable to get kubeadmConfigMap") - return false, nil - } - config := &kubeadmConfig{ConfigMap: kubeadmConfigMap} - if err := config.RemoveAPIEndpoint(name); err != nil { - return false, err + return errors.Wrapf(err, "unable to encode %q kubeadm-config ConfigMap's to YAML", clusterConfigurationKey) } - if err := w.Client.Update(ctx, config.ConfigMap); err != nil { - Log.Error(err, "error updating kubeadm ConfigMap") - return false, nil + configMap.Data[clusterConfigurationKey] = updatedData + if err := w.Client.Update(ctx, configMap); err != nil { + return errors.Wrap(err, "failed to upgrade the kubeadmConfigMap") } - return true, nil - }, 5) + } + return nil } // ClusterStatus holds stats information about the cluster. @@ -481,3 +485,11 @@ func patchKubeProxyImage(ds *appsv1.DaemonSet, image string) { } } } + +// yamlToUnstructured looks inside a config map for a specific key and extracts the embedded YAML into an +// *unstructured.Unstructured. +func yamlToUnstructured(rawYAML []byte) (*unstructured.Unstructured, error) { + unst := &unstructured.Unstructured{} + err := yaml.Unmarshal(rawYAML, unst) + return unst, err +} diff --git a/controlplane/kubeadm/internal/workload_cluster_coredns.go b/controlplane/kubeadm/internal/workload_cluster_coredns.go index fafeb3470c22..301a3773bcb3 100644 --- a/controlplane/kubeadm/internal/workload_cluster_coredns.go +++ b/controlplane/kubeadm/internal/workload_cluster_coredns.go @@ -74,7 +74,7 @@ type coreDNSInfo struct { // UpdateCoreDNS updates the kubeadm configmap, coredns corefile and coredns // deployment. -func (w *Workload) UpdateCoreDNS(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane) error { +func (w *Workload) UpdateCoreDNS(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, version semver.Version) error { // Return early if we've been asked to skip CoreDNS upgrades entirely. if _, ok := kcp.Annotations[controlplanev1.SkipCoreDNSAnnotation]; ok { return nil @@ -109,7 +109,7 @@ func (w *Workload) UpdateCoreDNS(ctx context.Context, kcp *controlplanev1.Kubead } // Perform the upgrade. 
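
A condensed, hypothetical sketch of the round trip that updateClusterConfiguration above performs: decode the stored object with the version-tolerant unmarshal helper, deep-copy, mutate, and re-encode with MarshalClusterConfigurationForVersion only when something changed. The tests further below exercise the interesting case where a v1beta1 document is written back as v1beta2 for a newer target version.

```go
package internal // hypothetical placement alongside updateClusterConfiguration

import (
	"reflect"

	"github.com/blang/semver"
	kubeadmtypes "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types"
)

// roundTripClusterConfiguration is a hypothetical, self-contained illustration of the
// decode -> mutate -> re-encode flow; it mirrors updateClusterConfiguration but takes
// and returns the raw ConfigMap data directly.
func roundTripClusterConfiguration(data string, targetVersion semver.Version) (string, error) {
	// UnmarshalClusterConfiguration accepts any kubeadm API version supported by the
	// embedded types.
	current, err := kubeadmtypes.UnmarshalClusterConfiguration(data)
	if err != nil {
		return "", err
	}
	updated := current.DeepCopy()
	updated.KubernetesVersion = "v" + targetVersion.String()
	if reflect.DeepEqual(current, updated) {
		return data, nil // nothing changed, keep the stored document as-is
	}
	// Re-encode using the kubeadm API version that matches the target Kubernetes version.
	return kubeadmtypes.MarshalClusterConfigurationForVersion(updated, targetVersion)
}
```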
- if err := w.updateCoreDNSImageInfoInKubeadmConfigMap(ctx, &clusterConfig.DNS); err != nil { + if err := w.updateCoreDNSImageInfoInKubeadmConfigMap(ctx, &clusterConfig.DNS, version); err != nil { return err } if err := w.updateCoreDNSCorefile(ctx, info); err != nil { @@ -217,20 +217,11 @@ func (w *Workload) updateCoreDNSDeployment(ctx context.Context, info *coreDNSInf } // UpdateCoreDNSImageInfoInKubeadmConfigMap updates the kubernetes version in the kubeadm config map. -func (w *Workload) updateCoreDNSImageInfoInKubeadmConfigMap(ctx context.Context, dns *bootstrapv1.DNS) error { - configMapKey := ctrlclient.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem} - kubeadmConfigMap, err := w.getConfigMap(ctx, configMapKey) - if err != nil { - return err - } - config := &kubeadmConfig{ConfigMap: kubeadmConfigMap} - if err := config.UpdateCoreDNSImageInfo(dns.ImageRepository, dns.ImageTag); err != nil { - return err - } - if err := w.Client.Update(ctx, config.ConfigMap); err != nil { - return errors.Wrap(err, "error updating kubeadm ConfigMap") - } - return nil +func (w *Workload) updateCoreDNSImageInfoInKubeadmConfigMap(ctx context.Context, dns *bootstrapv1.DNS, version semver.Version) error { + return w.updateClusterConfiguration(ctx, func(c *bootstrapv1.ClusterConfiguration) { + c.DNS.ImageRepository = dns.ImageRepository + c.DNS.ImageTag = dns.ImageTag + }, version) } // updateCoreDNSCorefile migrates the coredns corefile if there is an increase diff --git a/controlplane/kubeadm/internal/workload_cluster_coredns_test.go b/controlplane/kubeadm/internal/workload_cluster_coredns_test.go index 62b2775bafcc..3feb5362300b 100644 --- a/controlplane/kubeadm/internal/workload_cluster_coredns_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_coredns_test.go @@ -19,15 +19,16 @@ package internal import ( "testing" + "github.com/blang/semver" + "github.com/google/go-cmp/cmp" . 
"github.com/onsi/gomega" - "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" - cabpkv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha4" "sigs.k8s.io/controller-runtime/pkg/client" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -37,7 +38,7 @@ import ( func TestUpdateCoreDNS(t *testing.T) { validKCP := &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - KubeadmConfigSpec: cabpkv1.KubeadmConfigSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ DNS: bootstrapv1.DNS{ ImageMeta: bootstrapv1.ImageMeta{ @@ -141,7 +142,7 @@ kind: ClusterConfiguration }, }, Spec: controlplanev1.KubeadmControlPlaneSpec{ - KubeadmConfigSpec: cabpkv1.KubeadmConfigSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ DNS: bootstrapv1.DNS{}, }, @@ -155,7 +156,7 @@ kind: ClusterConfiguration name: "returns early without error if KCP ClusterConfiguration is nil", kcp: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - KubeadmConfigSpec: cabpkv1.KubeadmConfigSpec{}, + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{}, }, }, objs: []client.Object{badCM}, @@ -182,7 +183,7 @@ kind: ClusterConfiguration name: "returns error if validation of CoreDNS image tag fails", kcp: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - KubeadmConfigSpec: cabpkv1.KubeadmConfigSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ DNS: bootstrapv1.DNS{ ImageMeta: bootstrapv1.ImageMeta{ @@ -203,7 +204,7 @@ kind: ClusterConfiguration name: "returns error if unable to update CoreDNS image info in kubeadm config map", kcp: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - KubeadmConfigSpec: cabpkv1.KubeadmConfigSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ DNS: bootstrapv1.DNS{ ImageMeta: bootstrapv1.ImageMeta{ @@ -224,7 +225,7 @@ kind: ClusterConfiguration name: "returns error if unable to update CoreDNS corefile", kcp: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - KubeadmConfigSpec: cabpkv1.KubeadmConfigSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ DNS: bootstrapv1.DNS{ ImageMeta: bootstrapv1.ImageMeta{ @@ -247,7 +248,7 @@ kind: ClusterConfiguration name: "updates everything successfully", kcp: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - KubeadmConfigSpec: cabpkv1.KubeadmConfigSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ DNS: bootstrapv1.DNS{ ImageMeta: bootstrapv1.ImageMeta{ @@ -272,7 +273,7 @@ kind: ClusterConfiguration name: "updates everything successfully to v1.8.0 with a custom repo should not change the image name", kcp: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - KubeadmConfigSpec: cabpkv1.KubeadmConfigSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: 
&bootstrapv1.ClusterConfiguration{ DNS: bootstrapv1.DNS{ ImageMeta: bootstrapv1.ImageMeta{ @@ -297,7 +298,7 @@ kind: ClusterConfiguration name: "kubeadm defaults, upgrade from Kubernetes v1.18.x to v1.19.y (from k8s.gcr.io/coredns:1.6.7 to k8s.gcr.io/coredns:1.7.0)", kcp: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - KubeadmConfigSpec: cabpkv1.KubeadmConfigSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ DNS: bootstrapv1.DNS{ ImageMeta: bootstrapv1.ImageMeta{ @@ -321,7 +322,7 @@ kind: ClusterConfiguration name: "kubeadm defaults, upgrade from Kubernetes v1.19.x to v1.20.y (stay on k8s.gcr.io/coredns:1.7.0)", kcp: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - KubeadmConfigSpec: cabpkv1.KubeadmConfigSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ DNS: bootstrapv1.DNS{ ImageMeta: bootstrapv1.ImageMeta{ @@ -344,7 +345,7 @@ kind: ClusterConfiguration name: "kubeadm defaults, upgrade from Kubernetes v1.20.x to v1.21.y (from k8s.gcr.io/coredns:1.7.0 to k8s.gcr.io/coredns/coredns:v1.8.0)", kcp: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - KubeadmConfigSpec: cabpkv1.KubeadmConfigSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ DNS: bootstrapv1.DNS{ ImageMeta: bootstrapv1.ImageMeta{ @@ -368,7 +369,7 @@ kind: ClusterConfiguration name: "kubeadm defaults, upgrade from Kubernetes v1.21.x to v1.22.y (stay on k8s.gcr.io/coredns/coredns:v1.8.0)", kcp: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - KubeadmConfigSpec: cabpkv1.KubeadmConfigSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ DNS: bootstrapv1.DNS{ ImageMeta: bootstrapv1.ImageMeta{ @@ -427,7 +428,7 @@ kind: ClusterConfiguration Client: testEnv.GetClient(), CoreDNSMigrator: tt.migrator, } - err := w.UpdateCoreDNS(ctx, tt.kcp) + err := w.UpdateCoreDNS(ctx, tt.kcp, semver.MustParse("1.19.1")) if tt.expectErr { g.Expect(err).To(HaveOccurred()) @@ -886,94 +887,63 @@ func TestGetCoreDNSInfo(t *testing.T) { } func TestUpdateCoreDNSImageInfoInKubeadmConfigMap(t *testing.T) { - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: kubeadmConfigKey, - Namespace: metav1.NamespaceSystem, - }, - Data: map[string]string{ - "ClusterConfiguration": `apiServer: - extraArgs: - authorization-mode: Node,RBAC - cloud-provider: aws - timeoutForControlPlane: 4m0s -apiVersion: kubeadm.k8s.io/v1beta2 -certificatesDir: /etc/kubernetes/pki -clusterName: foobar -controlPlaneEndpoint: foobar.us-east-2.elb.amazonaws.com -controllerManager: - extraArgs: - cloud-provider: aws -dns: - type: CoreDNS -etcd: - local: - dataDir: /var/lib/etcd -imageRepository: k8s.gcr.io -kind: ClusterConfiguration -kubernetesVersion: v1.16.1 -networking: - dnsDomain: cluster.local - podSubnet: 192.168.0.0/16 - serviceSubnet: 10.96.0.0/12 -scheduler: {}`, - }, - } - - emptyCM := cm.DeepCopy() - delete(emptyCM.Data, "ClusterConfiguration") - - dns := &bootstrapv1.DNS{ - ImageMeta: bootstrapv1.ImageMeta{ - ImageRepository: "gcr.io/example", - ImageTag: "1.0.1-somever.1", - }, - } - + g := NewWithT(t) + scheme := runtime.NewScheme() + g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) tests := []struct { - name string - dns *bootstrapv1.DNS - objs []client.Object - expectErr 
bool + name string + clusterConfigurationData string + newDNS bootstrapv1.DNS + wantClusterConfiguration string }{ { - name: "returns error if unable to find config map", - dns: dns, - expectErr: true, - }, - { - name: "returns error if config map is empty", - objs: []client.Object{emptyCM}, - dns: dns, - expectErr: true, - }, - { - name: "succeeds if updates correctly", - dns: dns, - objs: []client.Object{cm}, - expectErr: false, + name: "it should set the DNS image config", + clusterConfigurationData: "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "kind: ClusterConfiguration\n", + newDNS: bootstrapv1.DNS{ + ImageMeta: bootstrapv1.ImageMeta{ + ImageRepository: "example.com/k8s", + ImageTag: "v1.2.3", + }, + }, + wantClusterConfiguration: "apiServer: {}\n" + + "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "controllerManager: {}\n" + + "dns:\n" + + " imageRepository: example.com/k8s\n" + + " imageTag: v1.2.3\n" + + "etcd: {}\n" + + "kind: ClusterConfiguration\n" + + "networking: {}\n" + + "scheduler: {}\n", }, } - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build() + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + clusterConfigurationKey: tt.clusterConfigurationData, + }, + }).Build() + w := &Workload{ Client: fakeClient, } - - err := w.updateCoreDNSImageInfoInKubeadmConfigMap(ctx, tt.dns) - if tt.expectErr { - g.Expect(err).To(HaveOccurred()) - return - } + err := w.updateCoreDNSImageInfoInKubeadmConfigMap(ctx, &tt.newDNS, semver.MustParse("1.19.1")) g.Expect(err).ToNot(HaveOccurred()) - var expectedConfigMap corev1.ConfigMap - g.Expect(fakeClient.Get(ctx, ctrlclient.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, &expectedConfigMap)).To(Succeed()) - g.Expect(expectedConfigMap.Data).To(HaveKeyWithValue("ClusterConfiguration", ContainSubstring("1.0.1-somever.1"))) - g.Expect(expectedConfigMap.Data).To(HaveKeyWithValue("ClusterConfiguration", ContainSubstring("gcr.io/example"))) + var actualConfig corev1.ConfigMap + g.Expect(w.Client.Get( + ctx, + client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, + &actualConfig, + )).To(Succeed()) + g.Expect(actualConfig.Data[clusterConfigurationKey]).Should(Equal(tt.wantClusterConfiguration), cmp.Diff(tt.wantClusterConfiguration, actualConfig.Data[clusterConfigurationKey])) }) } } diff --git a/controlplane/kubeadm/internal/workload_cluster_etcd.go b/controlplane/kubeadm/internal/workload_cluster_etcd.go index bfc39702c51e..664d45db4a3d 100644 --- a/controlplane/kubeadm/internal/workload_cluster_etcd.go +++ b/controlplane/kubeadm/internal/workload_cluster_etcd.go @@ -19,13 +19,13 @@ package internal import ( "context" + "github.com/blang/semver" "github.com/pkg/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kerrors "k8s.io/apimachinery/pkg/util/errors" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd" etcdutil "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd/util" - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" ) type etcdClientFor interface { @@ -35,7 +35,7 @@ type etcdClientFor interface { // ReconcileEtcdMembers iterates over all etcd members and finds members that do not have corresponding 
nodes. // If there are any such members, it deletes them from etcd and removes their nodes from the kubeadm configmap so that kubeadm does not run etcd health checks on them. -func (w *Workload) ReconcileEtcdMembers(ctx context.Context, nodeNames []string) ([]string, error) { +func (w *Workload) ReconcileEtcdMembers(ctx context.Context, nodeNames []string, version semver.Version) ([]string, error) { removedMembers := []string{} errs := []error{} for _, nodeName := range nodeNames { @@ -73,7 +73,7 @@ func (w *Workload) ReconcileEtcdMembers(ctx context.Context, nodeNames []string) errs = append(errs, err) } - if err := w.RemoveNodeFromKubeadmConfigMap(ctx, member.Name); err != nil { + if err := w.RemoveNodeFromKubeadmConfigMap(ctx, member.Name, version); err != nil { errs = append(errs, err) } } @@ -83,21 +83,13 @@ func (w *Workload) ReconcileEtcdMembers(ctx context.Context, nodeNames []string) } // UpdateEtcdVersionInKubeadmConfigMap sets the imageRepository or the imageTag or both in the kubeadm config map. -func (w *Workload) UpdateEtcdVersionInKubeadmConfigMap(ctx context.Context, imageRepository, imageTag string) error { - configMapKey := ctrlclient.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem} - kubeadmConfigMap, err := w.getConfigMap(ctx, configMapKey) - if err != nil { - return err - } - config := &kubeadmConfig{ConfigMap: kubeadmConfigMap} - changed, err := config.UpdateEtcdMeta(imageRepository, imageTag) - if err != nil || !changed { - return err - } - if err := w.Client.Update(ctx, config.ConfigMap); err != nil { - return errors.Wrap(err, "error updating kubeadm ConfigMap") - } - return nil +func (w *Workload) UpdateEtcdVersionInKubeadmConfigMap(ctx context.Context, imageRepository, imageTag string, version semver.Version) error { + return w.updateClusterConfiguration(ctx, func(c *bootstrapv1.ClusterConfiguration) { + if c.Etcd.Local != nil { + c.Etcd.Local.ImageRepository = imageRepository + c.Etcd.Local.ImageTag = imageTag + } + }, version) } // RemoveEtcdMemberForMachine removes the etcd member from the target cluster's etcd cluster. diff --git a/controlplane/kubeadm/internal/workload_cluster_etcd_test.go b/controlplane/kubeadm/internal/workload_cluster_etcd_test.go index 831c8f5e163c..515e5087e0c9 100644 --- a/controlplane/kubeadm/internal/workload_cluster_etcd_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_etcd_test.go @@ -21,8 +21,9 @@ import ( "errors" "testing" + "github.com/blang/semver" + "github.com/google/go-cmp/cmp" . 
"github.com/onsi/gomega" - "go.etcd.io/etcd/clientv3" pb "go.etcd.io/etcd/etcdserver/etcdserverpb" corev1 "k8s.io/api/core/v1" @@ -32,92 +33,81 @@ import ( "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd" fake2 "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd/fake" "sigs.k8s.io/controller-runtime/pkg/client" - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func TestUpdateEtcdVersionInKubeadmConfigMap(t *testing.T) { - kubeadmConfig := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: kubeadmConfigKey, - Namespace: metav1.NamespaceSystem, - }, - Data: map[string]string{ - clusterConfigurationKey: ` -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -etcd: - local: - dataDir: /var/lib/etcd - imageRepository: "gcr.io/k8s/etcd" - imageTag: "0.10.9" -`, - }, - } - g := NewWithT(t) scheme := runtime.NewScheme() g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) - tests := []struct { - name string - objs []client.Object - imageRepo string - imageTag string - expectErr bool - expectedClusterConfig string + name string + clusterConfigurationData string + newImageRepository string + newImageTag string + wantClusterConfiguration string }{ { - name: "returns error if unable to find kubeadm-config", - objs: nil, - expectErr: true, + name: "it should set etcd version when local etcd", + clusterConfigurationData: "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "kind: ClusterConfiguration\n" + + "etcd:\n" + + " local: {}\n", + newImageRepository: "example.com/k8s", + newImageTag: "v1.6.0", + wantClusterConfiguration: "apiServer: {}\n" + + "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "controllerManager: {}\n" + + "dns: {}\n" + + "etcd:\n" + + " local:\n" + + " imageRepository: example.com/k8s\n" + + " imageTag: v1.6.0\n" + + "kind: ClusterConfiguration\n" + + "networking: {}\n" + + "scheduler: {}\n", }, { - name: "updates the config map", - expectErr: false, - objs: []client.Object{kubeadmConfig}, - imageRepo: "gcr.io/imgRepo", - imageTag: "v1.0.1-sometag.1", - expectedClusterConfig: `apiVersion: kubeadm.k8s.io/v1beta2 -etcd: - local: - dataDir: /var/lib/etcd - imageRepository: gcr.io/imgRepo - imageTag: v1.0.1-sometag.1 -kind: ClusterConfiguration -`, - }, - { - name: "doesn't update the config map if there are no changes", - expectErr: false, - imageRepo: "gcr.io/k8s/etcd", - imageTag: "0.10.9", - objs: []client.Object{kubeadmConfig}, + name: "no op when external etcd", + clusterConfigurationData: "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "kind: ClusterConfiguration\n" + + "etcd:\n" + + " external: {}\n", + newImageRepository: "example.com/k8s", + newImageTag: "v1.6.0", + wantClusterConfiguration: "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "kind: ClusterConfiguration\n" + + "etcd:\n" + + " external: {}\n", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build() + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + clusterConfigurationKey: tt.clusterConfigurationData, + }, + }).Build() + w := &Workload{ Client: fakeClient, } - err := w.UpdateEtcdVersionInKubeadmConfigMap(ctx, tt.imageRepo, tt.imageTag) - if tt.expectErr { - g.Expect(err).To(HaveOccurred()) - return - } + err := w.UpdateEtcdVersionInKubeadmConfigMap(ctx, tt.newImageRepository, 
tt.newImageTag, semver.MustParse("1.19.1")) g.Expect(err).ToNot(HaveOccurred()) - if tt.expectedClusterConfig != "" { - var actualConfig corev1.ConfigMap - g.Expect(w.Client.Get( - ctx, - ctrlclient.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, - &actualConfig, - )).To(Succeed()) - g.Expect(actualConfig.Data[clusterConfigurationKey]).To(Equal(tt.expectedClusterConfig)) - } + + var actualConfig corev1.ConfigMap + g.Expect(w.Client.Get( + ctx, + client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, + &actualConfig, + )).To(Succeed()) + g.Expect(actualConfig.Data[clusterConfigurationKey]).Should(Equal(tt.wantClusterConfiguration), cmp.Diff(tt.wantClusterConfiguration, actualConfig.Data[clusterConfigurationKey])) }) } } @@ -470,7 +460,7 @@ func TestReconcileEtcdMembers(t *testing.T) { ip-10-0-0-3.ec2.internal: advertiseAddress: 10.0.0.3 bindPort: 6443 -apiVersion: kubeadm.k8s.io/vNbetaM +apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterStatus`, }, } @@ -552,7 +542,7 @@ kind: ClusterStatus`, etcdClientGenerator: tt.etcdClientGenerator, } ctx := context.TODO() - _, err := w.ReconcileEtcdMembers(ctx, tt.nodes) + _, err := w.ReconcileEtcdMembers(ctx, tt.nodes, semver.MustParse("1.19.1")) if tt.expectErr { g.Expect(err).To(HaveOccurred()) return @@ -566,6 +556,82 @@ kind: ClusterStatus`, } } +func TestRemoveNodeFromKubeadmConfigMap(t *testing.T) { + g := NewWithT(t) + scheme := runtime.NewScheme() + g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) + tests := []struct { + name string + apiEndpoint string + clusterStatusData string + wantClusterStatus string + }{ + { + name: "removes the api endpoint", + apiEndpoint: "ip-10-0-0-2.ec2.internal", + clusterStatusData: "apiEndpoints:\n" + + " ip-10-0-0-1.ec2.internal:\n" + + " advertiseAddress: 10.0.0.1\n" + + " bindPort: 6443\n" + + " ip-10-0-0-2.ec2.internal:\n" + + " advertiseAddress: 10.0.0.2\n" + + " bindPort: 6443\n" + + "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "kind: ClusterStatus\n", + wantClusterStatus: "apiEndpoints:\n" + + " ip-10-0-0-1.ec2.internal:\n" + + " advertiseAddress: 10.0.0.1\n" + + " bindPort: 6443\n" + + "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "kind: ClusterStatus\n", + }, + { + name: "no op if the api endpoint does not exists", + apiEndpoint: "ip-10-0-0-2.ec2.internal", + clusterStatusData: "apiEndpoints:\n" + + " ip-10-0-0-1.ec2.internal:\n" + + " advertiseAddress: 10.0.0.1\n" + + " bindPort: 6443\n" + + "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "kind: ClusterStatus\n", + wantClusterStatus: "apiEndpoints:\n" + + " ip-10-0-0-1.ec2.internal:\n" + + " advertiseAddress: 10.0.0.1\n" + + " bindPort: 6443\n" + + "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "kind: ClusterStatus\n", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + clusterStatusKey: tt.clusterStatusData, + }, + }).Build() + + w := &Workload{ + Client: fakeClient, + } + err := w.RemoveNodeFromKubeadmConfigMap(ctx, tt.apiEndpoint, semver.MustParse("1.19.1")) + g.Expect(err).ToNot(HaveOccurred()) + + var actualConfig corev1.ConfigMap + g.Expect(w.Client.Get( + ctx, + client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, + &actualConfig, + )).To(Succeed()) + g.Expect(actualConfig.Data[clusterStatusKey]).Should(Equal(tt.wantClusterStatus), 
cmp.Diff(tt.wantClusterStatus, actualConfig.Data[clusterStatusKey])) + }) + } +} + type fakeEtcdClientGenerator struct { forNodesClient *etcd.Client forNodesClientFunc func([]string) (*etcd.Client, error) diff --git a/controlplane/kubeadm/internal/workload_cluster_test.go b/controlplane/kubeadm/internal/workload_cluster_test.go index 20b71c0f8477..b9a18e6347df 100644 --- a/controlplane/kubeadm/internal/workload_cluster_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_test.go @@ -21,12 +21,10 @@ import ( "errors" "fmt" "testing" - "time" + "github.com/blang/semver" "github.com/google/go-cmp/cmp" . "github.com/onsi/gomega" - - "github.com/blang/semver" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -34,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" - cabpkv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha4" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -90,7 +87,7 @@ func TestUpdateKubeProxyImageInfo(t *testing.T) { KCP: &v1alpha4.KubeadmControlPlane{ Spec: v1alpha4.KubeadmControlPlaneSpec{ Version: "v1.16.3", - KubeadmConfigSpec: cabpkv1.KubeadmConfigSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ ImageRepository: "foo.bar.example/baz/qux", }, @@ -105,7 +102,7 @@ func TestUpdateKubeProxyImageInfo(t *testing.T) { KCP: &v1alpha4.KubeadmControlPlane{ Spec: v1alpha4.KubeadmControlPlaneSpec{ Version: "v1.16.3", - KubeadmConfigSpec: cabpkv1.KubeadmConfigSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ ImageRepository: "", }, @@ -119,7 +116,7 @@ func TestUpdateKubeProxyImageInfo(t *testing.T) { KCP: &v1alpha4.KubeadmControlPlane{ Spec: v1alpha4.KubeadmControlPlaneSpec{ Version: "v1.16.3", - KubeadmConfigSpec: cabpkv1.KubeadmConfigSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ ImageRepository: "%%%", }, @@ -191,8 +188,7 @@ func TestRemoveMachineFromKubeadmConfigMap(t *testing.T) { ip-10-0-0-2.ec2.internal: advertiseAddress: 10.0.0.2 bindPort: 6443 - someFieldThatIsAddedInTheFuture: bar -apiVersion: kubeadm.k8s.io/vNbetaM +apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterStatus`, }, BinaryData: map[string][]byte{ @@ -245,8 +241,7 @@ kind: ClusterStatus`, ip-10-0-0-2.ec2.internal: advertiseAddress: 10.0.0.2 bindPort: 6443 - someFieldThatIsAddedInTheFuture: bar -apiVersion: kubeadm.k8s.io/vNbetaM +apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterStatus `, }, @@ -259,7 +254,7 @@ kind: ClusterStatus w := &Workload{ Client: fakeClient, } - err := w.RemoveMachineFromKubeadmConfigMap(ctx, tt.machine) + err := w.RemoveMachineFromKubeadmConfigMap(ctx, tt.machine, semver.MustParse("1.19.1")) if tt.expectErr { g.Expect(err).To(HaveOccurred()) return @@ -272,7 +267,7 @@ kind: ClusterStatus client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, &actualConfig, )).To(Succeed()) - g.Expect(actualConfig.Data[clusterStatusKey]).To(Equal(tt.expectedEndpoints)) + g.Expect(actualConfig.Data[clusterStatusKey]).To(Equal(tt.expectedEndpoints), cmp.Diff(tt.expectedEndpoints, actualConfig.Data[clusterStatusKey])) } }) } @@ -375,58 +370,146 @@ func TestUpdateKubeletConfigMap(t *testing.T) { } } -func 
TestUpdateKubernetesVersionInKubeadmConfigMap(t *testing.T) { - kubeadmConfig := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: kubeadmConfigKey, - Namespace: metav1.NamespaceSystem, - }, - Data: map[string]string{ - clusterConfigurationKey: ` -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -kubernetesVersion: v1.16.1 -`, - }, - } - - kubeadmConfigNoKey := kubeadmConfig.DeepCopy() - delete(kubeadmConfigNoKey.Data, clusterConfigurationKey) - - kubeadmConfigBadData := kubeadmConfig.DeepCopy() - kubeadmConfigBadData.Data[clusterConfigurationKey] = `foobar` - +func TestUpdateUpdateClusterConfigurationInKubeadmConfigMap(t *testing.T) { g := NewWithT(t) scheme := runtime.NewScheme() g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) tests := []struct { - name string - version semver.Version - objs []client.Object - expectErr bool + name string + version semver.Version + objs []client.Object + mutator func(*bootstrapv1.ClusterConfiguration) + wantConfigMap *corev1.ConfigMap + wantErr bool }{ { - name: "updates the config map", - version: semver.Version{Major: 1, Minor: 17, Patch: 2}, - objs: []client.Object{kubeadmConfig}, - expectErr: false, + name: "fails if missing config map", + version: semver.MustParse("1.17.2"), + objs: nil, + wantErr: true, }, { - name: "returns error if cannot find config map", - version: semver.Version{Major: 1, Minor: 2}, - expectErr: true, + name: "fail if config map without ClusterConfiguration data", + version: semver.MustParse("1.17.2"), + objs: []client.Object{&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{}, + }}, + wantErr: true, }, { - name: "returns error if config has bad data", - version: semver.Version{Major: 1, Minor: 2}, - objs: []client.Object{kubeadmConfigBadData}, - expectErr: true, + name: "fail if config map with invalid ClusterConfiguration data", + version: semver.MustParse("1.17.2"), + objs: []client.Object{&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + clusterConfigurationKey: "foo", + }, + }}, + wantErr: true, }, { - name: "returns error if config doesn't have cluster config key", - version: semver.Version{Major: 1, Minor: 2}, - objs: []client.Object{kubeadmConfigNoKey}, - expectErr: true, + name: "no op if mutator does not apply changes", + version: semver.MustParse("1.17.2"), + objs: []client.Object{&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + clusterConfigurationKey: "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "kind: ClusterConfiguration\n" + + "kubernetesVersion: v1.16.1\n", + }, + }}, + mutator: func(c *bootstrapv1.ClusterConfiguration) {}, + wantConfigMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + clusterConfigurationKey: "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "kind: ClusterConfiguration\n" + + "kubernetesVersion: v1.16.1\n", + }, + }, + }, + { + name: "apply changes", + version: semver.MustParse("1.17.2"), + objs: []client.Object{&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + clusterConfigurationKey: "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "kind: ClusterConfiguration\n" + + "kubernetesVersion: 
v1.16.1\n", + }, + }}, + mutator: func(c *bootstrapv1.ClusterConfiguration) { + c.KubernetesVersion = "v1.17.2" + }, + wantConfigMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + clusterConfigurationKey: "apiServer: {}\n" + + "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "controllerManager: {}\n" + + "dns: {}\n" + + "etcd: {}\n" + + "kind: ClusterConfiguration\n" + + "kubernetesVersion: v1.17.2\n" + + "networking: {}\n" + + "scheduler: {}\n", + }, + }, + }, + { + name: "converts kubeadm api version during mutation if required", + version: semver.MustParse("1.17.2"), + objs: []client.Object{&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + clusterConfigurationKey: "apiVersion: kubeadm.k8s.io/v1beta1\n" + + "kind: ClusterConfiguration\n" + + "kubernetesVersion: v1.16.1\n", + }, + }}, + mutator: func(c *bootstrapv1.ClusterConfiguration) { + c.KubernetesVersion = "v1.17.2" + }, + wantConfigMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + clusterConfigurationKey: "apiServer: {}\n" + + "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "controllerManager: {}\n" + + "dns: {}\n" + + "etcd: {}\n" + + "kind: ClusterConfiguration\n" + + "kubernetesVersion: v1.17.2\n" + + "networking: {}\n" + + "scheduler: {}\n", + }, + }, }, } @@ -434,77 +517,176 @@ kubernetesVersion: v1.16.1 t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.objs...).Build() + w := &Workload{ Client: fakeClient, } - err := w.UpdateKubernetesVersionInKubeadmConfigMap(ctx, tt.version) - if tt.expectErr { + err := w.updateClusterConfiguration(ctx, tt.mutator, tt.version) + if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } g.Expect(err).ToNot(HaveOccurred()) + var actualConfig corev1.ConfigMap g.Expect(w.Client.Get( ctx, client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, &actualConfig, )).To(Succeed()) - g.Expect(actualConfig.Data[clusterConfigurationKey]).To(ContainSubstring("kubernetesVersion: v1.17.2")) + g.Expect(actualConfig.Data[clusterConfigurationKey]).To(Equal(tt.wantConfigMap.Data[clusterConfigurationKey]), cmp.Diff(tt.wantConfigMap.Data[clusterConfigurationKey], actualConfig.Data[clusterConfigurationKey])) }) } } -func TestUpdateImageRepositoryInKubeadmConfigMap(t *testing.T) { - kubeadmConfig := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: kubeadmConfigKey, - Namespace: metav1.NamespaceSystem, - }, - Data: map[string]string{ - clusterConfigurationKey: ` -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -imageRepository: k8s.gcr.io -`, - }, - } - - kubeadmConfigNoKey := kubeadmConfig.DeepCopy() - delete(kubeadmConfigNoKey.Data, clusterConfigurationKey) - - kubeadmConfigBadData := kubeadmConfig.DeepCopy() - kubeadmConfigBadData.Data[clusterConfigurationKey] = `foobar` - +func TestUpdateUpdateClusterStatusInKubeadmConfigMap(t *testing.T) { g := NewWithT(t) scheme := runtime.NewScheme() g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) tests := []struct { - name string - imageRepository string - objs []client.Object - expectErr bool + name string + version semver.Version + objs []client.Object + mutator func(status *bootstrapv1.ClusterStatus) + wantConfigMap *corev1.ConfigMap + wantErr bool }{ { - 
name: "updates the config map", - imageRepository: "myspecialrepo.io", - objs: []client.Object{kubeadmConfig}, - expectErr: false, + name: "fails if missing config map", + version: semver.MustParse("1.17.2"), + objs: nil, + wantErr: true, }, { - name: "returns error if cannot find config map", - expectErr: true, + name: "fail if config map without ClusterStatus data", + version: semver.MustParse("1.17.2"), + objs: []client.Object{&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{}, + }}, + wantErr: true, + }, + { + name: "fail if config map with invalid ClusterStatus data", + version: semver.MustParse("1.17.2"), + objs: []client.Object{&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + clusterStatusKey: "foo", + }, + }}, + wantErr: true, + }, + { + name: "no op if mutator does not apply changes", + version: semver.MustParse("1.17.2"), + objs: []client.Object{&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + clusterStatusKey: "apiEndpoints:\n" + + " ip-10-0-0-1.ec2.internal:\n" + + " advertiseAddress: 10.0.0.1\n" + + " bindPort: 6443\n" + + "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "kind: ClusterStatus\n", + }, + }}, + mutator: func(status *bootstrapv1.ClusterStatus) {}, + wantConfigMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + clusterStatusKey: "apiEndpoints:\n" + + " ip-10-0-0-1.ec2.internal:\n" + + " advertiseAddress: 10.0.0.1\n" + + " bindPort: 6443\n" + + "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "kind: ClusterStatus\n", + }, + }, }, { - name: "returns error if config has bad data", - objs: []client.Object{kubeadmConfigBadData}, - imageRepository: "myspecialrepo.io", - expectErr: true, + name: "apply changes", + version: semver.MustParse("1.17.2"), + objs: []client.Object{&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + clusterStatusKey: "apiEndpoints:\n" + + " ip-10-0-0-1.ec2.internal:\n" + + " advertiseAddress: 10.0.0.1\n" + + " bindPort: 6443\n" + + "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "kind: ClusterStatus\n", + }, + }}, + mutator: func(status *bootstrapv1.ClusterStatus) { + status.APIEndpoints["ip-10-0-0-2.ec2.internal"] = bootstrapv1.APIEndpoint{} + }, + wantConfigMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + clusterStatusKey: "apiEndpoints:\n" + + " ip-10-0-0-1.ec2.internal:\n" + + " advertiseAddress: 10.0.0.1\n" + + " bindPort: 6443\n" + + " ip-10-0-0-2.ec2.internal: {}\n" + + "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "kind: ClusterStatus\n", + }, + }, }, { - name: "returns error if config doesn't have cluster config key", - objs: []client.Object{kubeadmConfigNoKey}, - imageRepository: "myspecialrepo.io", - expectErr: true, + name: "converts kubeadm api version during mutation if required", + version: semver.MustParse("1.17.2"), + objs: []client.Object{&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + clusterStatusKey: "apiEndpoints:\n" + + " ip-10-0-0-1.ec2.internal:\n" + + 
" advertiseAddress: 10.0.0.1\n" + + " bindPort: 6443\n" + + "apiVersion: kubeadm.k8s.io/v1beta1\n" + + "kind: ClusterStatus\n", + }, + }}, + mutator: func(status *bootstrapv1.ClusterStatus) { + status.APIEndpoints["ip-10-0-0-2.ec2.internal"] = bootstrapv1.APIEndpoint{} + }, + wantConfigMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + clusterStatusKey: "apiEndpoints:\n" + + " ip-10-0-0-1.ec2.internal:\n" + + " advertiseAddress: 10.0.0.1\n" + + " bindPort: 6443\n" + + " ip-10-0-0-2.ec2.internal: {}\n" + + "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "kind: ClusterStatus\n", + }, + }, }, } @@ -512,373 +694,355 @@ imageRepository: k8s.gcr.io t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.objs...).Build() + w := &Workload{ Client: fakeClient, } - err := w.UpdateImageRepositoryInKubeadmConfigMap(ctx, tt.imageRepository) - if tt.expectErr { + err := w.updateClusterStatus(ctx, tt.mutator, tt.version) + if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } g.Expect(err).ToNot(HaveOccurred()) + var actualConfig corev1.ConfigMap g.Expect(w.Client.Get( ctx, client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, &actualConfig, )).To(Succeed()) - g.Expect(actualConfig.Data[clusterConfigurationKey]).To(ContainSubstring(tt.imageRepository)) + g.Expect(actualConfig.Data[clusterStatusKey]).To(Equal(tt.wantConfigMap.Data[clusterStatusKey]), cmp.Diff(tt.wantConfigMap.Data[clusterStatusKey], actualConfig.Data[clusterStatusKey])) }) } } -func TestUpdateApiServerInKubeadmConfigMap(t *testing.T) { - validAPIServerConfig := `apiServer: - certSANs: - - foo - extraArgs: - foo: bar - extraVolumes: - - hostPath: /foo/bar - mountPath: /bar/baz - name: mount1 - timeoutForControlPlane: 3m0s -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -` - kubeadmConfig := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: kubeadmConfigKey, - Namespace: metav1.NamespaceSystem, - }, - Data: map[string]string{ - clusterConfigurationKey: validAPIServerConfig, +func TestUpdateKubernetesVersionInKubeadmConfigMap(t *testing.T) { + g := NewWithT(t) + scheme := runtime.NewScheme() + g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) + tests := []struct { + name string + version semver.Version + clusterConfigurationData string + }{ + { + name: "updates the config map and changes the kubeadm API version", + version: semver.MustParse("1.17.2"), + clusterConfigurationData: "apiVersion: kubeadm.k8s.io/v1beta1\n" + + "kind: ClusterConfiguration\n" + + "kubernetesVersion: v1.16.1\n", }, } - kubeadmConfigNoKey := kubeadmConfig.DeepCopy() - delete(kubeadmConfigNoKey.Data, clusterConfigurationKey) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + clusterConfigurationKey: tt.clusterConfigurationData, + }, + }).Build() + + w := &Workload{ + Client: fakeClient, + } + err := w.UpdateKubernetesVersionInKubeadmConfigMap(ctx, tt.version) + g.Expect(err).ToNot(HaveOccurred()) - kubeadmConfigBadData := kubeadmConfig.DeepCopy() - kubeadmConfigBadData.Data[clusterConfigurationKey] = `badConfigAPIServer` + var actualConfig corev1.ConfigMap + g.Expect(w.Client.Get( + ctx, + 
client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, + &actualConfig, + )).To(Succeed()) + g.Expect(actualConfig.Data[clusterConfigurationKey]).To(ContainSubstring(tt.version.String())) + }) + } +} +func TestUpdateImageRepositoryInKubeadmConfigMap(t *testing.T) { g := NewWithT(t) scheme := runtime.NewScheme() g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) tests := []struct { - name string - apiServer bootstrapv1.APIServer - objs []client.Object - expectErr bool - expectedChanged bool - expectedAPIServer string + name string + clusterConfigurationData string + newImageRepository string + wantImageRepository string }{ { - name: "updates the config map", - apiServer: bootstrapv1.APIServer{CertSANs: []string{"foo", "bar"}}, - objs: []client.Object{kubeadmConfig}, - expectErr: false, - expectedChanged: true, - expectedAPIServer: `apiServer: - certSANs: - - foo - - bar -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -`, - }, - { - name: "returns error if cannot find config map", - expectErr: true, - expectedAPIServer: validAPIServerConfig, - }, - { - name: "returns error if config has bad data", - objs: []client.Object{kubeadmConfigBadData}, - apiServer: bootstrapv1.APIServer{CertSANs: []string{"foo", "bar"}}, - expectErr: true, - expectedAPIServer: validAPIServerConfig, + name: "it should set the image repository", + clusterConfigurationData: "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "kind: ClusterConfiguration\n", + newImageRepository: "example.com/k8s", + wantImageRepository: "example.com/k8s", }, { - name: "returns error if config doesn't have cluster config key", - objs: []client.Object{kubeadmConfigNoKey}, - apiServer: bootstrapv1.APIServer{CertSANs: []string{"foo", "bar"}}, - expectErr: true, - expectedAPIServer: validAPIServerConfig, + name: "it should preserve the existing image repository if then new value is empty", + clusterConfigurationData: "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "kind: ClusterConfiguration\n" + + "imageRepository: foo.bar/baz.io\n", + newImageRepository: "", + wantImageRepository: "foo.bar/baz.io", }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + clusterConfigurationKey: tt.clusterConfigurationData, + }, + }).Build() + + w := &Workload{ + Client: fakeClient, + } + err := w.UpdateImageRepositoryInKubeadmConfigMap(ctx, tt.newImageRepository, semver.MustParse("1.19.1")) + g.Expect(err).ToNot(HaveOccurred()) + + var actualConfig corev1.ConfigMap + g.Expect(w.Client.Get( + ctx, + client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, + &actualConfig, + )).To(Succeed()) + g.Expect(actualConfig.Data[clusterConfigurationKey]).To(ContainSubstring(tt.wantImageRepository)) + }) + } +} + +func TestUpdateApiServerInKubeadmConfigMap(t *testing.T) { + g := NewWithT(t) + scheme := runtime.NewScheme() + g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) + tests := []struct { + name string + clusterConfigurationData string + newAPIServer bootstrapv1.APIServer + wantClusterConfiguration string + }{ { - name: "should not update config map if no changes are detected", - objs: []client.Object{kubeadmConfig}, - expectedChanged: false, - apiServer: bootstrapv1.APIServer{ + name: "it should set the api server config", + clusterConfigurationData: "apiVersion: 
kubeadm.k8s.io/v1beta2\n" + + "kind: ClusterConfiguration\n", + newAPIServer: bootstrapv1.APIServer{ ControlPlaneComponent: bootstrapv1.ControlPlaneComponent{ - ExtraArgs: map[string]string{"foo": "bar"}, - ExtraVolumes: []bootstrapv1.HostPathMount{{Name: "mount1", HostPath: "/foo/bar", MountPath: "/bar/baz"}}, + ExtraArgs: map[string]string{ + "bar": "baz", + "someKey": "someVal", + }, + ExtraVolumes: []bootstrapv1.HostPathMount{ + { + Name: "mount2", + HostPath: "/bar/baz", + MountPath: "/foo/bar", + }, + }, }, - CertSANs: []string{"foo"}, - TimeoutForControlPlane: &metav1.Duration{Duration: 3 * time.Minute}, }, - expectedAPIServer: validAPIServerConfig, + wantClusterConfiguration: "apiServer:\n" + + " extraArgs:\n" + + " bar: baz\n" + + " someKey: someVal\n" + + " extraVolumes:\n" + + " - hostPath: /bar/baz\n" + + " mountPath: /foo/bar\n" + + " name: mount2\n" + + "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "controllerManager: {}\n" + + "dns: {}\n" + + "etcd: {}\n" + + "kind: ClusterConfiguration\n" + + "networking: {}\n" + + "scheduler: {}\n", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + clusterConfigurationKey: tt.clusterConfigurationData, + }, + }).Build() - fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.objs...).Build() w := &Workload{ Client: fakeClient, } - - err := w.UpdateAPIServerInKubeadmConfigMap(ctx, tt.apiServer) - if tt.expectErr { - g.Expect(err).To(HaveOccurred()) - return - } + err := w.UpdateAPIServerInKubeadmConfigMap(ctx, tt.newAPIServer, semver.MustParse("1.19.1")) g.Expect(err).ToNot(HaveOccurred()) + var actualConfig corev1.ConfigMap g.Expect(w.Client.Get( ctx, client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, &actualConfig, )).To(Succeed()) - g.Expect(actualConfig.Data[clusterConfigurationKey]).Should(Equal(tt.expectedAPIServer), cmp.Diff(actualConfig.Data[clusterConfigurationKey], tt.expectedAPIServer)) - - // check resource version to see if client.update was called or not - if !tt.expectedChanged { - g.Expect(tt.objs[0].GetResourceVersion()).Should(Equal(actualConfig.ResourceVersion)) - } else { - g.Expect(tt.objs[0].GetResourceVersion()).ShouldNot(Equal(actualConfig.ResourceVersion)) - } + g.Expect(actualConfig.Data[clusterConfigurationKey]).Should(Equal(tt.wantClusterConfiguration), cmp.Diff(tt.wantClusterConfiguration, actualConfig.Data[clusterConfigurationKey])) }) } } func TestUpdateControllerManagerInKubeadmConfigMap(t *testing.T) { - validControllerManagerConfig := `apiVersion: kubeadm.k8s.io/v1beta2 -controllerManager: - extraArgs: - foo: bar - extraVolumes: - - hostPath: /foo/bar - mountPath: /bar/baz - name: mount1 -kind: ClusterConfiguration -` - kubeadmConfig := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: kubeadmConfigKey, - Namespace: metav1.NamespaceSystem, - }, - Data: map[string]string{ - clusterConfigurationKey: validControllerManagerConfig, - }, - } - - kubeadmConfigNoKey := kubeadmConfig.DeepCopy() - delete(kubeadmConfigNoKey.Data, clusterConfigurationKey) - - kubeadmConfigBadData := kubeadmConfig.DeepCopy() - kubeadmConfigBadData.Data[clusterConfigurationKey] = `badConfigControllerManager` - g := NewWithT(t) scheme := runtime.NewScheme() g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) tests := []struct { - name 
string - controllerManager bootstrapv1.ControlPlaneComponent - objs []client.Object - expectErr bool - expectedChanged bool - expectedControllerManager string + name string + clusterConfigurationData string + newControllerManager bootstrapv1.ControlPlaneComponent + wantClusterConfiguration string }{ { - name: "updates the config map", - controllerManager: bootstrapv1.ControlPlaneComponent{ExtraArgs: map[string]string{"foo": "bar"}}, - objs: []client.Object{kubeadmConfig}, - expectErr: false, - expectedChanged: true, - expectedControllerManager: `apiVersion: kubeadm.k8s.io/v1beta2 -controllerManager: - extraArgs: - foo: bar -kind: ClusterConfiguration -`, - }, - { - name: "returns error if cannot find config map", - expectErr: true, - expectedControllerManager: validControllerManagerConfig, - }, - { - name: "returns error if config has bad data", - objs: []client.Object{kubeadmConfigBadData}, - controllerManager: bootstrapv1.ControlPlaneComponent{ExtraArgs: map[string]string{"foo": "bar"}}, - expectErr: true, - expectedControllerManager: validControllerManagerConfig, - }, - { - name: "returns error if config doesn't have cluster config key", - objs: []client.Object{kubeadmConfigNoKey}, - controllerManager: bootstrapv1.ControlPlaneComponent{ExtraArgs: map[string]string{"foo": "bar"}}, - expectErr: true, - expectedControllerManager: validControllerManagerConfig, - }, - { - name: "should not update config map if no changes are detected", - objs: []client.Object{kubeadmConfig}, - expectedChanged: false, - controllerManager: bootstrapv1.ControlPlaneComponent{ - ExtraArgs: map[string]string{"foo": "bar"}, - ExtraVolumes: []bootstrapv1.HostPathMount{{Name: "mount1", HostPath: "/foo/bar", MountPath: "/bar/baz"}}, + name: "it should set the controller manager config", + clusterConfigurationData: "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "kind: ClusterConfiguration\n", + newControllerManager: bootstrapv1.ControlPlaneComponent{ + ExtraArgs: map[string]string{ + "bar": "baz", + "someKey": "someVal", + }, + ExtraVolumes: []bootstrapv1.HostPathMount{ + { + Name: "mount2", + HostPath: "/bar/baz", + MountPath: "/foo/bar", + }, + }, }, - expectedControllerManager: validControllerManagerConfig, + wantClusterConfiguration: "apiServer: {}\n" + + "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "controllerManager:\n" + + " extraArgs:\n" + + " bar: baz\n" + + " someKey: someVal\n" + + " extraVolumes:\n" + + " - hostPath: /bar/baz\n" + + " mountPath: /foo/bar\n" + + " name: mount2\n" + + "dns: {}\n" + + "etcd: {}\n" + + "kind: ClusterConfiguration\n" + + "networking: {}\n" + + "scheduler: {}\n", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + clusterConfigurationKey: tt.clusterConfigurationData, + }, + }).Build() - fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.objs...).Build() w := &Workload{ Client: fakeClient, } - err := w.UpdateControllerManagerInKubeadmConfigMap(ctx, tt.controllerManager) - if tt.expectErr { - g.Expect(err).To(HaveOccurred()) - return - } + err := w.UpdateControllerManagerInKubeadmConfigMap(ctx, tt.newControllerManager, semver.MustParse("1.19.1")) g.Expect(err).ToNot(HaveOccurred()) + var actualConfig corev1.ConfigMap g.Expect(w.Client.Get( ctx, client.ObjectKey{Name: kubeadmConfigKey, Namespace: 
metav1.NamespaceSystem}, &actualConfig, )).To(Succeed()) - g.Expect(actualConfig.Data[clusterConfigurationKey]).Should(Equal(tt.expectedControllerManager), cmp.Diff(actualConfig.Data[clusterConfigurationKey], tt.expectedControllerManager)) - - // check resource version to see if client.update was called or not - if !tt.expectedChanged { - g.Expect(tt.objs[0].GetResourceVersion()).Should(Equal(actualConfig.ResourceVersion)) - } else { - g.Expect(tt.objs[0].GetResourceVersion()).ShouldNot(Equal(actualConfig.ResourceVersion)) - } + g.Expect(actualConfig.Data[clusterConfigurationKey]).Should(Equal(tt.wantClusterConfiguration), cmp.Diff(tt.wantClusterConfiguration, actualConfig.Data[clusterConfigurationKey])) }) } } func TestUpdateSchedulerInKubeadmConfigMap(t *testing.T) { - validSchedulerConfig := `apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -scheduler: - extraArgs: - foo: bar - extraVolumes: - - hostPath: /foo/bar - mountPath: /bar/baz - name: mount1 -` - kubeadmConfig := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: kubeadmConfigKey, - Namespace: metav1.NamespaceSystem, - }, - Data: map[string]string{ - clusterConfigurationKey: validSchedulerConfig, - }, - } - - kubeadmConfigNoKey := kubeadmConfig.DeepCopy() - delete(kubeadmConfigNoKey.Data, clusterConfigurationKey) - - kubeadmConfigBadData := kubeadmConfig.DeepCopy() - kubeadmConfigBadData.Data[clusterConfigurationKey] = `badConfigScheduler` - g := NewWithT(t) scheme := runtime.NewScheme() g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) tests := []struct { - name string - scheduler bootstrapv1.ControlPlaneComponent - objs []client.Object - expectErr bool - expectedChanged bool - expectedScheduler string + name string + clusterConfigurationData string + newScheduler bootstrapv1.ControlPlaneComponent + wantClusterConfiguration string }{ { - name: "updates the config map", - scheduler: bootstrapv1.ControlPlaneComponent{ExtraArgs: map[string]string{"foo": "bar"}}, - objs: []client.Object{kubeadmConfig}, - expectErr: false, - expectedChanged: true, - expectedScheduler: `apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -scheduler: - extraArgs: - foo: bar -`, - }, - { - name: "returns error if cannot find config map", - expectErr: true, - expectedScheduler: validSchedulerConfig, - }, - { - name: "returns error if config has bad data", - objs: []client.Object{kubeadmConfigBadData}, - scheduler: bootstrapv1.ControlPlaneComponent{ExtraArgs: map[string]string{"foo": "bar"}}, - expectErr: true, - expectedScheduler: validSchedulerConfig, - }, - { - name: "returns error if config doesn't have cluster config key", - objs: []client.Object{kubeadmConfigNoKey}, - scheduler: bootstrapv1.ControlPlaneComponent{ExtraArgs: map[string]string{"foo": "bar"}}, - expectErr: true, - expectedScheduler: validSchedulerConfig, - }, - { - name: "should not update config map if no changes are detected", - objs: []client.Object{kubeadmConfig}, - expectedChanged: false, - scheduler: bootstrapv1.ControlPlaneComponent{ - ExtraArgs: map[string]string{"foo": "bar"}, - ExtraVolumes: []bootstrapv1.HostPathMount{{Name: "mount1", HostPath: "/foo/bar", MountPath: "/bar/baz"}}, + name: "it should set the scheduler config", + clusterConfigurationData: "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "kind: ClusterConfiguration\n", + newScheduler: bootstrapv1.ControlPlaneComponent{ + ExtraArgs: map[string]string{ + "bar": "baz", + "someKey": "someVal", + }, + ExtraVolumes: []bootstrapv1.HostPathMount{ + { + Name: "mount2", + HostPath: 
"/bar/baz", + MountPath: "/foo/bar", + }, + }, }, - expectedScheduler: validSchedulerConfig, + wantClusterConfiguration: "apiServer: {}\n" + + "apiVersion: kubeadm.k8s.io/v1beta2\n" + + "controllerManager: {}\n" + + "dns: {}\n" + + "etcd: {}\n" + + "kind: ClusterConfiguration\n" + + "networking: {}\n" + + "scheduler:\n" + + " extraArgs:\n" + + " bar: baz\n" + + " someKey: someVal\n" + + " extraVolumes:\n" + + " - hostPath: /bar/baz\n" + + " mountPath: /foo/bar\n" + + " name: mount2\n", }, } - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + clusterConfigurationKey: tt.clusterConfigurationData, + }, + }).Build() - fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.objs...).Build() w := &Workload{ Client: fakeClient, } - err := w.UpdateSchedulerInKubeadmConfigMap(ctx, tt.scheduler) - if tt.expectErr { - g.Expect(err).To(HaveOccurred()) - return - } + err := w.UpdateSchedulerInKubeadmConfigMap(ctx, tt.newScheduler, semver.MustParse("1.19.1")) g.Expect(err).ToNot(HaveOccurred()) + var actualConfig corev1.ConfigMap g.Expect(w.Client.Get( ctx, client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, &actualConfig, )).To(Succeed()) - g.Expect(actualConfig.Data[clusterConfigurationKey]).Should(Equal(tt.expectedScheduler)) - - // check resource version to see if client.update was called or not - if !tt.expectedChanged { - g.Expect(tt.objs[0].GetResourceVersion()).Should(Equal(actualConfig.ResourceVersion)) - } else { - g.Expect(tt.objs[0].GetResourceVersion()).ShouldNot(Equal(actualConfig.ResourceVersion)) - } + g.Expect(actualConfig.Data[clusterConfigurationKey]).Should(Equal(tt.wantClusterConfiguration), cmp.Diff(tt.wantClusterConfiguration, actualConfig.Data[clusterConfigurationKey])) }) } }