From 82b7f70fad845242f098002d422c6d0e641ce37d Mon Sep 17 00:00:00 2001
From: Sedef
Date: Mon, 9 Mar 2020 04:27:32 -0700
Subject: [PATCH] Update kube-proxy daemonset image tag during KCP upgrade

---
 .../kubeadm_control_plane_controller.go       | 13 +++-
 .../kubeadm_control_plane_controller_test.go  |  2 +-
 controlplane/kubeadm/internal/cluster_test.go | 11 +++
 .../kubeadm/internal/workload_cluster.go      | 60 +++++++++++++++
 .../kubeadm/internal/workload_cluster_test.go | 76 ++++++++++++++++++++
 5 files changed, 160 insertions(+), 2 deletions(-)

diff --git a/controlplane/kubeadm/controllers/kubeadm_control_plane_controller.go b/controlplane/kubeadm/controllers/kubeadm_control_plane_controller.go
index 0780360ef70f..89951fc1f2a2 100644
--- a/controlplane/kubeadm/controllers/kubeadm_control_plane_controller.go
+++ b/controlplane/kubeadm/controllers/kubeadm_control_plane_controller.go
@@ -251,7 +251,18 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
         return r.upgradeControlPlane(ctx, cluster, kcp, ownedMachines, requireUpgrade)
     }
 
-    // If we've made it this far, we we can assume that all ownedMachines are up to date
+    // If we've made it this far, we can assume that all ownedMachines are up to date
+
+    // Update kube-proxy image name
+    workloadCluster, err := r.managementCluster.GetWorkloadCluster(ctx, util.ObjectKey(cluster))
+    if err != nil {
+        // Just log and continue; the control-plane cluster may be uninitialized
+        logger.Error(err, "failed to get remote client for workload cluster")
+    } else if err := workloadCluster.UpdateKubeProxyImageInfo(ctx, kcp); err != nil {
+        logger.Error(err, "failed to update kube-proxy image name in kube-proxy daemonset")
+        return ctrl.Result{}, err
+    }
+
     numMachines := len(ownedMachines)
     desiredReplicas := int(*kcp.Spec.Replicas)
 
diff --git a/controlplane/kubeadm/controllers/kubeadm_control_plane_controller_test.go b/controlplane/kubeadm/controllers/kubeadm_control_plane_controller_test.go
index eb9839baaf05..4f131e00ffe7 100644
--- a/controlplane/kubeadm/controllers/kubeadm_control_plane_controller_test.go
+++ b/controlplane/kubeadm/controllers/kubeadm_control_plane_controller_test.go
@@ -540,7 +540,7 @@ func TestReconcileInitializeControlPlane(t *testing.T) {
         recorder: record.NewFakeRecorder(32),
         managementCluster: &fakeManagementCluster{
             Management: &internal.Management{Client: fakeClient},
-            Workload:   fakeWorkloadCluster{},
+            Workload:   fakeWorkloadCluster{Workload: &internal.Workload{Client: fakeClient}, Status: internal.ClusterStatus{}},
         },
     }
 
diff --git a/controlplane/kubeadm/internal/cluster_test.go b/controlplane/kubeadm/internal/cluster_test.go
index d03dcbd84a74..9e22b49f1ce2 100644
--- a/controlplane/kubeadm/internal/cluster_test.go
+++ b/controlplane/kubeadm/internal/cluster_test.go
@@ -23,6 +23,7 @@ import (
     "strings"
     "testing"
 
+    appsv1 "k8s.io/api/apps/v1"
     corev1 "k8s.io/api/core/v1"
     rbacv1 "k8s.io/api/rbac/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -225,6 +226,7 @@ type fakeClient struct {
     get       map[string]interface{}
     getErr    error
     createErr error
+    patchErr  error
 }
 
 func (f *fakeClient) Get(_ context.Context, key client.ObjectKey, obj runtime.Object) error {
@@ -239,6 +241,8 @@ func (f *fakeClient) Get(_ context.Context, key client.ObjectKey, obj runtime.Ob
         l.DeepCopyInto(obj.(*rbacv1.RoleBinding))
     case *rbacv1.Role:
         l.DeepCopyInto(obj.(*rbacv1.Role))
+    case *appsv1.DaemonSet:
+        l.DeepCopyInto(obj.(*appsv1.DaemonSet))
     case nil:
         return apierrors.NewNotFound(schema.GroupResource{}, key.Name)
     default:
@@ -266,6 +270,13 @@ func (f *fakeClient) Create(_ context.Context, _ runtime.Object, _ ...client.Cre
     return nil
 }
 
+func (f *fakeClient) Patch(_ context.Context, _ runtime.Object, _ client.Patch, _ ...client.PatchOption) error {
+    if f.patchErr != nil {
+        return f.patchErr
+    }
+    return nil
+}
+
 func TestManagementCluster_healthCheck_NoError(t *testing.T) {
     tests := []struct {
         name string
diff --git a/controlplane/kubeadm/internal/workload_cluster.go b/controlplane/kubeadm/internal/workload_cluster.go
index 9d91c112033d..7e82097bc420 100644
--- a/controlplane/kubeadm/internal/workload_cluster.go
+++ b/controlplane/kubeadm/internal/workload_cluster.go
@@ -28,19 +28,27 @@ import (
     "time"
 
     "github.com/blang/semver"
+    "github.com/docker/distribution/reference"
     "github.com/pkg/errors"
+    appsv1 "k8s.io/api/apps/v1"
     corev1 "k8s.io/api/core/v1"
     rbacv1 "k8s.io/api/rbac/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
+    controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
     "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd"
     etcdutil "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd/util"
     "sigs.k8s.io/cluster-api/util"
     "sigs.k8s.io/cluster-api/util/certs"
+    "sigs.k8s.io/controller-runtime/pkg/client"
     ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
+const (
+    kubeProxyDaemonSetName = "kube-proxy"
+)
+
 var (
     ErrControlPlaneMinNodes = errors.New("cluster has fewer than 2 control plane nodes; removing an etcd member is not supported")
 )
@@ -64,6 +72,7 @@ type WorkloadCluster interface {
     UpdateKubernetesVersionInKubeadmConfigMap(ctx context.Context, version string) error
     UpdateEtcdVersionInKubeadmConfigMap(ctx context.Context, imageRepository, imageTag string) error
     UpdateKubeletConfigMap(ctx context.Context, version semver.Version) error
+    UpdateKubeProxyImageInfo(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane) error
     RemoveEtcdMemberForMachine(ctx context.Context, machine *clusterv1.Machine) error
     RemoveMachineFromKubeadmConfigMap(ctx context.Context, machine *clusterv1.Machine) error
 }
@@ -563,3 +572,54 @@ func firstNodeNotMatchingName(name string, nodes []corev1.Node) *corev1.Node {
     }
     return nil
 }
+
+// modifyImageTag takes an image string and returns the image retagged with tagName.
+func modifyImageTag(image, tagName string) (string, error) {
+    namedRef, err := reference.ParseNormalizedNamed(image)
+    if err != nil {
+        return "", errors.Wrap(err, "failed to parse image name")
+    }
+    // return an error if the image uses a digest as version instead of a tag
+    if _, isCanonical := namedRef.(reference.Canonical); isCanonical {
+        return "", errors.New("image uses digest as version, cannot update tag")
+    }
+
+    // update the image tag with tagName
+    namedTagged, err := reference.WithTag(namedRef, tagName)
+    if err != nil {
+        return "", errors.Wrap(err, "failed to update image tag")
+    }
+
+    return reference.FamiliarString(reference.TagNameOnly(namedTagged)), nil
+}
+
+// UpdateKubeProxyImageInfo updates the kube-proxy image in the kube-proxy DaemonSet.
+func (w *Workload) UpdateKubeProxyImageInfo(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane) error {
+    ds := &appsv1.DaemonSet{}
+
+    if err := w.Client.Get(ctx, ctrlclient.ObjectKey{Name: kubeProxyDaemonSetName, Namespace: metav1.NamespaceSystem}, ds); err != nil {
+        if apierrors.IsNotFound(err) {
+            // if kube-proxy is missing, return without an error
+            return nil
+        }
+        return errors.Wrapf(err, "failed to determine if %s daemonset already exists", kubeProxyDaemonSetName)
+    }
+
+    if len(ds.Spec.Template.Spec.Containers) == 0 {
+        return nil
+    }
+
+    newImageName, err := modifyImageTag(ds.Spec.Template.Spec.Containers[0].Image, kcp.Spec.Version)
+    if err != nil {
+        return err
+    }
+
+    if ds.Spec.Template.Spec.Containers[0].Image != newImageName {
+        patch := client.MergeFrom(ds.DeepCopy())
+        ds.Spec.Template.Spec.Containers[0].Image = newImageName
+        if err := w.Client.Patch(ctx, ds, patch); err != nil {
+            return errors.Wrap(err, "error patching kube-proxy DaemonSet")
+        }
+    }
+    return nil
+}
diff --git a/controlplane/kubeadm/internal/workload_cluster_test.go b/controlplane/kubeadm/internal/workload_cluster_test.go
index 69d4f3495ab9..2f28e1be6ddd 100644
--- a/controlplane/kubeadm/internal/workload_cluster_test.go
+++ b/controlplane/kubeadm/internal/workload_cluster_test.go
@@ -22,9 +22,12 @@ import (
     "testing"
 
     "github.com/blang/semver"
+    appsv1 "k8s.io/api/apps/v1"
+    corev1 "k8s.io/api/core/v1"
     rbacv1 "k8s.io/api/rbac/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     "k8s.io/apimachinery/pkg/runtime/schema"
+    "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
     ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
@@ -104,3 +107,76 @@ func TestCluster_ReconcileKubeletRBACBinding_Error(t *testing.T) {
         })
     }
 }
+
+func TestUpdateKubeProxyImageInfo(t *testing.T) {
+    ds := &appsv1.DaemonSet{
+        Spec: appsv1.DaemonSetSpec{
+            Template: corev1.PodTemplateSpec{
+                Spec: corev1.PodSpec{
+                    Containers: []corev1.Container{{Image: "k8s.gcr.io/kube-proxy:v1.16.3"}},
+                },
+            },
+        },
+    }
+
+    dsImageInDigestFormat := ds.DeepCopy()
+    dsImageInDigestFormat.Spec.Template.Spec.Containers[0].Image = "k8s.gcr.io/kube-proxy@sha256:47bfd"
+
+    dsImageEmpty := ds.DeepCopy()
+    dsImageEmpty.Spec.Template.Spec.Containers[0].Image = ""
+
+    tests := []struct {
+        name      string
+        ds        *appsv1.DaemonSet
+        expectErr bool
+        clientGet map[string]interface{}
+        patchErr  error
+    }{
+        {
+            name:      "succeeds if it patches correctly",
+            ds:        ds,
+            expectErr: false,
+            clientGet: map[string]interface{}{
+                "kube-system/" + kubeProxyDaemonSetName: ds,
+            },
+        },
+        {
+            name:      "returns error if image in kube-proxy ds is in digest format",
+            ds:        dsImageInDigestFormat,
+            expectErr: true,
+            clientGet: map[string]interface{}{
+                "kube-system/" + kubeProxyDaemonSetName: dsImageInDigestFormat,
+            },
+        },
+        {
+            name:      "returns error if image in kube-proxy ds is in the wrong format",
+            ds:        ds,
+            expectErr: true,
+            clientGet: map[string]interface{}{
+                "kube-system/" + kubeProxyDaemonSetName: dsImageEmpty,
+            },
+        },
+    }
+
+    ctx := context.Background()
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+
+            fakeClient := &fakeClient{
+                get: tt.clientGet,
+            }
+            c := &Workload{
+                Client: fakeClient,
+            }
+            err := c.UpdateKubeProxyImageInfo(ctx, &v1alpha3.KubeadmControlPlane{Spec: v1alpha3.KubeadmControlPlaneSpec{Version: "1.16.3"}})
+            if err != nil && !tt.expectErr {
+                t.Fatalf("expected no error, got %s", err)
+            }
+            if err == nil && tt.expectErr {
+                t.Fatal("expected error but got none")
+            }
+
+        })
+    }
+}
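Editor's addendum (not part of the patch): the retagging logic above relies entirely on github.com/docker/distribution/reference. The standalone sketch below shows the same parse-and-retag flow in isolation, so its behaviour on a normal kube-proxy image and on a digest-pinned image can be checked without a management cluster. The retag helper simply mirrors modifyImageTag from the patch; the sample image strings and the v1.17.3 tag are illustrative values only, not anything the patch itself uses.

package main

import (
    "fmt"

    "github.com/docker/distribution/reference"
    "github.com/pkg/errors"
)

// retag mirrors the patch's modifyImageTag: parse the image reference,
// refuse digest-pinned references, and swap in the requested tag.
func retag(image, tag string) (string, error) {
    named, err := reference.ParseNormalizedNamed(image)
    if err != nil {
        return "", errors.Wrap(err, "failed to parse image name")
    }
    // a reference.Canonical carries a digest, so there is no tag to replace
    if _, pinned := named.(reference.Canonical); pinned {
        return "", errors.New("image uses digest as version, cannot update tag")
    }
    tagged, err := reference.WithTag(named, tag)
    if err != nil {
        return "", errors.Wrap(err, "failed to update image tag")
    }
    return reference.FamiliarString(reference.TagNameOnly(tagged)), nil
}

func main() {
    // Plain tagged image: the tag is replaced in place.
    fmt.Println(retag("k8s.gcr.io/kube-proxy:v1.16.3", "v1.17.3"))
    // prints: k8s.gcr.io/kube-proxy:v1.17.3 <nil>

    // Digest-pinned image: retagging is refused, as in the DaemonSet update path.
    fmt.Println(retag("k8s.gcr.io/kube-proxy@sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", "v1.17.3"))
    // prints an empty string and the "image uses digest as version" error
}

One behavioural note that follows from the patch: UpdateKubeProxyImageInfo passes kcp.Spec.Version straight through as the new tag, so the resulting image tag uses whatever format the KubeadmControlPlane version field uses (for example "1.16.3" rather than "v1.16.3"). The test above exercises exactly that case by retagging a "v1.16.3" image with version "1.16.3".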