Skip to content

Commit

Permalink
Update kube-proxy daemonset image tag during KCP upgrade
Browse files Browse the repository at this point in the history
  • Loading branch information
Sedef committed Mar 9, 2020
1 parent 004ecfa commit 82b7f70
Show file tree
Hide file tree
Showing 5 changed files with 157 additions and 2 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -251,7 +251,18 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
return r.upgradeControlPlane(ctx, cluster, kcp, ownedMachines, requireUpgrade)
}

// If we've made it this far, we we can assume that all ownedMachines are up to date
// If we've made it this far, we can assume that all ownedMachines are up to date

// Update kube-proxy image name
workloadCluster, err := r.managementCluster.GetWorkloadCluster(ctx, util.ObjectKey(cluster))
if err != nil {
// Just log and continue, control-plane cluster may be uninitialized
logger.Error(err, "failed to get remote client for workload cluster")
} else if err := workloadCluster.UpdateKubeProxyImageInfo(ctx, kcp); err != nil {
logger.Error(err, "failed to update kube-proxy image name in kube-proxy daemonset")
return ctrl.Result{}, err
}

numMachines := len(ownedMachines)
desiredReplicas := int(*kcp.Spec.Replicas)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -540,7 +540,7 @@ func TestReconcileInitializeControlPlane(t *testing.T) {
recorder: record.NewFakeRecorder(32),
managementCluster: &fakeManagementCluster{
Management: &internal.Management{Client: fakeClient},
Workload: fakeWorkloadCluster{},
Workload: fakeWorkloadCluster{Workload: &internal.Workload{Client: fakeClient}, Status: internal.ClusterStatus{}},
},
}

Expand Down
11 changes: 11 additions & 0 deletions controlplane/kubeadm/internal/cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ import (
"strings"
"testing"

appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
Expand Down Expand Up @@ -225,6 +226,7 @@ type fakeClient struct {
get map[string]interface{}
getErr error
createErr error
patchErr error
}

func (f *fakeClient) Get(_ context.Context, key client.ObjectKey, obj runtime.Object) error {
Expand All @@ -239,6 +241,8 @@ func (f *fakeClient) Get(_ context.Context, key client.ObjectKey, obj runtime.Ob
l.DeepCopyInto(obj.(*rbacv1.RoleBinding))
case *rbacv1.Role:
l.DeepCopyInto(obj.(*rbacv1.Role))
case *appsv1.DaemonSet:
l.DeepCopyInto(obj.(*appsv1.DaemonSet))
case nil:
return apierrors.NewNotFound(schema.GroupResource{}, key.Name)
default:
Expand Down Expand Up @@ -266,6 +270,13 @@ func (f *fakeClient) Create(_ context.Context, _ runtime.Object, _ ...client.Cre
return nil
}

// Patch is a no-op fake: it records nothing and simply returns the configured
// patchErr (nil when unset), letting tests simulate patch failures.
func (f *fakeClient) Patch(_ context.Context, _ runtime.Object, _ client.Patch, _ ...client.PatchOption) error {
	return f.patchErr
}

func TestManagementCluster_healthCheck_NoError(t *testing.T) {
tests := []struct {
name string
Expand Down
57 changes: 57 additions & 0 deletions controlplane/kubeadm/internal/workload_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,19 +28,27 @@ import (
"time"

"github.com/blang/semver"
"github.com/docker/distribution/reference"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
"sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd"
etcdutil "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd/util"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/certs"
"sigs.k8s.io/controller-runtime/pkg/client"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
)

const (
kubeProxyDaemonSetName = "kube-proxy"
)

var (
ErrControlPlaneMinNodes = errors.New("cluster has fewer than 2 control plane nodes; removing an etcd member is not supported")
)
Expand All @@ -64,6 +72,7 @@ type WorkloadCluster interface {
UpdateKubernetesVersionInKubeadmConfigMap(ctx context.Context, version string) error
UpdateEtcdVersionInKubeadmConfigMap(ctx context.Context, imageRepository, imageTag string) error
UpdateKubeletConfigMap(ctx context.Context, version semver.Version) error
UpdateKubeProxyImageInfo(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane) error
RemoveEtcdMemberForMachine(ctx context.Context, machine *clusterv1.Machine) error
RemoveMachineFromKubeadmConfigMap(ctx context.Context, machine *clusterv1.Machine) error
}
Expand Down Expand Up @@ -563,3 +572,51 @@ func firstNodeNotMatchingName(name string, nodes []corev1.Node) *corev1.Node {
}
return nil
}

// modifyImageTag takes an image string, and returns an updated tagged image
func modifyImageTag(image, tagName string) (string, error) {
namedRef, err := reference.ParseNormalizedNamed(image)
if err != nil {
return "", errors.Wrap(err, "failed to parse image name")

}
// return error if images use digest as version instead of tag
if _, isCanonical := namedRef.(reference.Canonical); isCanonical {
return "", errors.New("image uses digest as version, cannot update tag ")
}

// update the image tag with tagName
namedTagged, err := reference.WithTag(namedRef, tagName)
if err != nil {
return "", errors.Wrap(err, "failed to update image tag")
}

return reference.FamiliarString(reference.TagNameOnly(namedTagged)), nil
}

// UpdateKubeProxyImageInfo updates kube-proxy image in the kube-proxy DaemonSet.
// It re-tags the first container's image with kcp.Spec.Version and patches the
// DaemonSet only when the image actually changes. A missing DaemonSet (or one
// with no containers) is not an error: there is simply nothing to update.
func (w *Workload) UpdateKubeProxyImageInfo(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane) error {
	ds := &appsv1.DaemonSet{}

	if err := w.Client.Get(ctx, ctrlclient.ObjectKey{Name: kubeProxyDaemonSetName, Namespace: metav1.NamespaceSystem}, ds); err != nil {
		if apierrors.IsNotFound(err) {
			// if kube-proxy is missing, return without errors
			return nil
		}
		return errors.Wrapf(err, "failed to determine if %s daemonset already exists", kubeProxyDaemonSetName)
	}

	// BUGFIX: the emptiness guard must run BEFORE indexing Containers[0];
	// the original indexed first and would panic on a container-less spec.
	if len(ds.Spec.Template.Spec.Containers) == 0 {
		return nil
	}

	container := &ds.Spec.Template.Spec.Containers[0]
	newImageName, err := modifyImageTag(container.Image, kcp.Spec.Version)
	if err != nil {
		return err
	}

	if container.Image != newImageName {
		// Snapshot the object before mutating so MergeFrom computes the diff.
		patch := ctrlclient.MergeFrom(ds.DeepCopy())
		container.Image = newImageName
		if err := w.Client.Patch(ctx, ds, patch); err != nil {
			return errors.Wrap(err, "error patching kube-proxy DaemonSet")
		}
	}
	return nil
}
76 changes: 76 additions & 0 deletions controlplane/kubeadm/internal/workload_cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,9 +22,12 @@ import (
"testing"

"github.com/blang/semver"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
)

Expand Down Expand Up @@ -104,3 +107,76 @@ func TestCluster_ReconcileKubeletRBACBinding_Error(t *testing.T) {
})
}
}

// TestUpdateKubeProxyImageInfo exercises UpdateKubeProxyImageInfo against a
// fake client: a well-formed tagged image succeeds, while digest-pinned and
// empty image strings are expected to produce errors.
func TestUpdateKubeProxyImageInfo(t *testing.T) {
	baseDS := &appsv1.DaemonSet{
		Spec: appsv1.DaemonSetSpec{
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{Image: "k8s.gcr.io/kube-proxy:v1.16.3"}},
				},
			},
		},
	}

	digestDS := baseDS.DeepCopy()
	digestDS.Spec.Template.Spec.Containers[0].Image = "k8s.gcr.io/kube-proxy@sha256:47bfd"

	emptyImageDS := baseDS.DeepCopy()
	emptyImageDS.Spec.Template.Spec.Containers[0].Image = ""

	testCases := []struct {
		name      string
		ds        *appsv1.DaemonSet
		expectErr bool
		clientGet map[string]interface{}
		patchErr  error
	}{
		{
			name:      "succeeds if patch correctly",
			ds:        baseDS,
			expectErr: false,
			clientGet: map[string]interface{}{
				"kube-system/" + kubeProxyDaemonSetName: baseDS,
			},
		},
		{
			name:      "returns error if image in kube-proxy ds was in digest format",
			ds:        digestDS,
			expectErr: true,
			clientGet: map[string]interface{}{
				"kube-system/" + kubeProxyDaemonSetName: digestDS,
			},
		},
		{
			name:      "returns error if image in kube-proxy ds was in wrong format",
			ds:        baseDS,
			expectErr: true,
			clientGet: map[string]interface{}{
				"kube-system/" + kubeProxyDaemonSetName: emptyImageDS,
			},
		},
	}

	ctx := context.Background()

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			w := &Workload{
				Client: &fakeClient{get: tc.clientGet},
			}
			err := w.UpdateKubeProxyImageInfo(ctx, &v1alpha3.KubeadmControlPlane{Spec: v1alpha3.KubeadmControlPlaneSpec{Version: "1.16.3"}})
			switch {
			case tc.expectErr && err == nil:
				t.Fatal("expected error but got none")
			case !tc.expectErr && err != nil:
				t.Fatalf("expected no error, got %s", err)
			}
		})
	}
}

0 comments on commit 82b7f70

Please sign in to comment.