diff --git a/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_types.go b/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_types.go
index fc59ff28121f..06115291fe0c 100644
--- a/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_types.go
+++ b/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_types.go
@@ -43,7 +43,8 @@ type KubeadmControlPlaneSpec struct {
 	Replicas *int32 `json:"replicas,omitempty"`
 
 	// Version defines the desired Kubernetes version.
-	// +kubebuilder:validation:MinLength:=1
+	// +kubebuilder:validation:MinLength:=2
+	// +kubebuilder:validation:Pattern:=^v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)([-0-9a-zA-Z_\.+]*)?$
 	Version string `json:"version"`
 
 	// InfrastructureTemplate is a required reference to a custom resource
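Note (not part of the patch): with the MinLength and Pattern markers above, the API server itself now rejects a spec.version that is not a "v"-prefixed semantic version, instead of leaving that entirely to the controller. A minimal, standalone Go sketch of what the pattern accepts — the sample version strings are illustrative only:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same expression as the kubebuilder Pattern marker above.
	version := regexp.MustCompile(`^v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)([-0-9a-zA-Z_\.+]*)?$`)

	for _, v := range []string{"v1.16.3", "v1.17.0-alpha.1", "v1.16.3+build1", "1.16.3", "v1.16"} {
		fmt.Printf("%-16s %v\n", v, version.MatchString(v))
	}
	// v1.16.3          true
	// v1.17.0-alpha.1  true
	// v1.16.3+build1   true
	// 1.16.3           false  (missing the required "v" prefix)
	// v1.16            false  (patch version is required)
}

The same rule is what forces the docker e2e fixture further down to switch from "1.16.3" to "v1.16.3".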
diff --git a/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_webhook.go b/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_webhook.go
index c5136e3b57d3..8cbbb94ee0db 100644
--- a/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_webhook.go
+++ b/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_webhook.go
@@ -18,12 +18,14 @@ package v1alpha3
 
 import (
 	"encoding/json"
+	"fmt"
 
 	"github.com/blang/semver"
 	jsonpatch "github.com/evanphx/json-patch"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/validation/field"
+	"sigs.k8s.io/cluster-api/util"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/webhook"
 )
@@ -55,6 +57,7 @@ func (in *KubeadmControlPlane) Default() {
 // ValidateCreate implements webhook.Validator so a webhook will be registered for the type
 func (in *KubeadmControlPlane) ValidateCreate() error {
 	allErrs := in.validateCommon()
+	allErrs = append(allErrs, in.validateEtcd(nil)...)
 	if len(allErrs) > 0 {
 		return apierrors.NewInvalid(GroupVersion.WithKind("KubeadmControlPlane").GroupKind(), in.Name, allErrs)
 	}
@@ -88,8 +91,6 @@ func (in *KubeadmControlPlane) ValidateUpdate(old runtime.Object) error {
 
 	prev := old.(*KubeadmControlPlane)
 
-	allErrs = append(allErrs, in.validateEtcd(prev)...)
-
 	originalJSON, err := json.Marshal(prev)
 	if err != nil {
 		return apierrors.NewInternalError(err)
 	}
@@ -124,6 +125,8 @@ func (in *KubeadmControlPlane) ValidateUpdate(old runtime.Object) error {
 		}
 	}
 
+	allErrs = append(allErrs, in.validateEtcd(prev)...)
+
 	if len(allErrs) > 0 {
 		return apierrors.NewInvalid(GroupVersion.WithKind("KubeadmControlPlane").GroupKind(), in.Name, allErrs)
 	}
@@ -234,6 +237,25 @@ func (in *KubeadmControlPlane) validateCommon() (allErrs field.ErrorList) {
 		allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "version"), in.Spec.Version, "must be a valid semantic version"))
 	}
 
+	allErrs = append(allErrs, in.validateCoreDNS()...)
+
+	return allErrs
+}
+
+func (in *KubeadmControlPlane) validateCoreDNS() (allErrs field.ErrorList) {
+	if in.Spec.KubeadmConfigSpec.ClusterConfiguration == nil {
+		return allErrs
+	}
+	// TODO: Remove when kubeadm types include OpenAPI validation
+	if !util.ImageTagIsValid(in.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag) {
+		allErrs = append(
+			allErrs,
+			field.Forbidden(
+				field.NewPath("spec", "kubeadmConfigSpec", "clusterConfiguration", "dns", "imageTag"),
+				fmt.Sprintf("tag %s is invalid", in.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag),
+			),
+		)
+	}
 
 	return allErrs
 }
@@ -242,26 +264,50 @@ func (in *KubeadmControlPlane) validateEtcd(prev *KubeadmControlPlane) (allErrs
 		return allErrs
 	}
 
-	if in.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.External != nil && prev.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local != nil {
+	// TODO: Remove when kubeadm types include OpenAPI validation
+	if in.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local != nil && !util.ImageTagIsValid(in.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local.ImageTag) {
 		allErrs = append(
 			allErrs,
 			field.Forbidden(
-				field.NewPath("spec", "kubeadmConfigSpec", "initConfiguration", "etcd", "external"),
-				"cannot have both local and external etcd at the same time",
+				field.NewPath("spec", "kubeadmConfigSpec", "clusterConfiguration", "etcd", "local", "imageTag"),
+				fmt.Sprintf("tag %s is invalid", in.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local.ImageTag),
 			),
 		)
 	}
 
-	if in.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local != nil && prev.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.External != nil {
+	if in.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local != nil && in.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.External != nil {
 		allErrs = append(
 			allErrs,
 			field.Forbidden(
-				field.NewPath("spec", "kubeadmConfigSpec", "initConfiguration", "etcd", "local"),
-				"cannot have both local and external etcd at the same time",
+				field.NewPath("spec", "kubeadmConfigSpec", "clusterConfiguration", "etcd", "local"),
+				"cannot have both external and local etcd",
 			),
 		)
 	}
 
+	// update validations
+	if prev != nil {
+		if in.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.External != nil && prev.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local != nil {
+			allErrs = append(
+				allErrs,
+				field.Forbidden(
+					field.NewPath("spec", "kubeadmConfigSpec", "clusterConfiguration", "etcd", "external"),
+					"cannot change between external and local etcd",
+				),
+			)
+		}
+
+		if in.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local != nil && prev.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.External != nil {
+			allErrs = append(
+				allErrs,
+				field.Forbidden(
+					field.NewPath("spec", "kubeadmConfigSpec", "clusterConfiguration", "etcd", "local"),
+					"cannot change between external and local etcd",
+				),
+			)
+		}
+	}
+
 	return allErrs
 }
diff --git a/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_webhook_test.go b/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_webhook_test.go
index e9aa250f18de..9c2a8ae8b651 100644
--- a/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_webhook_test.go
+++ b/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_webhook_test.go
@@ -221,6 +221,21 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) {
 			ImageTag: "v9.1.1",
 		},
 	}
+
+	etcdLocalImageBuildTag := before.DeepCopy()
+	etcdLocalImageBuildTag.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local = &kubeadmv1beta1.LocalEtcd{
+		ImageMeta: kubeadmv1beta1.ImageMeta{
+			ImageTag: "v9.1.1_validBuild1",
+		},
+	}
+
+	etcdLocalImageInvalidTag := before.DeepCopy()
+	etcdLocalImageInvalidTag.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local = &kubeadmv1beta1.LocalEtcd{
+		ImageMeta: kubeadmv1beta1.ImageMeta{
+			ImageTag: "v9.1.1+invalidBuild1",
+		},
+	}
+
 	unsetEtcd := etcdLocalImageTag.DeepCopy()
 	unsetEtcd.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local = nil
 
@@ -256,6 +271,22 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) {
 		},
 	}
 
+	dnsBuildTag := before.DeepCopy()
+	dnsBuildTag.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS = kubeadmv1beta1.DNS{
+		ImageMeta: kubeadmv1beta1.ImageMeta{
+			ImageRepository: "gcr.io/capi-test",
+			ImageTag:        "v0.20.0_build1",
+		},
+	}
+
+	dnsInvalidTag := before.DeepCopy()
+	dnsInvalidTag.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS = kubeadmv1beta1.DNS{
+		ImageMeta: kubeadmv1beta1.ImageMeta{
+			ImageRepository: "gcr.io/capi-test",
+			ImageTag:        "v0.20.0+invalidBuild1",
+		},
+	}
+
 	certificatesDir := before.DeepCopy()
 	certificatesDir.Spec.KubeadmConfigSpec.ClusterConfiguration.CertificatesDir = "a new certificates directory"
 
@@ -392,6 +423,18 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) {
 			before:    before,
 			kcp:       etcdLocalImageTag,
 		},
+		{
+			name:      "should succeed when using a valid etcd image build tag",
+			expectErr: false,
+			before:    before,
+			kcp:       etcdLocalImageBuildTag,
+		},
+		{
+			name:      "should fail when using an invalid etcd image tag",
+			expectErr: true,
+			before:    before,
+			kcp:       etcdLocalImageInvalidTag,
+		},
 		{
 			name:      "should fail when making a change to the cluster config's networking struct",
 			expectErr: true,
@@ -429,11 +472,23 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) {
 			kcp:       scheduler,
 		},
 		{
-			name:      "should fail when making a change to the cluster config's dns",
+			name:      "should succeed when making a change to the cluster config's dns",
 			expectErr: false,
 			before:    before,
 			kcp:       dns,
 		},
+		{
+			name:      "should succeed when using a valid DNS build tag",
+			expectErr: false,
+			before:    before,
+			kcp:       dnsBuildTag,
+		},
+		{
+			name:      "should fail when using an invalid DNS build tag",
+			expectErr: true,
+			before:    before,
+			kcp:       dnsInvalidTag,
+		},
 		{
 			name:      "should fail when making a change to the cluster config's certificatesDir",
 			expectErr: true,
diff --git a/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml b/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml
index 23e1028d6c7f..4a521d87f5be 100644
--- a/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml
+++ b/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml
@@ -908,7 +908,8 @@ spec:
                 type: string
               version:
                 description: Version defines the desired Kubernetes version.
-                minLength: 1
+                minLength: 2
+                pattern: ^v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)([-0-9a-zA-Z_\.+]*)?$
                 type: string
             required:
             - infrastructureTemplate
diff --git a/controlplane/kubeadm/internal/workload_cluster.go b/controlplane/kubeadm/internal/workload_cluster.go
index b4638b93fc4f..31dce053662d 100644
--- a/controlplane/kubeadm/internal/workload_cluster.go
+++ b/controlplane/kubeadm/internal/workload_cluster.go
@@ -40,12 +40,12 @@ import (
 	etcdutil "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd/util"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/certs"
-	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/cluster-api/util/patch"
 	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 const (
-	kubeProxyDaemonSetName = "kube-proxy"
+	kubeProxyKey = "kube-proxy"
 )
 
 var (
@@ -578,28 +578,50 @@ func firstNodeNotMatchingName(name string, nodes []corev1.Node) *corev1.Node {
 func (w *Workload) UpdateKubeProxyImageInfo(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane) error {
 	ds := &appsv1.DaemonSet{}
 
-	if err := w.Client.Get(ctx, ctrlclient.ObjectKey{Name: kubeProxyDaemonSetName, Namespace: metav1.NamespaceSystem}, ds); err != nil {
+	if err := w.Client.Get(ctx, ctrlclient.ObjectKey{Name: kubeProxyKey, Namespace: metav1.NamespaceSystem}, ds); err != nil {
 		if apierrors.IsNotFound(err) {
 			// if kube-proxy is missing, return without errors
 			return nil
 		}
-		return errors.Wrapf(err, "failed to determine if %s daemonset already exists", kubeProxyDaemonSetName)
+		return errors.Wrapf(err, "failed to determine if %s daemonset already exists", kubeProxyKey)
 	}
 
-	if len(ds.Spec.Template.Spec.Containers) == 0 {
+	container := findKubeProxyContainer(ds)
+	if container == nil {
 		return nil
 	}
-	newImageName, err := util.ModifyImageTag(ds.Spec.Template.Spec.Containers[0].Image, kcp.Spec.Version)
+
+	newImageName, err := util.ModifyImageTag(container.Image, kcp.Spec.Version)
 	if err != nil {
 		return err
 	}
 
-	if ds.Spec.Template.Spec.Containers[0].Image != newImageName {
-		patch := client.MergeFrom(ds.DeepCopy())
-		ds.Spec.Template.Spec.Containers[0].Image = newImageName
-		if err := w.Client.Patch(ctx, ds, patch); err != nil {
-			return errors.Wrap(err, "error patching kube-proxy DaemonSet")
+	if container.Image != newImageName {
+		helper, err := patch.NewHelper(ds, w.Client)
+		if err != nil {
+			return err
 		}
+		patchKubeProxyImage(ds, newImageName)
+		return helper.Patch(ctx, ds)
 	}
 
 	return nil
 }
+
+func findKubeProxyContainer(ds *appsv1.DaemonSet) *corev1.Container {
+	containers := ds.Spec.Template.Spec.Containers
+	for idx := range containers {
+		if containers[idx].Name == kubeProxyKey {
+			return &containers[idx]
+		}
+	}
+	return nil
+}
+
+func patchKubeProxyImage(ds *appsv1.DaemonSet, image string) {
+	containers := ds.Spec.Template.Spec.Containers
+	for idx := range containers {
+		if containers[idx].Name == kubeProxyKey {
+			containers[idx].Image = image
+		}
+	}
+}
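Note (not part of the patch): UpdateKubeProxyImageInfo now resolves the container named "kube-proxy" via findKubeProxyContainer instead of assuming index 0, and only issues a patch when util.ModifyImageTag yields a different image. A standalone usage sketch of that image bump; the input values are illustrative, and the expected output matches the table test below:

package main

import (
	"fmt"

	"sigs.k8s.io/cluster-api/util"
)

func main() {
	current := "k8s.gcr.io/kube-proxy:v1.16.2"

	// A KCP version carrying semver build metadata; '+' is not a legal OCI tag
	// character, so ModifyImageTag rewrites it to '_' before re-tagging.
	newImage, err := util.ModifyImageTag(current, "v1.16.3+build1")
	if err != nil {
		panic(err)
	}
	fmt.Println(newImage) // k8s.gcr.io/kube-proxy:v1.16.3_build1
}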
diff --git a/controlplane/kubeadm/internal/workload_cluster_test.go b/controlplane/kubeadm/internal/workload_cluster_test.go
index a670b47f4c46..c293f4fd44e2 100644
--- a/controlplane/kubeadm/internal/workload_cluster_test.go
+++ b/controlplane/kubeadm/internal/workload_cluster_test.go
@@ -26,9 +26,12 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
 	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 )
 
 func TestCluster_ReconcileKubeletRBACBinding_NoError(t *testing.T) {
@@ -108,53 +111,75 @@ func TestCluster_ReconcileKubeletRBACBinding_Error(t *testing.T) {
 	}
 }
 
-func TestUpdateKubeProxyImageInfo(t *testing.T) {
-	ds := &appsv1.DaemonSet{
+func newKubeProxyDS() appsv1.DaemonSet {
+	return appsv1.DaemonSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      kubeProxyKey,
+			Namespace: metav1.NamespaceSystem,
+		},
 		Spec: appsv1.DaemonSetSpec{
 			Template: corev1.PodTemplateSpec{
 				Spec: corev1.PodSpec{
-					Containers: []corev1.Container{{Image: "k8s.gcr.io/kube-proxy:v1.16.2"}},
+					Containers: []corev1.Container{
+						{
+							Image: "k8s.gcr.io/kube-proxy:v1.16.2",
+							Name:  "kube-proxy",
+						},
+					},
 				},
 			},
 		},
 	}
+}
 
-	dsImageInDigestFormat := ds.DeepCopy()
-	dsImageInDigestFormat.Spec.Template.Spec.Containers[0].Image = "k8s.gcr.io/kube-proxy@sha256:47bfd"
+func newKubeProxyDSWithImage(image string) appsv1.DaemonSet {
+	ds := newKubeProxyDS()
+	ds.Spec.Template.Spec.Containers[0].Image = image
+	return ds
+}
+
+func TestUpdateKubeProxyImageInfo(t *testing.T) {
 
-	dsImageEmpty := ds.DeepCopy()
-	dsImageEmpty.Spec.Template.Spec.Containers[0].Image = ""
+	scheme := runtime.NewScheme()
+	if err := appsv1.AddToScheme(scheme); err != nil {
+		t.Fatalf("unable to setup scheme: %s", err)
+	}
 
 	tests := []struct {
-		name      string
-		ds        *appsv1.DaemonSet
-		expectErr bool
-		clientGet map[string]interface{}
-		patchErr  error
+		name        string
+		ds          appsv1.DaemonSet
+		expectErr   bool
+		expectImage string
+		clientGet   map[string]interface{}
+		patchErr    error
+		KCP         *v1alpha3.KubeadmControlPlane
 	}{
 		{
-			name:      "succeeds if patch correctly",
-			ds:        ds,
-			expectErr: false,
-			clientGet: map[string]interface{}{
-				"kube-system/" + kubeProxyDaemonSetName: ds,
-			},
+			name:        "succeeds if patch correctly",
+			ds:          newKubeProxyDS(),
+			expectErr:   false,
+			expectImage: "k8s.gcr.io/kube-proxy:v1.16.3",
+			KCP:         &v1alpha3.KubeadmControlPlane{Spec: v1alpha3.KubeadmControlPlaneSpec{Version: "v1.16.3"}},
 		},
 		{
-			name:      "returns error if image in kube-proxy ds was in digest format",
-			ds:        dsImageInDigestFormat,
-			expectErr: true,
-			clientGet: map[string]interface{}{
-				"kube-system/" + kubeProxyDaemonSetName: dsImageInDigestFormat,
-			},
+			name:        "returns error if image in kube-proxy ds was in digest format",
+			ds:          newKubeProxyDSWithImage("k8s.gcr.io/kube-proxy@sha256:47bfd"),
+			expectErr:   true,
+			expectImage: "k8s.gcr.io/kube-proxy@sha256:47bfd",
+			KCP:         &v1alpha3.KubeadmControlPlane{Spec: v1alpha3.KubeadmControlPlaneSpec{Version: "v1.16.3"}},
+		},
+		{
+			name:        "updates image to an OCI compatible tag when the version contains build metadata",
+			ds:          newKubeProxyDS(),
+			expectErr:   false,
+			expectImage: "k8s.gcr.io/kube-proxy:v1.16.3_build1",
+			KCP:         &v1alpha3.KubeadmControlPlane{Spec: v1alpha3.KubeadmControlPlaneSpec{Version: "v1.16.3+build1"}},
 		},
 		{
 			name:      "returns error if image in kube-proxy ds was in wrong format",
-			ds:        ds,
+			ds:        newKubeProxyDSWithImage(""),
 			expectErr: true,
-			clientGet: map[string]interface{}{
-				"kube-system/" + kubeProxyDaemonSetName: dsImageEmpty,
-			},
+			KCP:       &v1alpha3.KubeadmControlPlane{Spec: v1alpha3.KubeadmControlPlaneSpec{Version: "v1.16.3"}},
 		},
 	}
 
@@ -162,21 +187,43 @@ func TestUpdateKubeProxyImageInfo(t *testing.T) {
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-
-			fakeClient := &fakeClient{
-				get: tt.clientGet,
+			objects := []runtime.Object{
+				&tt.ds,
 			}
-			c := &Workload{
+			fakeClient := fake.NewFakeClientWithScheme(scheme, objects...)
+			w := &Workload{
 				Client: fakeClient,
 			}
-			err := c.UpdateKubeProxyImageInfo(ctx, &v1alpha3.KubeadmControlPlane{Spec: v1alpha3.KubeadmControlPlaneSpec{Version: "v1.16.3"}})
+			err := w.UpdateKubeProxyImageInfo(ctx, tt.KCP)
 			if err != nil && !tt.expectErr {
 				t.Fatalf("expected no error, got %s", err)
 			}
 			if err == nil && tt.expectErr {
 				t.Fatal("expected error but got none")
 			}
-
+			proxyImage, err := getProxyImageInfo(ctx, w.Client)
+			if err != nil {
+				t.Fatalf("expected no error, got %s", err)
+			}
+			if proxyImage != tt.expectImage {
+				t.Fatalf("expected proxy image %s, got %s", tt.expectImage, proxyImage)
+			}
 		})
 	}
 }
+
+func getProxyImageInfo(ctx context.Context, client ctrlclient.Client) (string, error) {
+	ds := &appsv1.DaemonSet{}
+
+	if err := client.Get(ctx, ctrlclient.ObjectKey{Name: kubeProxyKey, Namespace: metav1.NamespaceSystem}, ds); err != nil {
+		if apierrors.IsNotFound(err) {
+			return "", errors.New("no image found")
+		}
+		return "", errors.New("failed to determine if daemonset already exists")
+	}
+	container := findKubeProxyContainer(ds)
+	if container == nil {
+		return "", errors.New("unable to find container")
+	}
+	return container.Image, nil
+}
diff --git a/test/infrastructure/docker/e2e/docker_test.go b/test/infrastructure/docker/e2e/docker_test.go
index a6bb5f017e97..e710c4ad21e2 100644
--- a/test/infrastructure/docker/e2e/docker_test.go
+++ b/test/infrastructure/docker/e2e/docker_test.go
@@ -311,7 +311,7 @@ type ClusterGenerator struct {
 func (c *ClusterGenerator) GenerateCluster(namespace string, replicas int32) (*clusterv1.Cluster, *infrav1.DockerCluster, *controlplanev1.KubeadmControlPlane, *infrav1.DockerMachineTemplate) {
 	generatedName := fmt.Sprintf("test-%d", c.counter)
 	c.counter++
-	version := "1.16.3"
+	version := "v1.16.3"
 
 	infraCluster := &infrav1.DockerCluster{
 		ObjectMeta: metav1.ObjectMeta{
diff --git a/util/util.go b/util/util.go
index bff3255505a8..bb55790fe603 100644
--- a/util/util.go
+++ b/util/util.go
@@ -21,6 +21,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"math/rand"
+	"regexp"
 	"strings"
 	"time"
 
@@ -51,6 +52,7 @@ var (
 	rnd                          = rand.New(rand.NewSource(time.Now().UnixNano()))
 	ErrNoCluster                 = fmt.Errorf("no %q label present", clusterv1.ClusterLabelName)
 	ErrUnstructuredFieldNotFound = fmt.Errorf("field not found")
+	ociTagAllowedChars           = regexp.MustCompile(`[^-a-zA-Z0-9_\.]`)
 )
 
 // RandomString returns a random alphanumeric string.
@@ -64,6 +66,8 @@ func RandomString(n int) string {
 
 // ModifyImageTag takes an imageName (e.g., registry/repo:tag), and returns an image name with updated tag
 func ModifyImageTag(imageName, tagName string) (string, error) {
+	normalisedTagName := SemverToOCIImageTag(tagName)
+
 	namedRef, err := reference.ParseNormalizedNamed(imageName)
 	if err != nil {
 		return "", errors.Wrap(err, "failed to parse image name")
@@ -75,7 +79,7 @@ func ModifyImageTag(imageName, tagName string) (string, error) {
 	}
 
 	// update the image tag with tagName
-	namedTagged, err := reference.WithTag(namedRef, tagName)
+	namedTagged, err := reference.WithTag(namedRef, normalisedTagName)
 	if err != nil {
 		return "", errors.Wrap(err, "failed to update image tag")
 	}
@@ -83,6 +87,11 @@ func ModifyImageTag(imageName, tagName string) (string, error) {
 	return reference.FamiliarString(reference.TagNameOnly(namedTagged)), nil
 }
 
+// ImageTagIsValid reports whether the given image tag is compliant with the OCI image spec
+func ImageTagIsValid(tagName string) bool {
+	return !ociTagAllowedChars.MatchString(tagName)
+}
+
 // GetMachinesForCluster returns a list of machines associated with the cluster.
 func GetMachinesForCluster(ctx context.Context, c client.Client, cluster *clusterv1.Cluster) (*clusterv1.MachineList, error) {
 	var machines clusterv1.MachineList
@@ -99,6 +108,17 @@ func GetMachinesForCluster(ctx context.Context, c client.Client, cluster *cluste
 	return &machines, nil
 }
 
+// SemverToOCIImageTag is a helper function that replaces all
+// non-allowed symbols in tag strings with underscores.
+// An image tag can only contain lowercase and uppercase letters, digits,
+// underscores, periods, and dashes.
+// Current usage is for CI images, where all symbols except '+' are valid,
+// but the function is written for generic use where input can't always be pre-validated.
+// Taken from k8s.io/kubernetes/cmd/kubeadm/app/util.
+func SemverToOCIImageTag(version string) string {
+	return ociTagAllowedChars.ReplaceAllString(version, "_")
+}
+
 // GetControlPlaneMachines returns a slice containing control plane machines.
 func GetControlPlaneMachines(machines []*clusterv1.Machine) (res []*clusterv1.Machine) {
 	for _, machine := range machines {
diff --git a/util/util_test.go b/util/util_test.go
index 08d466020e56..89f3a66fb24b 100644
--- a/util/util_test.go
+++ b/util/util_test.go
@@ -428,6 +428,17 @@ func TestGetMachinesForCluster(t *testing.T) {
 	g.Expect(machines.Items[0].Labels[clusterv1.ClusterLabelName]).To(Equal(cluster.Name))
 }
 
+func TestModifyImageTag(t *testing.T) {
+	g := NewGomegaWithT(t)
+	t.Run("should ensure image is a docker compatible tag", func(t *testing.T) {
+		testTag := "v1.17.4+build1"
+		image := "example.com/image:1.17.3"
+		res, err := ModifyImageTag(image, testTag)
+		g.Expect(err).NotTo(HaveOccurred())
+		g.Expect(res).To(Equal("example.com/image:v1.17.4_build1"))
+	})
+}
+
 func TestEnsureOwnerRef(t *testing.T) {
 	g := NewGomegaWithT(t)
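Note (not part of the patch): a quick standalone exercise of the two tag helpers added in util.go above, using the same values the webhook and kube-proxy tests rely on:

package main

import (
	"fmt"

	"sigs.k8s.io/cluster-api/util"
)

func main() {
	// ImageTagIsValid rejects any character outside [-a-zA-Z0-9_.], which is
	// why semver build metadata ('+') fails webhook validation.
	fmt.Println(util.ImageTagIsValid("v0.20.0_build1"))        // true
	fmt.Println(util.ImageTagIsValid("v0.20.0+invalidBuild1")) // false

	// SemverToOCIImageTag rewrites the disallowed characters to underscores,
	// which is what ModifyImageTag now does internally before re-tagging.
	fmt.Println(util.SemverToOCIImageTag("v1.16.3+build1")) // v1.16.3_build1
}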