diff --git a/api/v1alpha4/conversion.go b/api/v1alpha4/conversion.go
index b667eaa3c64e..586248501084 100644
--- a/api/v1alpha4/conversion.go
+++ b/api/v1alpha4/conversion.go
@@ -122,6 +122,7 @@ func (src *ClusterClass) ConvertTo(dstRaw conversion.Hub) error {
     dst.Spec.Patches = restored.Spec.Patches
     dst.Spec.Variables = restored.Spec.Variables
     dst.Spec.ControlPlane.MachineHealthCheck = restored.Spec.ControlPlane.MachineHealthCheck
+    dst.Spec.ControlPlane.NamingStrategy = restored.Spec.ControlPlane.NamingStrategy
     dst.Spec.ControlPlane.NodeDrainTimeout = restored.Spec.ControlPlane.NodeDrainTimeout
     dst.Spec.ControlPlane.NodeVolumeDetachTimeout = restored.Spec.ControlPlane.NodeVolumeDetachTimeout
     dst.Spec.ControlPlane.NodeDeletionTimeout = restored.Spec.ControlPlane.NodeDeletionTimeout
@@ -129,6 +130,7 @@ func (src *ClusterClass) ConvertTo(dstRaw conversion.Hub) error {
   for i := range restored.Spec.Workers.MachineDeployments {
     dst.Spec.Workers.MachineDeployments[i].MachineHealthCheck = restored.Spec.Workers.MachineDeployments[i].MachineHealthCheck
     dst.Spec.Workers.MachineDeployments[i].FailureDomain = restored.Spec.Workers.MachineDeployments[i].FailureDomain
+    dst.Spec.Workers.MachineDeployments[i].NamingStrategy = restored.Spec.Workers.MachineDeployments[i].NamingStrategy
     dst.Spec.Workers.MachineDeployments[i].NodeDrainTimeout = restored.Spec.Workers.MachineDeployments[i].NodeDrainTimeout
     dst.Spec.Workers.MachineDeployments[i].NodeVolumeDetachTimeout = restored.Spec.Workers.MachineDeployments[i].NodeVolumeDetachTimeout
     dst.Spec.Workers.MachineDeployments[i].NodeDeletionTimeout = restored.Spec.Workers.MachineDeployments[i].NodeDeletionTimeout
diff --git a/api/v1beta1/clusterclass_types.go b/api/v1beta1/clusterclass_types.go
index 12fb52b5590e..6765d158c45f 100644
--- a/api/v1beta1/clusterclass_types.go
+++ b/api/v1beta1/clusterclass_types.go
@@ -106,6 +106,10 @@ type ControlPlaneClass struct {
   // +optional
   MachineHealthCheck *MachineHealthCheckClass `json:"machineHealthCheck,omitempty"`
 
+  // NamingStrategy allows changing the naming pattern used when creating the control plane provider object.
+  // +optional
+  NamingStrategy *ControlPlaneClassNamingStrategy `json:"namingStrategy,omitempty"`
+
   // NodeDrainTimeout is the total amount of time that the controller will spend on draining a node.
   // The default value is 0, meaning that the node can be drained without any time limitations.
   // NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`
@@ -127,6 +131,19 @@ type ControlPlaneClass struct {
   NodeDeletionTimeout *metav1.Duration `json:"nodeDeletionTimeout,omitempty"`
 }
 
+// ControlPlaneClassNamingStrategy defines the naming strategy for control plane objects.
+type ControlPlaneClassNamingStrategy struct {
+  // Template defines the template to use for generating the name of the ControlPlane object.
+  // If not defined, it will fall back to `{{ .cluster.name }}-{{ .random }}`.
+  // If the templated string exceeds 63 characters, it will be trimmed to 58 characters and will
+  // get concatenated with a random suffix of length 5.
+  // The templating mechanism provides the following arguments:
+  // * `.cluster.name`: The name of the cluster object.
+  // * `.random`: A random alphanumeric string, without vowels, of length 5.
+  // +optional
+  Template *string `json:"template,omitempty"`
+}
+
 // WorkersClass is a collection of deployment classes.
 type WorkersClass struct {
   // MachineDeployments is a list of machine deployment classes that can be used to create
@@ -157,6 +174,10 @@ type MachineDeploymentClass struct {
   // +optional
   FailureDomain *string `json:"failureDomain,omitempty"`
 
+  // NamingStrategy allows changing the naming pattern used when creating the MachineDeployment.
+  // +optional
+  NamingStrategy *MachineDeploymentClassNamingStrategy `json:"namingStrategy,omitempty"`
+
   // NodeDrainTimeout is the total amount of time that the controller will spend on draining a node.
   // The default value is 0, meaning that the node can be drained without any time limitations.
   // NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`
@@ -207,6 +228,20 @@ type MachineDeploymentClassTemplate struct {
   Infrastructure LocalObjectTemplate `json:"infrastructure"`
 }
 
+// MachineDeploymentClassNamingStrategy defines the naming strategy for machine deployment objects.
+type MachineDeploymentClassNamingStrategy struct {
+  // Template defines the template to use for generating the name of the MachineDeployment object.
+  // If not defined, it will fall back to `{{ .cluster.name }}-{{ .machineDeployment.topologyName }}-{{ .random }}`.
+  // If the templated string exceeds 63 characters, it will be trimmed to 58 characters and will
+  // get concatenated with a random suffix of length 5.
+  // The templating mechanism provides the following arguments:
+  // * `.cluster.name`: The name of the cluster object.
+  // * `.random`: A random alphanumeric string, without vowels, of length 5.
+  // * `.machineDeployment.topologyName`: The name of the MachineDeployment topology (Cluster.spec.topology.workers.machineDeployments[].name).
+  // +optional
+  Template *string `json:"template,omitempty"`
+}
+
 // MachineHealthCheckClass defines a MachineHealthCheck for a group of Machines.
 type MachineHealthCheckClass struct {
   // UnhealthyConditions contains a list of the conditions that determine
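Editor's note for reviewers: an illustrative ClusterClass excerpt using the two new fields is sketched below. Object and class names are made up and all other required ClusterClass fields are omitted; it mirrors the e2e ClusterClass updated at the end of this diff.

```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: ClusterClass
metadata:
  name: example-clusterclass   # illustrative
spec:
  controlPlane:
    namingStrategy:
      # Optional; defaults to "{{ .cluster.name }}-{{ .random }}" when omitted.
      template: "{{ .cluster.name }}-cp-{{ .random }}"
  workers:
    machineDeployments:
      - class: default-worker
        namingStrategy:
          # .machineDeployment.topologyName resolves to the name used under
          # Cluster.spec.topology.workers.machineDeployments[].
          template: "{{ .cluster.name }}-md-{{ .machineDeployment.topologyName }}-{{ .random }}"
```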
diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go
index 49ee41659261..cc8fd8d59c36 100644
--- a/api/v1beta1/zz_generated.deepcopy.go
+++ b/api/v1beta1/zz_generated.deepcopy.go
@@ -508,6 +508,11 @@ func (in *ControlPlaneClass) DeepCopyInto(out *ControlPlaneClass) {
     *out = new(MachineHealthCheckClass)
     (*in).DeepCopyInto(*out)
   }
+  if in.NamingStrategy != nil {
+    in, out := &in.NamingStrategy, &out.NamingStrategy
+    *out = new(ControlPlaneClassNamingStrategy)
+    (*in).DeepCopyInto(*out)
+  }
   if in.NodeDrainTimeout != nil {
     in, out := &in.NodeDrainTimeout, &out.NodeDrainTimeout
     *out = new(metav1.Duration)
@@ -535,6 +540,26 @@ func (in *ControlPlaneClass) DeepCopy() *ControlPlaneClass {
   return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControlPlaneClassNamingStrategy) DeepCopyInto(out *ControlPlaneClassNamingStrategy) {
+  *out = *in
+  if in.Template != nil {
+    in, out := &in.Template, &out.Template
+    *out = new(string)
+    **out = **in
+  }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneClassNamingStrategy.
+func (in *ControlPlaneClassNamingStrategy) DeepCopy() *ControlPlaneClassNamingStrategy {
+  if in == nil {
+    return nil
+  }
+  out := new(ControlPlaneClassNamingStrategy)
+  in.DeepCopyInto(out)
+  return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ControlPlaneTopology) DeepCopyInto(out *ControlPlaneTopology) {
   *out = *in
@@ -912,6 +937,11 @@ func (in *MachineDeploymentClass) DeepCopyInto(out *MachineDeploymentClass) {
     *out = new(string)
     **out = **in
   }
+  if in.NamingStrategy != nil {
+    in, out := &in.NamingStrategy, &out.NamingStrategy
+    *out = new(MachineDeploymentClassNamingStrategy)
+    (*in).DeepCopyInto(*out)
+  }
   if in.NodeDrainTimeout != nil {
     in, out := &in.NodeDrainTimeout, &out.NodeDrainTimeout
     *out = new(metav1.Duration)
@@ -949,6 +979,26 @@ func (in *MachineDeploymentClass) DeepCopy() *MachineDeploymentClass {
   return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineDeploymentClassNamingStrategy) DeepCopyInto(out *MachineDeploymentClassNamingStrategy) {
+  *out = *in
+  if in.Template != nil {
+    in, out := &in.Template, &out.Template
+    *out = new(string)
+    **out = **in
+  }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentClassNamingStrategy.
+func (in *MachineDeploymentClassNamingStrategy) DeepCopy() *MachineDeploymentClassNamingStrategy {
+  if in == nil {
+    return nil
+  }
+  out := new(MachineDeploymentClassNamingStrategy)
+  in.DeepCopyInto(out)
+  return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *MachineDeploymentClassTemplate) DeepCopyInto(out *MachineDeploymentClassTemplate) {
   *out = *in
diff --git a/api/v1beta1/zz_generated.openapi.go b/api/v1beta1/zz_generated.openapi.go
index 71ea0311f9db..a54ed09374a5 100644
--- a/api/v1beta1/zz_generated.openapi.go
+++ b/api/v1beta1/zz_generated.openapi.go
@@ -48,6 +48,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
     "sigs.k8s.io/cluster-api/api/v1beta1.ClusterVariable": schema_sigsk8sio_cluster_api_api_v1beta1_ClusterVariable(ref),
     "sigs.k8s.io/cluster-api/api/v1beta1.Condition": schema_sigsk8sio_cluster_api_api_v1beta1_Condition(ref),
     "sigs.k8s.io/cluster-api/api/v1beta1.ControlPlaneClass": schema_sigsk8sio_cluster_api_api_v1beta1_ControlPlaneClass(ref),
+    "sigs.k8s.io/cluster-api/api/v1beta1.ControlPlaneClassNamingStrategy": schema_sigsk8sio_cluster_api_api_v1beta1_ControlPlaneClassNamingStrategy(ref),
     "sigs.k8s.io/cluster-api/api/v1beta1.ControlPlaneTopology": schema_sigsk8sio_cluster_api_api_v1beta1_ControlPlaneTopology(ref),
     "sigs.k8s.io/cluster-api/api/v1beta1.ExternalPatchDefinition": schema_sigsk8sio_cluster_api_api_v1beta1_ExternalPatchDefinition(ref),
     "sigs.k8s.io/cluster-api/api/v1beta1.FailureDomainSpec": schema_sigsk8sio_cluster_api_api_v1beta1_FailureDomainSpec(ref),
@@ -59,6 +60,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
     "sigs.k8s.io/cluster-api/api/v1beta1.MachineAddress": schema_sigsk8sio_cluster_api_api_v1beta1_MachineAddress(ref),
     "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeployment": schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeployment(ref),
     "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentClass": schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentClass(ref),
+    "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentClassNamingStrategy": schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentClassNamingStrategy(ref),
     "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentClassTemplate": schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentClassTemplate(ref),
"sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentList": schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentList(ref), "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentSpec": schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentSpec(ref), @@ -954,6 +956,12 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_ControlPlaneClass(ref common.Refer Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.MachineHealthCheckClass"), }, }, + "namingStrategy": { + SchemaProps: spec.SchemaProps{ + Description: "NamingStrategy allows changing the naming pattern used when creating the control plane provider object.", + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.ControlPlaneClassNamingStrategy"), + }, + }, "nodeDrainTimeout": { SchemaProps: spec.SchemaProps{ Description: "NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` NOTE: This value can be overridden while defining a Cluster.Topology.", @@ -977,7 +985,27 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_ControlPlaneClass(ref common.Refer }, }, Dependencies: []string{ - "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/v1beta1.LocalObjectTemplate", "sigs.k8s.io/cluster-api/api/v1beta1.MachineHealthCheckClass", "sigs.k8s.io/cluster-api/api/v1beta1.ObjectMeta"}, + "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/v1beta1.ControlPlaneClassNamingStrategy", "sigs.k8s.io/cluster-api/api/v1beta1.LocalObjectTemplate", "sigs.k8s.io/cluster-api/api/v1beta1.MachineHealthCheckClass", "sigs.k8s.io/cluster-api/api/v1beta1.ObjectMeta"}, + } +} + +func schema_sigsk8sio_cluster_api_api_v1beta1_ControlPlaneClassNamingStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ControlPlaneClassNamingStrategy defines the naming strategy for control plane objects.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "template": { + SchemaProps: spec.SchemaProps{ + Description: "Template defines the template to use for generating the name of the ControlPlane object. If not defined, it will fallback to `{{ .cluster.name }}-{{ .random }}`. If the templated string exceeds 63 characters, it will be trimmed to 58 characters and will get concatenated with a random suffix of length 5. The templating mechanism provides the following arguments: * `.cluster.name`: The name of the cluster object. * `.random`: A random alphanumeric string, without vowels, of length 5.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, } } @@ -1555,6 +1583,12 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentClass(ref common. Format: "", }, }, + "namingStrategy": { + SchemaProps: spec.SchemaProps{ + Description: "NamingStrategy allows changing the naming pattern used when creating the MachineDeployment.", + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentClassNamingStrategy"), + }, + }, "nodeDrainTimeout": { SchemaProps: spec.SchemaProps{ Description: "NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. 
NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass.", @@ -1591,7 +1625,27 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentClass(ref common. }, }, Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentClassTemplate", "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentStrategy", "sigs.k8s.io/cluster-api/api/v1beta1.MachineHealthCheckClass"}, + "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentClassNamingStrategy", "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentClassTemplate", "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentStrategy", "sigs.k8s.io/cluster-api/api/v1beta1.MachineHealthCheckClass"}, + } +} + +func schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentClassNamingStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "MachineDeploymentClassNamingStrategy defines the naming strategy for machine deployment objects.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "template": { + SchemaProps: spec.SchemaProps{ + Description: "Template defines the template to use for generating the name of the MachineDeployment object. If not defined, it will fallback to `{{ .cluster.name }}-{{ .machineDeployment.topologyName }}-{{ .random }}`. If the templated string exceeds 63 characters, it will be trimmed to 58 characters and will get concatenated with a random suffix of length 5. The templating mechanism provides the following arguments: * `.cluster.name`: The name of the cluster object. * `.random`: A random alphanumeric string, without vowels, of length 5. * `.machineDeployment.topologyName`: The name of the MachineDeployment topology (Cluster.spec.topology.workers.machineDeployments[].name).", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, } } diff --git a/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml b/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml index 0f37b39fe373..51d57b6098b5 100644 --- a/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml +++ b/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml @@ -587,6 +587,22 @@ spec: More info: http://kubernetes.io/docs/user-guide/labels' type: object type: object + namingStrategy: + description: NamingStrategy allows changing the naming pattern + used when creating the control plane provider object. + properties: + template: + description: 'Template defines the template to use for generating + the name of the ControlPlane object. If not defined, it + will fallback to `{{ .cluster.name }}-{{ .random }}`. If + the templated string exceeds 63 characters, it will be trimmed + to 58 characters and will get concatenated with a random + suffix of length 5. The templating mechanism provides the + following arguments: * `.cluster.name`: The name of the + cluster object. * `.random`: A random alphanumeric string, + without vowels, of length 5.' + type: string + type: object nodeDeletionTimeout: description: 'NodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine hosts after @@ -1156,6 +1172,25 @@ spec: using this MachineDeploymentClass.' 
                  format: int32
                  type: integer
+                namingStrategy:
+                  description: NamingStrategy allows changing the naming pattern
+                    used when creating the MachineDeployment.
+                  properties:
+                    template:
+                      description: 'Template defines the template to use for
+                        generating the name of the MachineDeployment object.
+                        If not defined, it will fall back to `{{ .cluster.name
+                        }}-{{ .machineDeployment.topologyName }}-{{ .random
+                        }}`. If the templated string exceeds 63 characters,
+                        it will be trimmed to 58 characters and will get concatenated
+                        with a random suffix of length 5. The templating mechanism
+                        provides the following arguments: * `.cluster.name`:
+                        The name of the cluster object. * `.random`: A random
+                        alphanumeric string, without vowels, of length 5.
+                        * `.machineDeployment.topologyName`: The name of the
+                        MachineDeployment topology (Cluster.spec.topology.workers.machineDeployments[].name).'
+                      type: string
+                  type: object
                 nodeDeletionTimeout:
                   description: 'NodeDeletionTimeout defines how long the controller
                     will attempt to delete the Node that the Machine hosts
diff --git a/internal/controllers/topology/cluster/desired_state.go b/internal/controllers/topology/cluster/desired_state.go
index aac0115b94f2..cc48434765cf 100644
--- a/internal/controllers/topology/cluster/desired_state.go
+++ b/internal/controllers/topology/cluster/desired_state.go
@@ -25,7 +25,6 @@ import (
   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
   "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
   "k8s.io/apimachinery/pkg/runtime/schema"
-  "k8s.io/apiserver/pkg/storage/names"
   "k8s.io/utils/pointer"
   "sigs.k8s.io/controller-runtime/pkg/client"
@@ -38,6 +37,7 @@ import (
   "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope"
   "sigs.k8s.io/cluster-api/internal/hooks"
   tlog "sigs.k8s.io/cluster-api/internal/log"
+  "sigs.k8s.io/cluster-api/internal/topology/names"
   "sigs.k8s.io/cluster-api/util"
 )
@@ -131,7 +131,7 @@ func computeInfrastructureCluster(_ context.Context, s *scope.Scope) (*unstructu
     template:              template,
     templateClonedFromRef: templateClonedFromRef,
     cluster:               cluster,
-    namePrefix:            fmt.Sprintf("%s-", cluster.Name),
+    nameGenerator:         names.SimpleNameGenerator(fmt.Sprintf("%s-", cluster.Name)),
     currentObjectRef:      currentRef,
     // Note: It is not possible to add an ownerRef to Cluster at this stage, otherwise the provisioning
     // of the infrastructure cluster starts no matter of the object being actually referenced by the Cluster itself.
@@ -169,18 +169,17 @@ func computeControlPlaneInfrastructureMachineTemplate(_ context.Context, s *scop
     }
   }
 
-  controlPlaneInfrastructureMachineTemplate := templateToTemplate(templateToInput{
+  return templateToTemplate(templateToInput{
     template:              template,
     templateClonedFromRef: templateClonedFromRef,
     cluster:               cluster,
-    namePrefix:            controlPlaneInfrastructureMachineTemplateNamePrefix(cluster.Name),
+    nameGenerator:         names.SimpleNameGenerator(controlPlaneInfrastructureMachineTemplateNamePrefix(cluster.Name)),
     currentObjectRef:      currentRef,
     // Note: we are adding an ownerRef to Cluster so the template will be automatically garbage collected
     // in case of errors in between creating this template and updating the Cluster object
     // with the reference to the ControlPlane object using this template.
     ownerRef: ownerReferenceTo(s.Current.Cluster),
   })
-  return controlPlaneInfrastructureMachineTemplate, nil
 }
 
 // computeControlPlane computes the desired state for the ControlPlane object starting from the
@@ -206,11 +205,16 @@ func (r *Reconciler) computeControlPlane(ctx context.Context, s *scope.Scope, in
   controlPlaneAnnotations := util.MergeMap(topologyMetadata.Annotations, clusterClassMetadata.Annotations)
 
+  nameTemplate := "{{ .cluster.name }}-{{ .random }}"
+  if s.Blueprint.ClusterClass.Spec.ControlPlane.NamingStrategy != nil && s.Blueprint.ClusterClass.Spec.ControlPlane.NamingStrategy.Template != nil {
+    nameTemplate = *s.Blueprint.ClusterClass.Spec.ControlPlane.NamingStrategy.Template
+  }
+
   controlPlane, err := templateToObject(templateToInput{
     template:              template,
     templateClonedFromRef: templateClonedFromRef,
     cluster:               cluster,
-    namePrefix:            fmt.Sprintf("%s-", cluster.Name),
+    nameGenerator:         names.ControlPlaneNameGenerator(nameTemplate, cluster.Name),
    currentObjectRef:      currentRef,
     labels:                controlPlaneLabels,
     annotations:           controlPlaneAnnotations,
@@ -578,17 +582,21 @@ func computeMachineDeployment(_ context.Context, s *scope.Scope, machineDeployme
   if currentMachineDeployment != nil && currentMachineDeployment.BootstrapTemplate != nil {
     currentBootstrapTemplateRef = currentMachineDeployment.Object.Spec.Template.Spec.Bootstrap.ConfigRef
   }
-  desiredMachineDeployment.BootstrapTemplate = templateToTemplate(templateToInput{
+  var err error
+  desiredMachineDeployment.BootstrapTemplate, err = templateToTemplate(templateToInput{
     template:              machineDeploymentBlueprint.BootstrapTemplate,
     templateClonedFromRef: contract.ObjToRef(machineDeploymentBlueprint.BootstrapTemplate),
     cluster:               s.Current.Cluster,
-    namePrefix:            bootstrapTemplateNamePrefix(s.Current.Cluster.Name, machineDeploymentTopology.Name),
+    nameGenerator:         names.SimpleNameGenerator(bootstrapTemplateNamePrefix(s.Current.Cluster.Name, machineDeploymentTopology.Name)),
     currentObjectRef:      currentBootstrapTemplateRef,
     // Note: we are adding an ownerRef to Cluster so the template will be automatically garbage collected
     // in case of errors in between creating this template and creating/updating the MachineDeployment object
     // with the reference to the ControlPlane object using this template.
     ownerRef: ownerReferenceTo(s.Current.Cluster),
   })
+  if err != nil {
+    return nil, err
+  }
 
   bootstrapTemplateLabels := desiredMachineDeployment.BootstrapTemplate.GetLabels()
   if bootstrapTemplateLabels == nil {
@@ -603,17 +611,20 @@ func computeMachineDeployment(_ context.Context, s *scope.Scope, machineDeployme
   if currentMachineDeployment != nil && currentMachineDeployment.InfrastructureMachineTemplate != nil {
     currentInfraMachineTemplateRef = &currentMachineDeployment.Object.Spec.Template.Spec.InfrastructureRef
   }
-  desiredMachineDeployment.InfrastructureMachineTemplate = templateToTemplate(templateToInput{
+  desiredMachineDeployment.InfrastructureMachineTemplate, err = templateToTemplate(templateToInput{
     template:              machineDeploymentBlueprint.InfrastructureMachineTemplate,
     templateClonedFromRef: contract.ObjToRef(machineDeploymentBlueprint.InfrastructureMachineTemplate),
     cluster:               s.Current.Cluster,
-    namePrefix:            infrastructureMachineTemplateNamePrefix(s.Current.Cluster.Name, machineDeploymentTopology.Name),
+    nameGenerator:         names.SimpleNameGenerator(infrastructureMachineTemplateNamePrefix(s.Current.Cluster.Name, machineDeploymentTopology.Name)),
     currentObjectRef:      currentInfraMachineTemplateRef,
     // Note: we are adding an ownerRef to Cluster so the template will be automatically garbage collected
     // in case of errors in between creating this template and creating/updating the MachineDeployment object
     // with the reference to the ControlPlane object using this template.
     ownerRef: ownerReferenceTo(s.Current.Cluster),
   })
+  if err != nil {
+    return nil, err
+  }
 
   infraMachineTemplateLabels := desiredMachineDeployment.InfrastructureMachineTemplate.GetLabels()
   if infraMachineTemplateLabels == nil {
@@ -665,13 +676,23 @@ func computeMachineDeployment(_ context.Context, s *scope.Scope, machineDeployme
     return nil, errors.Wrap(err, "failed to calculate desired infrastructure machine template ref")
   }
 
+  nameTemplate := "{{ .cluster.name }}-{{ .machineDeployment.topologyName }}-{{ .random }}"
+  if machineDeploymentClass.NamingStrategy != nil && machineDeploymentClass.NamingStrategy.Template != nil {
+    nameTemplate = *machineDeploymentClass.NamingStrategy.Template
+  }
+
+  name, err := names.MachineDeploymentNameGenerator(nameTemplate, s.Current.Cluster.Name, machineDeploymentTopology.Name).GenerateName()
+  if err != nil {
+    return nil, errors.Wrap(err, "failed to generate name for MachineDeployment")
+  }
+
   desiredMachineDeploymentObj := &clusterv1.MachineDeployment{
     TypeMeta: metav1.TypeMeta{
       Kind:       clusterv1.GroupVersion.WithKind("MachineDeployment").Kind,
       APIVersion: clusterv1.GroupVersion.String(),
     },
     ObjectMeta: metav1.ObjectMeta{
-      Name:      names.SimpleNameGenerator.GenerateName(fmt.Sprintf("%s-%s-", s.Current.Cluster.Name, machineDeploymentTopology.Name)),
+      Name:      name,
       Namespace: s.Current.Cluster.Namespace,
     },
     Spec: clusterv1.MachineDeploymentSpec{
@@ -874,7 +895,7 @@ type templateToInput struct {
   template              *unstructured.Unstructured
   templateClonedFromRef *corev1.ObjectReference
   cluster               *clusterv1.Cluster
-  namePrefix            string
+  nameGenerator         names.NameGenerator
   currentObjectRef      *corev1.ObjectReference
   labels                map[string]string
   annotations           map[string]string
@@ -913,8 +934,12 @@ func templateToObject(in templateToInput) (*unstructured.Unstructured, error) {
   // Ensure the generated objects have a meaningful name.
   // NOTE: In case there is already a ref to this object in the Cluster, re-use the same name
-  // in order to simplify compare at later stages of the reconcile process.
-  object.SetName(names.SimpleNameGenerator.GenerateName(in.namePrefix))
+  // in order to simplify comparison at later stages of the reconcile process.
+  name, err := in.nameGenerator.GenerateName()
+  if err != nil {
+    return nil, errors.Wrapf(err, "failed to generate name for %s", object.GetKind())
+  }
+  object.SetName(name)
   if in.currentObjectRef != nil && len(in.currentObjectRef.Name) > 0 {
     object.SetName(in.currentObjectRef.Name)
   }
@@ -927,7 +952,7 @@ func templateToObject(in templateToInput) (*unstructured.Unstructured, error) {
 // and assigning a meaningful name (or reusing current reference name).
 // NOTE: We are creating a copy of the ClusterClass template for each cluster so
 // it is possible to add cluster specific information without affecting the original object.
-func templateToTemplate(in templateToInput) *unstructured.Unstructured {
+func templateToTemplate(in templateToInput) (*unstructured.Unstructured, error) {
   template := &unstructured.Unstructured{}
   in.template.DeepCopyInto(template)
 
@@ -973,13 +998,17 @@
   // Ensure the generated template gets a meaningful name.
   // NOTE: In case there is already an object ref to this template, it is required to re-use the same name
-  // in order to simplify compare at later stages of the reconcile process.
-  template.SetName(names.SimpleNameGenerator.GenerateName(in.namePrefix))
+  // in order to simplify comparison at later stages of the reconcile process.
+  name, err := in.nameGenerator.GenerateName()
+  if err != nil {
+    return nil, errors.Wrapf(err, "failed to generate name for %s", template.GetKind())
+  }
+  template.SetName(name)
   if in.currentObjectRef != nil && len(in.currentObjectRef.Name) > 0 {
     template.SetName(in.currentObjectRef.Name)
   }
 
-  return template
+  return template, nil
 }
 
 func ownerReferenceTo(obj client.Object) *metav1.OwnerReference {
diff --git a/internal/controllers/topology/cluster/desired_state_test.go b/internal/controllers/topology/cluster/desired_state_test.go
index a301c840eeb7..91079163a4a3 100644
--- a/internal/controllers/topology/cluster/desired_state_test.go
+++ b/internal/controllers/topology/cluster/desired_state_test.go
@@ -41,6 +41,7 @@ import (
   "sigs.k8s.io/cluster-api/internal/hooks"
   fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake"
   "sigs.k8s.io/cluster-api/internal/test/builder"
+  "sigs.k8s.io/cluster-api/internal/topology/names"
   "sigs.k8s.io/cluster-api/util"
 )
@@ -1995,7 +1996,7 @@ func TestTemplateToObject(t *testing.T) {
       template:              template,
       templateClonedFromRef: fakeRef1,
       cluster:               cluster,
-      namePrefix:            cluster.Name,
+      nameGenerator:         names.SimpleNameGenerator(cluster.Name),
       currentObjectRef:      nil,
     })
     g.Expect(err).ToNot(HaveOccurred())
@@ -2015,7 +2016,7 @@
       template:              template,
       templateClonedFromRef: fakeRef1,
       cluster:               cluster,
-      namePrefix:            cluster.Name,
+      nameGenerator:         names.SimpleNameGenerator(cluster.Name),
       currentObjectRef:      fakeRef2,
     })
     g.Expect(err).ToNot(HaveOccurred())
@@ -2052,13 +2053,14 @@ func TestTemplateToTemplate(t *testing.T) {
   t.Run("Generates a template from a template", func(t *testing.T) {
     g := NewWithT(t)
-    obj := templateToTemplate(templateToInput{
+    obj, err := templateToTemplate(templateToInput{
       template:              template,
       templateClonedFromRef: fakeRef1,
       cluster:               cluster,
-      namePrefix:            cluster.Name,
+      nameGenerator:         names.SimpleNameGenerator(cluster.Name),
       currentObjectRef:      nil,
     })
+    g.Expect(err).ToNot(HaveOccurred())
     g.Expect(obj).ToNot(BeNil())
     assertTemplateToTemplate(g, assertTemplateInput{
       cluster: cluster,
@@ -2070,13 +2072,14 @@
   })
   t.Run("Overrides the generated name if there is already a reference", func(t *testing.T) {
     g := NewWithT(t)
-    obj := templateToTemplate(templateToInput{
+    obj, err := templateToTemplate(templateToInput{
       template:              template,
       templateClonedFromRef: fakeRef1,
       cluster:               cluster,
-      namePrefix:            cluster.Name,
+      nameGenerator:         names.SimpleNameGenerator(cluster.Name),
       currentObjectRef:      fakeRef2,
     })
+    g.Expect(err).ToNot(HaveOccurred())
     g.Expect(obj).ToNot(BeNil())
     assertTemplateToTemplate(g, assertTemplateInput{
       cluster: cluster,
diff --git a/internal/test/builder/builders.go b/internal/test/builder/builders.go
index 49f1b3ea363c..10c7500b75cf 100644
--- a/internal/test/builder/builders.go
+++ b/internal/test/builder/builders.go
@@ -253,6 +253,7 @@ type ClusterClassBuilder struct {
   controlPlaneNodeDrainTimeout        *metav1.Duration
   controlPlaneNodeVolumeDetachTimeout *metav1.Duration
   controlPlaneNodeDeletionTimeout     *metav1.Duration
+  controlPlaneNamingStrategy          *clusterv1.ControlPlaneClassNamingStrategy
   machineDeploymentClasses            []clusterv1.MachineDeploymentClass
   variables                           []clusterv1.ClusterClassVariable
   statusVariables                     []clusterv1.ClusterClassStatusVariable
@@ -318,6 +319,12 @@ func (c *ClusterClassBuilder) WithControlPlaneNodeDeletionTimeout(t *metav1.Dura
   return c
 }
 
+// WithControlPlaneNamingStrategy sets the NamingStrategy for the ControlPlane to the ClusterClassBuilder.
+func (c *ClusterClassBuilder) WithControlPlaneNamingStrategy(n *clusterv1.ControlPlaneClassNamingStrategy) *ClusterClassBuilder {
+  c.controlPlaneNamingStrategy = n
+  return c
+}
+
 // WithVariables adds the Variables to the ClusterClassBuilder.
 func (c *ClusterClassBuilder) WithVariables(vars ...clusterv1.ClusterClassVariable) *ClusterClassBuilder {
   c.variables = vars
@@ -394,6 +401,9 @@ func (c *ClusterClassBuilder) Build() *clusterv1.ClusterClass {
       Ref: objToRef(c.controlPlaneInfrastructureMachineTemplate),
     }
   }
+  if c.controlPlaneNamingStrategy != nil {
+    obj.Spec.ControlPlane.NamingStrategy = c.controlPlaneNamingStrategy
+  }
   obj.Spec.Workers.MachineDeployments = c.machineDeploymentClasses
 
   return obj
@@ -413,6 +423,7 @@ type MachineDeploymentClassBuilder struct {
   nodeDeletionTimeout           *metav1.Duration
   minReadySeconds               *int32
   strategy                      *clusterv1.MachineDeploymentStrategy
+  namingStrategy                *clusterv1.MachineDeploymentClassNamingStrategy
 }
 
 // MachineDeploymentClass returns a MachineDeploymentClassBuilder with the given name and namespace.
@@ -488,6 +499,12 @@ func (m *MachineDeploymentClassBuilder) WithStrategy(s *clusterv1.MachineDeploym
   return m
 }
 
+// WithNamingStrategy sets the NamingStrategy for the MachineDeploymentClassBuilder.
+func (m *MachineDeploymentClassBuilder) WithNamingStrategy(n *clusterv1.MachineDeploymentClassNamingStrategy) *MachineDeploymentClassBuilder {
+  m.namingStrategy = n
+  return m
+}
+
 // Build creates a full MachineDeploymentClass object with the variables passed to the MachineDeploymentClassBuilder.
 func (m *MachineDeploymentClassBuilder) Build() *clusterv1.MachineDeploymentClass {
   obj := &clusterv1.MachineDeploymentClass{
@@ -526,6 +543,9 @@ func (m *MachineDeploymentClassBuilder) Build() *clusterv1.MachineDeploymentClas
   if m.strategy != nil {
     obj.Strategy = m.strategy
   }
+  if m.namingStrategy != nil {
+    obj.NamingStrategy = m.namingStrategy
+  }
 
   return obj
 }
diff --git a/internal/test/builder/zz_generated.deepcopy.go b/internal/test/builder/zz_generated.deepcopy.go
index c1efb456be8a..7c4656352bc5 100644
--- a/internal/test/builder/zz_generated.deepcopy.go
+++ b/internal/test/builder/zz_generated.deepcopy.go
@@ -134,6 +134,11 @@ func (in *ClusterClassBuilder) DeepCopyInto(out *ClusterClassBuilder) {
     *out = new(v1.Duration)
     **out = **in
   }
+  if in.controlPlaneNamingStrategy != nil {
+    in, out := &in.controlPlaneNamingStrategy, &out.controlPlaneNamingStrategy
+    *out = new(v1beta1.ControlPlaneClassNamingStrategy)
+    (*in).DeepCopyInto(*out)
+  }
   if in.machineDeploymentClasses != nil {
     in, out := &in.machineDeploymentClasses, &out.machineDeploymentClasses
     *out = make([]v1beta1.MachineDeploymentClass, len(*in))
@@ -447,6 +452,11 @@ func (in *MachineDeploymentClassBuilder) DeepCopyInto(out *MachineDeploymentClas
     *out = new(v1beta1.MachineDeploymentStrategy)
     (*in).DeepCopyInto(*out)
   }
+  if in.namingStrategy != nil {
+    in, out := &in.namingStrategy, &out.namingStrategy
+    *out = new(v1beta1.MachineDeploymentClassNamingStrategy)
+    (*in).DeepCopyInto(*out)
+  }
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentClassBuilder.
diff --git a/internal/topology/names/names.go b/internal/topology/names/names.go
new file mode 100644
index 000000000000..38f86671bc5a
--- /dev/null
+++ b/internal/topology/names/names.go
@@ -0,0 +1,118 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package names implements name generators for managed topology.
+package names
+
+import (
+  "bytes"
+  "fmt"
+  "text/template"
+
+  "github.com/pkg/errors"
+  utilrand "k8s.io/apimachinery/pkg/util/rand"
+)
+
+// This is a copy of the constants at k8s.io/apiserver/pkg/storage/names.
+const (
+  maxNameLength          = 63
+  randomLength           = 5
+  maxGeneratedNameLength = maxNameLength - randomLength
+)
+
+type simpleNameGenerator struct {
+  base string
+}
+
+func (s *simpleNameGenerator) GenerateName() (string, error) {
+  base := s.base
+  if len(base) > maxGeneratedNameLength {
+    base = base[:maxGeneratedNameLength]
+  }
+  return fmt.Sprintf("%s%s", base, utilrand.String(randomLength)), nil
+}
+
+// NameGenerator generates names for objects.
+type NameGenerator interface {
+  // GenerateName generates a valid name. The generator is responsible for
+  // knowing the maximum valid name length.
+  GenerateName() (string, error)
+}
+
+// SimpleNameGenerator returns a NameGenerator which is based on
+// k8s.io/apiserver/pkg/storage/names.SimpleNameGenerator.
+func SimpleNameGenerator(base string) NameGenerator {
+  return &simpleNameGenerator{
+    base: base,
+  }
+}
+
+// ControlPlaneNameGenerator returns a generator for creating a control plane name.
+func ControlPlaneNameGenerator(templateString, clusterName string) NameGenerator {
+  return newTemplateGenerator(templateString, clusterName,
+    map[string]interface{}{})
+}
+
+// MachineDeploymentNameGenerator returns a generator for creating a MachineDeployment name.
+func MachineDeploymentNameGenerator(templateString, clusterName, topologyName string) NameGenerator {
+  return newTemplateGenerator(templateString, clusterName,
+    map[string]interface{}{
+      "machineDeployment": map[string]interface{}{
+        "topologyName": topologyName,
+      },
+    })
+}
+
+// templateGenerator parses the template string as text/template and executes it using
+// the passed data to generate a name.
+type templateGenerator struct {
+  template string
+  data     map[string]interface{}
+}
+
+func newTemplateGenerator(template, clusterName string, data map[string]interface{}) NameGenerator {
+  data["cluster"] = map[string]interface{}{
+    "name": clusterName,
+  }
+  data["random"] = utilrand.String(randomLength)
+
+  return &templateGenerator{
+    template: template,
+    data:     data,
+  }
+}
+
+func (g *templateGenerator) GenerateName() (string, error) {
+  tpl, err := template.New("template name generator").Option("missingkey=error").Parse(g.template)
+  if err != nil {
+    return "", errors.Wrapf(err, "parsing template %q", g.template)
+  }
+
+  var buf bytes.Buffer
+  if err := tpl.Execute(&buf, g.data); err != nil {
+    return "", errors.Wrap(err, "rendering template")
+  }
+
+  name := buf.String()
+
+  // If the name exceeds the maxNameLength: trim to maxGeneratedNameLength and add
+  // a random suffix.
+  if len(name) > maxNameLength {
+    name = name[:maxGeneratedNameLength] + utilrand.String(randomLength)
+  }
+
+  return name, nil
+}
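Editor's note for reviewers: a condensed usage sketch of the new generators follows. It is illustrative only: the package is internal, so it can only be used from within this module; the cluster and topology names are made up; and the random suffix differs for every generator constructed.

```go
package example // illustrative; the names package is internal to sigs.k8s.io/cluster-api

import (
	"fmt"

	"sigs.k8s.io/cluster-api/internal/topology/names"
)

func generateMachineDeploymentName() (string, error) {
	// The default template used by computeMachineDeployment, rendered here with
	// made-up inputs. ".random" is resolved once, when the generator is
	// constructed (see newTemplateGenerator above).
	gen := names.MachineDeploymentNameGenerator(
		"{{ .cluster.name }}-{{ .machineDeployment.topologyName }}-{{ .random }}",
		"my-cluster", // cluster name (illustrative)
		"workers",    // MachineDeployment topology name (illustrative)
	)

	// Yields something like "my-cluster-workers-fj6c2". Parse and execution
	// errors (e.g. a template referencing a key that is not provided, due to
	// missingkey=error) surface here; names longer than 63 characters are
	// trimmed to 58 and get a fresh 5-character random suffix.
	name, err := gen.GenerateName()
	if err != nil {
		return "", fmt.Errorf("generating MachineDeployment name: %w", err)
	}
	return name, nil
}
```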
"github.com/onsi/gomega" + "github.com/onsi/gomega/types" +) + +func Test_templateGenerator_GenerateName(t *testing.T) { + tests := []struct { + name string + template string + data map[string]interface{} + want []types.GomegaMatcher + wantErr bool + }{ + { + name: "simple template", + template: "some-simple-{{ .test }}", + data: map[string]interface{}{ + "test": "testdata", + }, + want: []types.GomegaMatcher{ + Equal("some-simple-testdata"), + }, + }, + { + name: "name which gets trimmed and added a random suffix with 5 characters", + template: fmt.Sprintf("%064d", 0), + want: []types.GomegaMatcher{ + HavePrefix(fmt.Sprintf("%058d", 0)), + Not(HaveSuffix("00000")), + }, + }, + { + name: "name which does not get trimmed", + template: fmt.Sprintf("%063d", 0), + want: []types.GomegaMatcher{ + Equal(fmt.Sprintf("%063d", 0)), + }, + }, + { + name: "error on parsing template", + template: "some-hardcoded-name-{{ .doesnotexistindata", + wantErr: true, + }, + { + name: "error on due to missing key in data", + template: "some-hardcoded-name-{{ .doesnotexistindata }}", + data: nil, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + generator := &templateGenerator{ + template: tt.template, + data: tt.data, + } + got, err := generator.GenerateName() + if (err != nil) != tt.wantErr { + t.Errorf("templateGenerator.GenerateName() error = %v, wantErr %v", err, tt.wantErr) + return + } + if len(got) > maxNameLength { + t.Errorf("generated name should never be longer than %d, got %d", maxNameLength, len(got)) + } + for _, matcher := range tt.want { + g.Expect(got).To(matcher) + } + }) + } +} diff --git a/internal/webhooks/clusterclass.go b/internal/webhooks/clusterclass.go index a90fa33fb595..61cb24e3b9ad 100644 --- a/internal/webhooks/clusterclass.go +++ b/internal/webhooks/clusterclass.go @@ -27,6 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -37,6 +38,7 @@ import ( "sigs.k8s.io/cluster-api/api/v1beta1/index" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/internal/topology/check" + "sigs.k8s.io/cluster-api/internal/topology/names" "sigs.k8s.io/cluster-api/internal/topology/variables" ) @@ -148,6 +150,9 @@ func (webhook *ClusterClass) validate(ctx context.Context, oldClusterClass, newC // Ensure MachineHealthChecks are valid. allErrs = append(allErrs, validateMachineHealthCheckClasses(newClusterClass)...) + // Ensure NamingStrategies are valid. + allErrs = append(allErrs, validateNamingStrategies(newClusterClass)...) + // Validate variables. 
   allErrs = append(allErrs,
     variables.ValidateClusterClassVariables(ctx, newClusterClass.Spec.Variables, field.NewPath("spec", "variables"))...,
@@ -349,6 +354,49 @@ func validateMachineHealthCheckClasses(clusterClass *clusterv1.ClusterClass) fie
   return allErrs
 }
 
+func validateNamingStrategies(clusterClass *clusterv1.ClusterClass) field.ErrorList {
+  var allErrs field.ErrorList
+
+  if clusterClass.Spec.ControlPlane.NamingStrategy != nil && clusterClass.Spec.ControlPlane.NamingStrategy.Template != nil {
+    name, err := names.ControlPlaneNameGenerator(*clusterClass.Spec.ControlPlane.NamingStrategy.Template, "cluster").GenerateName()
+    templateFldPath := field.NewPath("spec", "controlPlane", "namingStrategy", "template")
+    if err != nil {
+      allErrs = append(allErrs,
+        field.Invalid(
+          templateFldPath,
+          *clusterClass.Spec.ControlPlane.NamingStrategy.Template,
+          fmt.Sprintf("invalid ControlPlane name template: %v", err),
+        ))
+    } else {
+      for _, err := range validation.IsDNS1123Subdomain(name) {
+        allErrs = append(allErrs, field.Invalid(templateFldPath, *clusterClass.Spec.ControlPlane.NamingStrategy.Template, err))
+      }
+    }
+  }
+
+  for i, md := range clusterClass.Spec.Workers.MachineDeployments {
+    if md.NamingStrategy == nil || md.NamingStrategy.Template == nil {
+      continue
+    }
+    name, err := names.MachineDeploymentNameGenerator(*md.NamingStrategy.Template, "cluster", "mdtopology").GenerateName()
+    templateFldPath := field.NewPath("spec", "workers", "machineDeployments").Index(i).Child("namingStrategy", "template")
+    if err != nil {
+      allErrs = append(allErrs,
+        field.Invalid(
+          templateFldPath,
+          *md.NamingStrategy.Template,
+          fmt.Sprintf("invalid MachineDeployment name template: %v", err),
+        ))
+    } else {
+      for _, err := range validation.IsDNS1123Subdomain(name) {
+        allErrs = append(allErrs, field.Invalid(templateFldPath, *md.NamingStrategy.Template, err))
+      }
+    }
+  }
+
+  return allErrs
+}
+
 // validateMachineHealthCheckClass validates the MachineHealthCheckSpec fields defined in a MachineHealthCheckClass.
 func validateMachineHealthCheckClass(fldPath *field.Path, namepace string, m *clusterv1.MachineHealthCheckClass) field.ErrorList {
   mhc := clusterv1.MachineHealthCheck{
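Editor's note: the validation above renders each template with the placeholder inputs "cluster" (cluster name) and "mdtopology" (topology name) and then checks the result against RFC 1123, so both kinds of mistake sketched below are rejected at admission time. The excerpt is illustrative and the class names are made up; the test cases that follow exercise the same scenarios via the builders.

```yaml
# Illustrative ClusterClass excerpt; both namingStrategy entries would be rejected.
workers:
  machineDeployments:
    - class: worker-a
      namingStrategy:
        # Renders to "cluster-md-" for the placeholder inputs; a trailing "-"
        # is not a valid RFC 1123 subdomain.
        template: "{{ .cluster.name }}-md-"
    - class: worker-b
      namingStrategy:
        # ".invalidkey" is not provided, so template execution fails
        # (missingkey=error).
        template: "{{ .invalidkey }}"
```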
diff --git a/internal/webhooks/clusterclass_test.go b/internal/webhooks/clusterclass_test.go
index 444e9268a058..bdf61f11ef4a 100644
--- a/internal/webhooks/clusterclass_test.go
+++ b/internal/webhooks/clusterclass_test.go
@@ -1140,6 +1140,103 @@ func TestClusterClassValidation(t *testing.T) {
         Build(),
       expectErr: true,
     },
+    {
+      name: "should not return error for valid namingStrategy.template",
+      in: builder.ClusterClass(metav1.NamespaceDefault, "class1").
+        WithInfrastructureClusterTemplate(
+          builder.InfrastructureClusterTemplate(metav1.NamespaceDefault, "infra1").Build()).
+        WithControlPlaneTemplate(
+          builder.ControlPlaneTemplate(metav1.NamespaceDefault, "cp1").
+            Build()).
+        WithControlPlaneNamingStrategy(&clusterv1.ControlPlaneClassNamingStrategy{Template: pointer.String("{{ .cluster.name }}-cp-{{ .random }}")}).
+        WithControlPlaneInfrastructureMachineTemplate(
+          builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "cpInfra1").
+            Build()).
+        WithWorkerMachineDeploymentClasses(
+          *builder.MachineDeploymentClass("aa").
+            WithInfrastructureTemplate(
+              builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "infra1").Build()).
+            WithBootstrapTemplate(
+              builder.BootstrapTemplate(metav1.NamespaceDefault, "bootstrap1").Build()).
+            WithNamingStrategy(&clusterv1.MachineDeploymentClassNamingStrategy{Template: pointer.String("{{ .cluster.name }}-md-{{ .machineDeployment.topologyName }}-{{ .random }}")}).
+            Build()).
+        Build(),
+      expectErr: false,
+    },
+    {
+      name: "should return error for invalid ControlPlane namingStrategy.template",
+      in: builder.ClusterClass(metav1.NamespaceDefault, "class1").
+        WithInfrastructureClusterTemplate(
+          builder.InfrastructureClusterTemplate(metav1.NamespaceDefault, "infra1").Build()).
+        WithControlPlaneTemplate(
+          builder.ControlPlaneTemplate(metav1.NamespaceDefault, "cp1").
+            Build()).
+        WithControlPlaneNamingStrategy(&clusterv1.ControlPlaneClassNamingStrategy{Template: pointer.String("template-cp-{{ .invalidkey }}")}).
+        WithControlPlaneInfrastructureMachineTemplate(
+          builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "cpInfra1").
+            Build()).
+        Build(),
+      expectErr: true,
+    },
+    {
+      name: "should return error for ControlPlane namingStrategy.template when the generated name does not conform to RFC 1123",
+      in: builder.ClusterClass(metav1.NamespaceDefault, "class1").
+        WithInfrastructureClusterTemplate(
+          builder.InfrastructureClusterTemplate(metav1.NamespaceDefault, "infra1").Build()).
+        WithControlPlaneTemplate(
+          builder.ControlPlaneTemplate(metav1.NamespaceDefault, "cp1").
+            Build()).
+        WithControlPlaneNamingStrategy(&clusterv1.ControlPlaneClassNamingStrategy{Template: pointer.String("template-cp-{{ .cluster.name }}-")}).
+        WithControlPlaneInfrastructureMachineTemplate(
+          builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "cpInfra1").
+            Build()).
+        Build(),
+      expectErr: true,
+    },
+    {
+      name: "should return error for invalid MachineDeployment namingStrategy.template",
+      in: builder.ClusterClass(metav1.NamespaceDefault, "class1").
+        WithInfrastructureClusterTemplate(
+          builder.InfrastructureClusterTemplate(metav1.NamespaceDefault, "infra1").Build()).
+        WithControlPlaneTemplate(
+          builder.ControlPlaneTemplate(metav1.NamespaceDefault, "cp1").
+            Build()).
+        WithControlPlaneInfrastructureMachineTemplate(
+          builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "cpInfra1").
+            Build()).
+        WithWorkerMachineDeploymentClasses(
+          *builder.MachineDeploymentClass("aa").
+            WithInfrastructureTemplate(
+              builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "infra1").Build()).
+            WithBootstrapTemplate(
+              builder.BootstrapTemplate(metav1.NamespaceDefault, "bootstrap1").Build()).
+            WithNamingStrategy(&clusterv1.MachineDeploymentClassNamingStrategy{Template: pointer.String("template-md-{{ .cluster.name")}).
+            Build()).
+        Build(),
+      expectErr: true,
+    },
+    {
+      name: "should return error for invalid MachineDeployment namingStrategy.template when the generated name does not conform to RFC 1123",
+      in: builder.ClusterClass(metav1.NamespaceDefault, "class1").
+        WithInfrastructureClusterTemplate(
+          builder.InfrastructureClusterTemplate(metav1.NamespaceDefault, "infra1").Build()).
+        WithControlPlaneTemplate(
+          builder.ControlPlaneTemplate(metav1.NamespaceDefault, "cp1").
+            Build()).
+        WithControlPlaneInfrastructureMachineTemplate(
+          builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "cpInfra1").
+            Build()).
+        WithWorkerMachineDeploymentClasses(
+          *builder.MachineDeploymentClass("aa").
+            WithInfrastructureTemplate(
+              builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "infra1").Build()).
+            WithBootstrapTemplate(
+              builder.BootstrapTemplate(metav1.NamespaceDefault, "bootstrap1").Build()).
+            WithNamingStrategy(&clusterv1.MachineDeploymentClassNamingStrategy{Template: pointer.String("template-md-{{ .cluster.name }}-")}).
+            Build()).
+        Build(),
+      expectErr: true,
+    },
   }
 
   for _, tt := range tests {
diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml
index e6ac76b577b6..9a4ee30ea30e 100644
--- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml
+++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml
@@ -13,6 +13,8 @@ spec:
         kind: DockerMachineTemplate
         apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
         name: quick-start-control-plane
+    namingStrategy:
+      template: "{{ .cluster.name }}-cp-{{ .random }}"
   infrastructure:
     ref:
       apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
@@ -21,6 +23,8 @@ spec:
   workers:
     machineDeployments:
     - class: default-worker
+      namingStrategy:
+        template: "{{ .cluster.name }}-md-{{ .machineDeployment.topologyName }}-{{ .random }}"
       template:
         bootstrap:
           ref: