diff --git a/internal/controllers/topology/cluster/blueprint.go b/internal/controllers/topology/cluster/blueprint.go
index eb5cde78a4b8..b57d5fae2475 100644
--- a/internal/controllers/topology/cluster/blueprint.go
+++ b/internal/controllers/topology/cluster/blueprint.go
@@ -80,7 +80,7 @@ func (r *Reconciler) getBlueprint(ctx context.Context, cluster *clusterv1.Cluste
             return nil, errors.Wrapf(err, "failed to get infrastructure machine template for %s, MachineDeployment class %q", tlog.KObj{Obj: blueprint.ClusterClass}, machineDeploymentClass.Class)
         }

-        // Get the bootstrap machine template.
+        // Get the bootstrap config template.
         machineDeploymentBlueprint.BootstrapTemplate, err = r.getReference(ctx, machineDeploymentClass.Template.Bootstrap.Ref)
         if err != nil {
             return nil, errors.Wrapf(err, "failed to get bootstrap config template for %s, MachineDeployment class %q", tlog.KObj{Obj: blueprint.ClusterClass}, machineDeploymentClass.Class)
@@ -109,7 +109,7 @@ func (r *Reconciler) getBlueprint(ctx context.Context, cluster *clusterv1.Cluste
             return nil, errors.Wrapf(err, "failed to get InfrastructureMachinePoolTemplate for %s, MachinePool class %q", tlog.KObj{Obj: blueprint.ClusterClass}, machinePoolClass.Class)
         }

-        // Get the bootstrap config.
+        // Get the bootstrap config template.
         machinePoolBlueprint.BootstrapTemplate, err = r.getReference(ctx, machinePoolClass.Template.Bootstrap.Ref)
         if err != nil {
             return nil, errors.Wrapf(err, "failed to get bootstrap config for %s, MachinePool class %q", tlog.KObj{Obj: blueprint.ClusterClass}, machinePoolClass.Class)
diff --git a/internal/controllers/topology/cluster/conditions.go b/internal/controllers/topology/cluster/conditions.go
index 30390a5989cc..79ed85b31259 100644
--- a/internal/controllers/topology/cluster/conditions.go
+++ b/internal/controllers/topology/cluster/conditions.go
@@ -106,15 +106,18 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste
     }

     // The topology is not considered as fully reconciled if one of the following is true:
-    // * either the Control Plane or any of the MachineDeployments are still pending to pick up the new version
+    // * either the Control Plane or any of the MachineDeployments/MachinePools are still pending to pick up the new version
     //   (generally happens when upgrading the cluster)
-    // * when there are MachineDeployments for which the upgrade has been deferred
-    // * when new MachineDeployments are pending to be created
+    // * when there are MachineDeployments/MachinePools for which the upgrade has been deferred
+    // * when new MachineDeployments/MachinePools are pending to be created
     //   (generally happens when upgrading the cluster)
     if s.UpgradeTracker.ControlPlane.IsPendingUpgrade ||
         s.UpgradeTracker.MachineDeployments.IsAnyPendingCreate() ||
         s.UpgradeTracker.MachineDeployments.IsAnyPendingUpgrade() ||
-        s.UpgradeTracker.MachineDeployments.DeferredUpgrade() {
+        s.UpgradeTracker.MachineDeployments.DeferredUpgrade() ||
+        s.UpgradeTracker.MachinePools.IsAnyPendingCreate() ||
+        s.UpgradeTracker.MachinePools.IsAnyPendingUpgrade() ||
+        s.UpgradeTracker.MachinePools.DeferredUpgrade() {
         msgBuilder := &strings.Builder{}
         var reason string
@@ -180,6 +183,11 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste
             fmt.Fprintf(msgBuilder, " MachineDeployment(s) %s are upgrading",
                 computeNameList(s.UpgradeTracker.MachineDeployments.UpgradingNames()),
             )
+
+        case len(s.UpgradeTracker.MachinePools.UpgradingNames()) > 0:
+            fmt.Fprintf(msgBuilder, " MachinePool(s) %s are upgrading",
+                computeNameList(s.UpgradeTracker.MachinePools.UpgradingNames()),
+            )
         }

         conditions.Set(
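Note: the condition now stays `TopologyReconciled=false` while MachinePools have pending work, exactly mirroring MachineDeployments. A self-contained sketch of the gating predicate; `workerTracker` and `topologyPending` are hypothetical stand-ins for the internal UpgradeTracker accessors used above:

```go
package main

import "fmt"

// workerTracker is a hypothetical stand-in for the UpgradeTracker's
// MachineDeployments/MachinePools accessors.
type workerTracker struct {
	pendingCreate, pendingUpgrade, deferredUpgrade bool
}

func (t workerTracker) IsAnyPendingCreate() bool  { return t.pendingCreate }
func (t workerTracker) IsAnyPendingUpgrade() bool { return t.pendingUpgrade }
func (t workerTracker) DeferredUpgrade() bool     { return t.deferredUpgrade }

// topologyPending mirrors the if-condition above: the topology is not fully
// reconciled while the control plane or any worker group still has work pending.
func topologyPending(cpPendingUpgrade bool, mds, mps workerTracker) bool {
	return cpPendingUpgrade ||
		mds.IsAnyPendingCreate() || mds.IsAnyPendingUpgrade() || mds.DeferredUpgrade() ||
		mps.IsAnyPendingCreate() || mps.IsAnyPendingUpgrade() || mps.DeferredUpgrade()
}

func main() {
	mps := workerTracker{pendingUpgrade: true}
	fmt.Println(topologyPending(false, workerTracker{}, mps)) // true: an MP still has to pick up the new version
}
```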
upgrading", + computeNameList(s.UpgradeTracker.MachinePools.UpgradingNames()), + ) } conditions.Set( diff --git a/internal/controllers/topology/cluster/current_state.go b/internal/controllers/topology/cluster/current_state.go index 46c891149413..ecd52ef4ef96 100644 --- a/internal/controllers/topology/cluster/current_state.go +++ b/internal/controllers/topology/cluster/current_state.go @@ -289,6 +289,9 @@ func (r *Reconciler) getCurrentMachinePoolState(ctx context.Context, blueprintMa state := make(scope.MachinePoolsStateMap) // List all the machine pools in the current cluster and in a managed topology. + // Note: This is a cached list call. We ensure in reconcile_state that the cache is up-to-date + // after we create/update a MachinePool and we double-check if an MP already exists before + // we create it. mp := &expv1.MachinePoolList{} err := r.Client.List(ctx, mp, client.MatchingLabels{ @@ -328,7 +331,7 @@ func (r *Reconciler) getCurrentMachinePoolState(ctx context.Context, blueprintMa // Gets the infraRef. infraRef := &m.Spec.Template.Spec.InfrastructureRef if infraRef.Name == "" { - return nil, fmt.Errorf("%s does not have a reference to a InfrastructureMachineTemplate", tlog.KObj{Obj: m}) + return nil, fmt.Errorf("%s does not have a reference to a InfrastructureMachinePool", tlog.KObj{Obj: m}) } // If the mpTopology exists in the Cluster, lookup the corresponding mpBluePrint and align diff --git a/internal/controllers/topology/cluster/desired_state.go b/internal/controllers/topology/cluster/desired_state.go index fe8a7df64fb6..e0c69a9dea30 100644 --- a/internal/controllers/topology/cluster/desired_state.go +++ b/internal/controllers/topology/cluster/desired_state.go @@ -84,7 +84,7 @@ func (r *Reconciler) computeDesiredState(ctx context.Context, s *scope.Scope) (* if len(s.Current.MachinePools) > 0 { client, err := r.Tracker.GetClient(ctx, client.ObjectKeyFromObject(s.Current.Cluster)) if err != nil { - return nil, err + return nil, errors.Wrap(err, "failed to check if any MachinePool is upgrading") } // Mark all the MachinePools that are currently upgrading. mpUpgradingNames, err := s.Current.MachinePools.Upgrading(ctx, client) @@ -464,10 +464,11 @@ func (r *Reconciler) computeControlPlaneVersion(ctx context.Context, s *scope.Sc } // If the control plane is not upgrading or scaling, we can assume the control plane is stable. - // However, we should also check for the MachineDeployments upgrading. - // If the MachineDeployments are upgrading, then do not pick up the desiredVersion yet. - // We will pick up the new version after the MachineDeployments finish upgrading. - if len(s.UpgradeTracker.MachineDeployments.UpgradingNames()) > 0 { + // However, we should also check for the MachineDeployments/MachinePools upgrading. + // If the MachineDeployments/MachinePools are upgrading, then do not pick up the desiredVersion yet. + // We will pick up the new version after the MachineDeployments/MachinePools finish upgrading. 
diff --git a/internal/controllers/topology/cluster/desired_state.go b/internal/controllers/topology/cluster/desired_state.go
index fe8a7df64fb6..e0c69a9dea30 100644
--- a/internal/controllers/topology/cluster/desired_state.go
+++ b/internal/controllers/topology/cluster/desired_state.go
@@ -84,7 +84,7 @@ func (r *Reconciler) computeDesiredState(ctx context.Context, s *scope.Scope) (*
     if len(s.Current.MachinePools) > 0 {
         client, err := r.Tracker.GetClient(ctx, client.ObjectKeyFromObject(s.Current.Cluster))
         if err != nil {
-            return nil, err
+            return nil, errors.Wrap(err, "failed to check if any MachinePool is upgrading")
         }
         // Mark all the MachinePools that are currently upgrading.
         mpUpgradingNames, err := s.Current.MachinePools.Upgrading(ctx, client)
@@ -464,10 +464,11 @@ func (r *Reconciler) computeControlPlaneVersion(ctx context.Context, s *scope.Sc
     }

     // If the control plane is not upgrading or scaling, we can assume the control plane is stable.
-    // However, we should also check for the MachineDeployments upgrading.
-    // If the MachineDeployments are upgrading, then do not pick up the desiredVersion yet.
-    // We will pick up the new version after the MachineDeployments finish upgrading.
-    if len(s.UpgradeTracker.MachineDeployments.UpgradingNames()) > 0 {
+    // However, we should also check for the MachineDeployments/MachinePools upgrading.
+    // If the MachineDeployments/MachinePools are upgrading, then do not pick up the desiredVersion yet.
+    // We will pick up the new version after the MachineDeployments/MachinePools finish upgrading.
+    if len(s.UpgradeTracker.MachineDeployments.UpgradingNames()) > 0 ||
+        len(s.UpgradeTracker.MachinePools.UpgradingNames()) > 0 {
         return *currentVersion, nil
     }

@@ -950,7 +951,7 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c
         template:              machinePoolBlueprint.BootstrapTemplate,
         templateClonedFromRef: contract.ObjToRef(machinePoolBlueprint.BootstrapTemplate),
         cluster:               s.Current.Cluster,
-        namePrefix:            bootstrapTemplateNamePrefix(s.Current.Cluster.Name, machinePoolTopology.Name),
+        namePrefix:            bootstrapConfigNamePrefix(s.Current.Cluster.Name, machinePoolTopology.Name),
         currentObjectRef:      currentBootstrapConfigRef,
     })
     if err != nil {
@@ -961,11 +962,11 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c
     if bootstrapObjectLabels == nil {
         bootstrapObjectLabels = map[string]string{}
     }
-    // Add ClusterTopologyMachinePoolLabel to the generated Bootstrap template
+    // Add ClusterTopologyMachinePoolLabel to the generated Bootstrap config
     bootstrapObjectLabels[clusterv1.ClusterTopologyMachinePoolNameLabel] = machinePoolTopology.Name
     desiredMachinePool.BootstrapObject.SetLabels(bootstrapObjectLabels)

-    // Compute the Infrastructure ref.
+    // Compute the InfrastructureMachinePool.
     var currentInfraMachinePoolRef *corev1.ObjectReference
     if currentMachinePool != nil && currentMachinePool.InfrastructureMachinePoolObject != nil {
         currentInfraMachinePoolRef = &currentMachinePool.Object.Spec.Template.Spec.InfrastructureRef
@@ -991,6 +992,11 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c
     version := computeMachinePoolVersion(s, machinePoolTopology, currentMachinePool)

     // Compute values that can be set both in the MachinePoolClass and in the MachinePoolTopology
+    minReadySeconds := machinePoolClass.MinReadySeconds
+    if machinePoolTopology.MinReadySeconds != nil {
+        minReadySeconds = machinePoolTopology.MinReadySeconds
+    }
+
     failureDomains := machinePoolClass.FailureDomains
     if machinePoolTopology.FailureDomains != nil {
         failureDomains = machinePoolTopology.FailureDomains
@@ -1031,8 +1037,9 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c
             Namespace: s.Current.Cluster.Namespace,
         },
         Spec: expv1.MachinePoolSpec{
-            ClusterName:    s.Current.Cluster.Name,
-            FailureDomains: failureDomains,
+            ClusterName:     s.Current.Cluster.Name,
+            MinReadySeconds: minReadySeconds,
+            FailureDomains:  failureDomains,
             Template: clusterv1.MachineTemplateSpec{
                 Spec: clusterv1.MachineSpec{
                     ClusterName: s.Current.Cluster.Name,
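Note: the minReadySeconds handling above follows the usual class-default/topology-override precedence; failureDomains and the other dual-source values use the same shape. A minimal sketch of the rule (`effectiveMinReadySeconds` is an illustrative name):

```go
package sketch

// effectiveMinReadySeconds: an explicit MachinePoolTopology value overrides
// the MachinePoolClass default; otherwise the class default applies.
func effectiveMinReadySeconds(classValue, topologyValue *int32) *int32 {
	if topologyValue != nil {
		return topologyValue
	}
	return classValue
}
```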
diff --git a/internal/controllers/topology/cluster/patches/patch.go b/internal/controllers/topology/cluster/patches/patch.go
index 29ce0fd56a65..c6a4b7811171 100644
--- a/internal/controllers/topology/cluster/patches/patch.go
+++ b/internal/controllers/topology/cluster/patches/patch.go
@@ -57,7 +57,7 @@ func (i PreserveFields) ApplyToHelper(opts *PatchOptions) {
     opts.preserveFields = i
 }

-// patchObject overwrites spec in object with spec.template.spec of patchedTemplate,
+// patchObject overwrites spec in object with spec.template.spec of modifiedObject,
 // while preserving the configured fields.
 // For example, ControlPlane.spec will be overwritten with the patched
 // ControlPlaneTemplate.spec.template.spec but preserving spec.version and spec.replicas
@@ -66,7 +66,7 @@ func patchObject(ctx context.Context, object, modifiedObject *unstructured.Unstr
     return patchUnstructured(ctx, object, modifiedObject, "spec.template.spec", "spec", opts...)
 }

-// patchTemplate overwrites spec.template.spec in template with spec.template.spec of patchedTemplate,
+// patchTemplate overwrites spec.template.spec in template with spec.template.spec of modifiedTemplate,
 // while preserving the configured fields.
 // For example, it's possible to patch BootstrapTemplate.spec.template.spec with a patched
 // BootstrapTemplate.spec.template.spec while preserving fields configured via opts.fieldsToPreserve.
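Note on `patchUnstructured`'s two path arguments: for patchObject, `spec.template.spec` of the modified object is copied over `spec` of the target. A stripped-down illustration using apimachinery's unstructured helpers; `copySpec` is illustrative only, and the real helper additionally honors PreserveFields and logs diffs:

```go
package sketch

import (
	"github.com/pkg/errors"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// copySpec copies modified.spec.template.spec over object.spec.
func copySpec(object, modified *unstructured.Unstructured) error {
	src, found, err := unstructured.NestedFieldCopy(modified.Object, "spec", "template", "spec")
	if err != nil {
		return errors.Wrap(err, "failed to read spec.template.spec from the modified object")
	}
	if !found {
		return errors.New("modified object has no spec.template.spec")
	}
	return unstructured.SetNestedField(object.Object, src, "spec")
}
```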
diff --git a/internal/controllers/topology/cluster/reconcile_state.go b/internal/controllers/topology/cluster/reconcile_state.go
index 6fc58cb21ba9..fda426c11871 100644
--- a/internal/controllers/topology/cluster/reconcile_state.go
+++ b/internal/controllers/topology/cluster/reconcile_state.go
@@ -737,6 +737,10 @@ func (r *Reconciler) reconcileMachinePools(ctx context.Context, s *scope.Scope)
     // Create MachinePools.
     if len(diff.toCreate) > 0 {
+        // In current state we only got the MP list via a cached call.
+        // As a consequence, in order to prevent the creation of duplicate MP due to stale reads,
+        // we are now using a live client to double-check here that the MachinePool
+        // to be created doesn't exist yet.
         currentMPTopologyNames, err := r.getCurrentMachinePools(ctx, s)
         if err != nil {
             return err
@@ -806,8 +810,6 @@ func (r *Reconciler) getCurrentMachinePools(ctx context.Context, s *scope.Scope)
 // createMachinePool creates a MachinePool and the corresponding templates.
 func (r *Reconciler) createMachinePool(ctx context.Context, s *scope.Scope, mp *scope.MachinePoolState) error {
     // Do not create the MachinePool if it is marked as pending create.
-    // This will also block MHC creation because creating the MHC without the corresponding
-    // MachinePool is unnecessary.
     mpTopologyName, ok := mp.Object.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel]
     if !ok || mpTopologyName == "" {
         // Note: This is only an additional safety check and should not happen. The label will always be added when computing
@@ -868,7 +870,7 @@ func (r *Reconciler) createMachinePool(ctx context.Context, s *scope.Scope, mp *
     return nil
 }

-// updateMachinePool updates a MachinePool. Also rotates the corresponding Templates if necessary.
+// updateMachinePool updates a MachinePool. Also updates the corresponding objects if necessary.
 func (r *Reconciler) updateMachinePool(ctx context.Context, s *scope.Scope, currentMP, desiredMP *scope.MachinePoolState) error {
     log := tlog.LoggerFrom(ctx).WithMachinePool(desiredMP.Object)

@@ -882,20 +884,18 @@ func (r *Reconciler) updateMachinePool(ctx context.Context, s *scope.Scope, curr
     cluster := s.Current.Cluster
     infraCtx, _ := log.WithObject(desiredMP.InfrastructureMachinePoolObject).Into(ctx)
     if err := r.reconcileReferencedObject(infraCtx, reconcileReferencedObjectInput{
-        cluster:       cluster,
-        current:       currentMP.InfrastructureMachinePoolObject,
-        desired:       desiredMP.InfrastructureMachinePoolObject,
-        versionGetter: contract.ControlPlane().Version().Get,
+        cluster: cluster,
+        current: currentMP.InfrastructureMachinePoolObject,
+        desired: desiredMP.InfrastructureMachinePoolObject,
     }); err != nil {
         return errors.Wrapf(err, "failed to reconcile %s", tlog.KObj{Obj: currentMP.Object})
     }

     bootstrapCtx, _ := log.WithObject(desiredMP.BootstrapObject).Into(ctx)
     if err := r.reconcileReferencedObject(bootstrapCtx, reconcileReferencedObjectInput{
-        cluster:       cluster,
-        current:       currentMP.BootstrapObject,
-        desired:       desiredMP.BootstrapObject,
-        versionGetter: contract.ControlPlane().Version().Get,
+        cluster: cluster,
+        current: currentMP.BootstrapObject,
+        desired: desiredMP.BootstrapObject,
     }); err != nil {
         return errors.Wrapf(err, "failed to reconcile %s", tlog.KObj{Obj: currentMP.Object})
     }
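Note: the pending-create guard at the top of createMachinePool can be read as the predicate below. This is a sketch assuming the MachinePools tracker exposes `IsPendingCreate` analogous to the MachineDeployments tracker; `shouldCreateMachinePool` and `pendingCreateChecker` are illustrative names, since the real tracker lives in an internal package:

```go
package sketch

import (
	"github.com/pkg/errors"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
)

// pendingCreateChecker stands in for s.UpgradeTracker.MachinePools.
type pendingCreateChecker interface {
	IsPendingCreate(mpTopologyName string) bool
}

// shouldCreateMachinePool: the topology-name label identifies the MP topology,
// and a pending-create hold (e.g. while the control plane is still
// provisioning) skips the create for now; it will be retried later.
func shouldCreateMachinePool(tracker pendingCreateChecker, mp *expv1.MachinePool) (bool, error) {
	mpTopologyName, ok := mp.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel]
	if !ok || mpTopologyName == "" {
		return false, errors.Errorf("missing %q label", clusterv1.ClusterTopologyMachinePoolNameLabel)
	}
	return !tracker.IsPendingCreate(mpTopologyName), nil
}
```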
diff --git a/internal/controllers/topology/cluster/scope/blueprint.go b/internal/controllers/topology/cluster/scope/blueprint.go
index 1f93ba84258e..08e734505f80 100644
--- a/internal/controllers/topology/cluster/scope/blueprint.go
+++ b/internal/controllers/topology/cluster/scope/blueprint.go
@@ -87,7 +87,7 @@ type MachinePoolBlueprint struct {
     // BootstrapTemplate holds the bootstrap template for a MachinePool referenced from ClusterClass.
     BootstrapTemplate *unstructured.Unstructured

-    // InfrastructureMachinePoolTemplate holds the infrastructure machine template for a MachinePool referenced from ClusterClass.
+    // InfrastructureMachinePoolTemplate holds the infrastructure machine pool template for a MachinePool referenced from ClusterClass.
     InfrastructureMachinePoolTemplate *unstructured.Unstructured
 }

diff --git a/internal/controllers/topology/cluster/scope/state.go b/internal/controllers/topology/cluster/scope/state.go
index 7b2ca82a12a5..23d6239cef5e 100644
--- a/internal/controllers/topology/cluster/scope/state.go
+++ b/internal/controllers/topology/cluster/scope/state.go
@@ -161,7 +161,7 @@ type MachinePoolState struct {
 }

 // IsUpgrading determines if the MachinePool is upgrading.
-// A machine deployment is considered upgrading if at least one of the Machines of this
+// A machine pool is considered upgrading if at least one of the Machines of this
 // MachinePool has a different version.
 func (mp *MachinePoolState) IsUpgrading(ctx context.Context, c client.Client) (bool, error) {
     // If the MachinePool has no version there is no definitive way to check if it is upgrading. Therefore, return false.
diff --git a/internal/controllers/topology/cluster/util.go b/internal/controllers/topology/cluster/util.go
index 29eed8494b2d..5fa6711c1393 100644
--- a/internal/controllers/topology/cluster/util.go
+++ b/internal/controllers/topology/cluster/util.go
@@ -37,6 +37,11 @@ func infrastructureMachineTemplateNamePrefix(clusterName, machineDeploymentTopol
     return fmt.Sprintf("%s-%s-", clusterName, machineDeploymentTopologyName)
 }

+// bootstrapConfigNamePrefix calculates the name prefix for a BootstrapConfig.
+func bootstrapConfigNamePrefix(clusterName, machinePoolTopologyName string) string {
+    return fmt.Sprintf("%s-%s-", clusterName, machinePoolTopologyName)
+}
+
 // infrastructureMachinePoolNamePrefix calculates the name prefix for a InfrastructureMachinePool.
 func infrastructureMachinePoolNamePrefix(clusterName, machinePoolTopologyName string) string {
     return fmt.Sprintf("%s-%s-", clusterName, machinePoolTopologyName)
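Note: all of these prefix helpers produce `<cluster>-<topologyName>-`, and the final object name is that prefix plus a generated suffix. A small usage sketch; `SimpleNameGenerator` is used here only to illustrate the suffixing and is not necessarily the generator the controller uses:

```go
package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/storage/names"
)

// Same shape as the helpers above.
func bootstrapConfigNamePrefix(clusterName, machinePoolTopologyName string) string {
	return fmt.Sprintf("%s-%s-", clusterName, machinePoolTopologyName)
}

func main() {
	prefix := bootstrapConfigNamePrefix("my-cluster", "pool-0") // "my-cluster-pool-0-"
	fmt.Println(names.SimpleNameGenerator.GenerateName(prefix)) // e.g. "my-cluster-pool-0-x7f9c"
}
```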
diff --git a/internal/webhooks/clusterclass.go b/internal/webhooks/clusterclass.go
index f1368c041a3a..1e7651cfd7a3 100644
--- a/internal/webhooks/clusterclass.go
+++ b/internal/webhooks/clusterclass.go
@@ -302,7 +302,8 @@ func (webhook *ClusterClass) validateRemovedMachinePoolClassesAreNotUsed(cluster
     for _, c := range clusters {
         for _, machinePoolTopology := range c.Spec.Topology.Workers.MachinePools {
             if removedClasses.Has(machinePoolTopology.Class) {
-                // TODO(killianmuldoon): Same as above for MachineDeployments
+                // TODO(killianmuldoon): Improve error printing here so large scale changes don't flood the error log e.g. deduplication, only example usages given.
+                // TODO: consider if we get the index of the MachinePoolClass being deleted
                 allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "workers", "machinePools"),
                     fmt.Sprintf("MachinePoolClass %q cannot be deleted because it is used by Cluster %q", machinePoolTopology.Class, c.Name),
diff --git a/internal/webhooks/patch_validation.go b/internal/webhooks/patch_validation.go
index 5fe57c1136b7..f7e2166ee458 100644
--- a/internal/webhooks/patch_validation.go
+++ b/internal/webhooks/patch_validation.go
@@ -211,6 +211,7 @@ func validateSelectors(selector clusterv1.PatchSelector, class *clusterv1.Cluste
             err := validateSelectorName(name, path, "machineDeploymentClass", i)
             if err != nil {
                 allErrs = append(allErrs, err)
+                break
             }
             for _, md := range class.Spec.Workers.MachineDeployments {
                 var matches bool
@@ -246,6 +247,7 @@ func validateSelectors(selector clusterv1.PatchSelector, class *clusterv1.Cluste
             err := validateSelectorName(name, path, "machinePoolClass", i)
             if err != nil {
                 allErrs = append(allErrs, err)
+                break
             }
             for _, mp := range class.Spec.Workers.MachinePools {
                 var matches bool
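Note: the added `break` stops validation of a selector name at the first structural error instead of also matching the invalid name against every class, which would only produce noise. A sketch of the fixed control flow; `validateNames` and its `validate` parameter are illustrative stand-ins for the loop in `validateSelectors`:

```go
package sketch

import "k8s.io/apimachinery/pkg/util/validation/field"

// validateNames records the first structural error for a selector name and
// stops, mirroring the break added above.
func validateNames(names []string, path *field.Path, validate func(string, *field.Path, string, int) *field.Error) field.ErrorList {
	var allErrs field.ErrorList
	for i, name := range names {
		if err := validate(name, path, "machinePoolClass", i); err != nil {
			allErrs = append(allErrs, err)
			break // do not keep matching an invalid name against each class
		}
		// ... per-class matching would happen here ...
	}
	return allErrs
}
```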
return errors.New("no version variables found for DockerMachineTemplate patch") } @@ -366,31 +366,10 @@ func patchDockerMachineTemplate(ctx context.Context, dockerMachineTemplate *infr func patchDockerMachinePoolTemplate(ctx context.Context, dockerMachinePoolTemplate *infraexpv1.DockerMachinePoolTemplate, templateVariables map[string]apiextensionsv1.JSON) error { log := ctrl.LoggerFrom(ctx) - // If the DockerMachinePoolTemplate belongs to the ControlPlane, set the images using the ControlPlane version. - // NOTE: ControlPlane version might be different than Cluster.version or MachinePool's versions; - // the builtin variables provides the right version to use. - // NOTE: This works by checking the existence of a builtin variable that exists only for templates liked to the ControlPlane. - cpVersion, found, err := topologymutation.GetStringVariable(templateVariables, "builtin.controlPlane.version") - if err != nil { - return errors.Wrap(err, "could not set customImage to control plane dockerMachinePoolTemplate") - } - if found { - semVer, err := version.ParseMajorMinorPatchTolerant(cpVersion) - if err != nil { - return errors.Wrap(err, "could not parse control plane version") - } - kindMapping := kind.GetMapping(semVer, "") - - log.Info(fmt.Sprintf("Setting MachinePool custom image to %q", kindMapping.Image)) - dockerMachinePoolTemplate.Spec.Template.Spec.Template.CustomImage = kindMapping.Image - // return early if we have successfully patched a control plane dockerMachineTemplate - return nil - } - // If the DockerMachinePoolTemplate belongs to a MachinePool, set the images the MachinePool version. - // NOTE: MachinePool version might be different than Cluster.version or other MachinePool's versions; + // NOTE: MachinePool version might be different from Cluster.version or other MachinePool's versions; // the builtin variables provides the right version to use. - // NOTE: This works by checking the existence of a built in variable that exists only for templates liked to MachinePools. + // NOTE: This works by checking the existence of a builtin variable that exists only for templates linked to MachinePools. mpVersion, found, err := topologymutation.GetStringVariable(templateVariables, "builtin.machinePool.version") if err != nil { return errors.Wrap(err, "could not set customImage to MachinePool DockerMachinePoolTemplate") @@ -407,7 +386,7 @@ func patchDockerMachinePoolTemplate(ctx context.Context, dockerMachinePoolTempla return nil } - // If the Docker Machine didn't have variables for either a control plane or a machinePool return an error. + // If the DockerMachinePoolTemplate didn't have variables for a machinePool return an error. // NOTE: this should never happen because it is enforced by the patch engine. return errors.New("no version variables found for DockerMachinePoolTemplate patch") } diff --git a/test/infrastructure/docker/config/crd/kustomization.yaml b/test/infrastructure/docker/config/crd/kustomization.yaml index 3bcde1a5c866..517188bc269d 100644 --- a/test/infrastructure/docker/config/crd/kustomization.yaml +++ b/test/infrastructure/docker/config/crd/kustomization.yaml @@ -25,6 +25,7 @@ patchesStrategicMerge: - patches/webhook_in_dockermachinetemplates.yaml - patches/webhook_in_dockerclusters.yaml - patches/webhook_in_dockerclustertemplates.yaml + - patches/webhook_in_dockermachinepooltemplates.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. 
diff --git a/test/infrastructure/docker/config/crd/kustomization.yaml b/test/infrastructure/docker/config/crd/kustomization.yaml
index 3bcde1a5c866..517188bc269d 100644
--- a/test/infrastructure/docker/config/crd/kustomization.yaml
+++ b/test/infrastructure/docker/config/crd/kustomization.yaml
@@ -25,6 +25,7 @@ patchesStrategicMerge:
 - patches/webhook_in_dockermachinetemplates.yaml
 - patches/webhook_in_dockerclusters.yaml
 - patches/webhook_in_dockerclustertemplates.yaml
+- patches/webhook_in_dockermachinepooltemplates.yaml
 # +kubebuilder:scaffold:crdkustomizewebhookpatch

 # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix.
@@ -33,6 +34,7 @@ patchesStrategicMerge:
 # patches here are for enabling the CA injection for each CRD
 - patches/cainjection_in_dockermachinetemplates.yaml
 - patches/cainjection_in_dockerclusters.yaml
 - patches/cainjection_in_dockerclustertemplates.yaml
+- patches/cainjection_in_dockermachinepooltemplates.yaml
 # +kubebuilder:scaffold:crdkustomizecainjectionpatch

 # the following config is for teaching kustomize how to do kustomization for CRDs.
diff --git a/test/infrastructure/docker/config/crd/patches/cainjection_in_dockermachinepooltemplates.yaml b/test/infrastructure/docker/config/crd/patches/cainjection_in_dockermachinepooltemplates.yaml
new file mode 100644
index 000000000000..41f08eddb7ba
--- /dev/null
+++ b/test/infrastructure/docker/config/crd/patches/cainjection_in_dockermachinepooltemplates.yaml
@@ -0,0 +1,8 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
+  name: dockermachinepooltemplates.infrastructure.cluster.x-k8s.io
diff --git a/test/infrastructure/docker/config/crd/patches/webhook_in_dockermachinepooltemplates.yaml b/test/infrastructure/docker/config/crd/patches/webhook_in_dockermachinepooltemplates.yaml
new file mode 100644
index 000000000000..7bc2cde83d79
--- /dev/null
+++ b/test/infrastructure/docker/config/crd/patches/webhook_in_dockermachinepooltemplates.yaml
@@ -0,0 +1,19 @@
+# The following patch enables conversion webhook for CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: dockermachinepooltemplates.infrastructure.cluster.x-k8s.io
+spec:
+  conversion:
+    strategy: Webhook
+    webhook:
+      conversionReviewVersions: ["v1", "v1beta1"]
+      clientConfig:
+        # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
+        # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
+        caBundle: Cg==
+        service:
+          namespace: system
+          name: webhook-service
+          path: /convert
diff --git a/test/infrastructure/docker/exp/api/v1beta1/dockermachinepooltemplate_types.go b/test/infrastructure/docker/exp/api/v1beta1/dockermachinepooltemplate_types.go
index b4f16926739e..d08bdf4300a4 100644
--- a/test/infrastructure/docker/exp/api/v1beta1/dockermachinepooltemplate_types.go
+++ b/test/infrastructure/docker/exp/api/v1beta1/dockermachinepooltemplate_types.go
@@ -50,7 +50,7 @@ type DockerMachinePoolTemplateList struct {
 }

 func init() {
-    objectTypes = append(objectTypes, &DockerMachinePool{}, &DockerMachinePoolList{})
+    objectTypes = append(objectTypes, &DockerMachinePoolTemplate{}, &DockerMachinePoolTemplateList{})
 }

 // DockerMachinePoolTemplateResource describes the data needed to create a DockerMachine from a template.
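Note: the `init()` fix matters because `objectTypes` is what ultimately gets registered with the runtime scheme; before the fix, DockerMachinePoolTemplate was never registered and clients could not encode or decode it. A minimal sketch of the registration pattern, assuming the usual kubebuilder layout where the real wiring lives in the package's groupversion_info.go (names here are illustrative):

```go
package sketch

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// objectTypes collects the API types appended by each *_types.go init().
var objectTypes []runtime.Object

var groupVersion = schema.GroupVersion{Group: "infrastructure.cluster.x-k8s.io", Version: "v1beta1"}

// addKnownTypes registers every collected type with the scheme; a type
// missing from objectTypes is simply unknown to clients of the scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(groupVersion, objectTypes...)
	return nil
}
```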