From 53ca4a0a2e1f20bec1378e904ce1d06305d9b990 Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Mon, 8 Jun 2020 17:16:16 +0200 Subject: [PATCH 1/2] add fake client wrapper for setting resource version --- test/helpers/client.go | 44 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 test/helpers/client.go diff --git a/test/helpers/client.go b/test/helpers/client.go new file mode 100644 index 000000000000..23d58ab06bfd --- /dev/null +++ b/test/helpers/client.go @@ -0,0 +1,44 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helpers + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +// NewFakeClientWithScheme creates a new fake client with the given scheme +// for testing. +// You can choose to initialize it with a slice of runtime.Object; all the objects with be given +// a fake ResourceVersion="1" so it will be possible to use optimistic lock when patching conditions. +func NewFakeClientWithScheme(clientScheme *runtime.Scheme, initObjs ...runtime.Object) client.Client { + for _, obj := range initObjs { + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Errorf("failed to get accessor for object: %v", err)) + } + + if accessor.GetResourceVersion() == "" { + accessor.SetResourceVersion("1") + } + } + return fake.NewFakeClientWithScheme(clientScheme, initObjs...) +} From 28b1087a10015beb421b51cbd4eade9653744710 Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Mon, 8 Jun 2020 17:53:19 +0200 Subject: [PATCH 2/2] Add conditions to the KubeadmConfig object --- bootstrap/kubeadm/api/v1alpha2/conversion.go | 1 + .../api/v1alpha2/zz_generated.conversion.go | 1 + .../kubeadm/api/v1alpha3/condition_consts.go | 68 ++++++++++++++ .../v1alpha3/kubeadmbootstrapconfig_types.go | 13 +++ .../api/v1alpha3/zz_generated.deepcopy.go | 8 ++ ...strap.cluster.x-k8s.io_kubeadmconfigs.yaml | 44 +++++++++ .../controllers/kubeadmconfig_controller.go | 86 ++++++++++------- .../kubeadmconfig_controller_test.go | 93 +++++++++++++------ 8 files changed, 252 insertions(+), 62 deletions(-) create mode 100644 bootstrap/kubeadm/api/v1alpha3/condition_consts.go diff --git a/bootstrap/kubeadm/api/v1alpha2/conversion.go b/bootstrap/kubeadm/api/v1alpha2/conversion.go index 8490b773ccff..e2898cf5479f 100644 --- a/bootstrap/kubeadm/api/v1alpha2/conversion.go +++ b/bootstrap/kubeadm/api/v1alpha2/conversion.go @@ -43,6 +43,7 @@ func (src *KubeadmConfig) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.DiskSetup = restored.Spec.DiskSetup dst.Spec.Mounts = restored.Spec.Mounts dst.Spec.Files = restored.Spec.Files + dst.Status.Conditions = restored.Status.Conditions // Track files successfully up-converted. We need this to dedupe // restored files from user-updated files on up-conversion. 
We store diff --git a/bootstrap/kubeadm/api/v1alpha2/zz_generated.conversion.go b/bootstrap/kubeadm/api/v1alpha2/zz_generated.conversion.go index 3d41330a842a..7cf6fcc3587f 100644 --- a/bootstrap/kubeadm/api/v1alpha2/zz_generated.conversion.go +++ b/bootstrap/kubeadm/api/v1alpha2/zz_generated.conversion.go @@ -312,6 +312,7 @@ func autoConvert_v1alpha3_KubeadmConfigStatus_To_v1alpha2_KubeadmConfigStatus(in // WARNING: in.FailureReason requires manual conversion: does not exist in peer-type // WARNING: in.FailureMessage requires manual conversion: does not exist in peer-type // WARNING: in.ObservedGeneration requires manual conversion: does not exist in peer-type + // WARNING: in.Conditions requires manual conversion: does not exist in peer-type return nil } diff --git a/bootstrap/kubeadm/api/v1alpha3/condition_consts.go b/bootstrap/kubeadm/api/v1alpha3/condition_consts.go new file mode 100644 index 000000000000..345ab71e7ba4 --- /dev/null +++ b/bootstrap/kubeadm/api/v1alpha3/condition_consts.go @@ -0,0 +1,68 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha3 + +import clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + +// Conditions and condition Reasons for the KubeadmConfig object + +const ( + // DataSecretAvailableCondition documents the status of the bootstrap secret generation process. + // + // NOTE: When the DataSecret generation starts the process completes immediately and within the + // same reconciliation, so the user will always see a transition from Wait to Generated without having + // evidence that BootstrapSecret generation is started/in progress. + DataSecretAvailableCondition clusterv1.ConditionType = "DataSecretAvailable" + + // WaitingForClusterInfrastructureReason (Severity=Info) document a bootstrap secret generation process + // waiting for the cluster infrastructure to be ready. + // + // NOTE: Having the cluster infrastructure ready is a pre-condition for starting to create machines; + // the KubeadmConfig controller ensure this pre-condition is satisfied. + WaitingForClusterInfrastructureReason = "WaitingForClusterInfrastructure" + + // WaitingForControlPlaneAvailableReason (Severity=Info) document a bootstrap secret generation process + // waiting for the control plane machine to be available. + // + // NOTE: Having the control plane machine available is a pre-condition for joining additional control planes + // or workers nodes. + WaitingForControlPlaneAvailableReason = "WaitingForControlPlaneAvailable" + + // DataSecretGenerationFailedReason (Severity=Warning) documents a KubeadmConfig controller detecting + // an error while generating a data secret; those kind of errors are usually due to misconfigurations + // and user intervention is required to get them fixed. + DataSecretGenerationFailedReason = "DataSecretGenerationFailed" +) + +const ( + // CertificatesAvailableCondition documents that cluster certificates are available. 
+ // + // NOTE: Cluster certificates are generated only for the KubeadmConfig object linked to the initial control plane + // machine, if the cluster is not using a control plane ref object, if the certificates are not provided + // by the users. + // IMPORTANT: This condition won't be re-created after clusterctl move. + CertificatesAvailableCondition clusterv1.ConditionType = "CertificatesAvailable" + + // CertificatesGenerationFailedReason (Severity=Warning) documents a KubeadmConfig controller detecting + // an error while generating certificates; those kind of errors are usually temporary and the controller + // automatically recover from them. + CertificatesGenerationFailedReason = "CertificatesGenerationFailed" + + // CertificatesCorruptedReason (Severity=Error) documents a KubeadmConfig controller detecting + // an error while while retrieving certificates for a joining node. + CertificatesCorruptedReason = "CertificatesCorrupted" +) diff --git a/bootstrap/kubeadm/api/v1alpha3/kubeadmbootstrapconfig_types.go b/bootstrap/kubeadm/api/v1alpha3/kubeadmbootstrapconfig_types.go index a2464eb6354a..31edf633a4ed 100644 --- a/bootstrap/kubeadm/api/v1alpha3/kubeadmbootstrapconfig_types.go +++ b/bootstrap/kubeadm/api/v1alpha3/kubeadmbootstrapconfig_types.go @@ -18,6 +18,7 @@ package v1alpha3 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" kubeadmv1beta1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/v1beta1" ) @@ -124,6 +125,10 @@ type KubeadmConfigStatus struct { // ObservedGeneration is the latest generation observed by the controller. // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions defines current service state of the KubeadmConfig. + // +optional + Conditions clusterv1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -140,6 +145,14 @@ type KubeadmConfig struct { Status KubeadmConfigStatus `json:"status,omitempty"` } +func (c *KubeadmConfig) GetConditions() clusterv1.Conditions { + return c.Status.Conditions +} + +func (c *KubeadmConfig) SetConditions(conditions clusterv1.Conditions) { + c.Status.Conditions = conditions +} + // +kubebuilder:object:root=true // KubeadmConfigList contains a list of KubeadmConfig diff --git a/bootstrap/kubeadm/api/v1alpha3/zz_generated.deepcopy.go b/bootstrap/kubeadm/api/v1alpha3/zz_generated.deepcopy.go index 3d27bff3d41e..1f016861de1e 100644 --- a/bootstrap/kubeadm/api/v1alpha3/zz_generated.deepcopy.go +++ b/bootstrap/kubeadm/api/v1alpha3/zz_generated.deepcopy.go @@ -22,6 +22,7 @@ package v1alpha3 import ( "k8s.io/apimachinery/pkg/runtime" + apiv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/v1beta1" ) @@ -277,6 +278,13 @@ func (in *KubeadmConfigStatus) DeepCopyInto(out *KubeadmConfigStatus) { *out = make([]byte, len(*in)) copy(*out, *in) } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(apiv1alpha3.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeadmConfigStatus. 
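For reference, a minimal sketch of how the accessors above are meant to be consumed: KubeadmConfig now satisfies the conditions Getter/Setter interfaces, so the util/conditions helpers used later in this patch can operate on it directly. Package and function names below are illustrative only, not part of the patch.

package example

import (
	"fmt"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// markAndRead round-trips a condition through the new accessors.
func markAndRead() {
	config := &bootstrapv1.KubeadmConfig{}

	// While waiting for cluster infrastructure, the data secret is not available yet.
	conditions.MarkFalse(config, bootstrapv1.DataSecretAvailableCondition,
		bootstrapv1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "")

	// Once the bootstrap secret has been generated, the condition flips to true.
	conditions.MarkTrue(config, bootstrapv1.DataSecretAvailableCondition)

	// Consumers read conditions back through the same helpers.
	if c := conditions.Get(config, bootstrapv1.DataSecretAvailableCondition); c != nil {
		fmt.Println(c.Type, c.Status, c.Reason)
	}
}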
diff --git a/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigs.yaml b/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigs.yaml index a0047aa0d84c..54a4abe5d7c0 100644 --- a/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigs.yaml +++ b/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigs.yaml @@ -1738,6 +1738,50 @@ spec: be removed in a future version. Switch to DataSecretName." format: byte type: string + conditions: + description: Conditions defines current service state of the KubeadmConfig. + items: + description: Condition defines an observation of a Cluster API resource + operational state. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. This should be when the underlying condition changed. + If that is not known, then using the time when the API field + changed is acceptable. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. This field may be empty. + type: string + reason: + description: The reason for the condition's last transition + in CamelCase. The specific API may choose whether or not this + field is considered a guaranteed API. This field may not be + empty. + type: string + severity: + description: Severity provides an explicit classification of + Reason code, so the users or machines can immediately understand + the current situation and act accordingly. The Severity field + MUST be set only when Status=False. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. + type: string + required: + - status + - type + type: object + type: array dataSecretName: description: DataSecretName is the name of the secret that stores the bootstrap data script. 
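The conditions entry added to the CRD above serializes the clusterv1.Condition type. As an illustrative value that satisfies the schema (an assumption about typical content, not taken from this patch), with Severity set only because Status is False:

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
)

// Only status and type are required by the schema above.
var exampleCondition = clusterv1.Condition{
	Type:               clusterv1.ConditionType("DataSecretAvailable"),
	Status:             corev1.ConditionFalse,
	Severity:           clusterv1.ConditionSeverityInfo,
	Reason:             "WaitingForClusterInfrastructure",
	LastTransitionTime: metav1.Now(),
}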
diff --git a/bootstrap/kubeadm/controllers/kubeadmconfig_controller.go b/bootstrap/kubeadm/controllers/kubeadmconfig_controller.go index e3a5c2ac983a..6c9b9963cf88 100644 --- a/bootstrap/kubeadm/controllers/kubeadmconfig_controller.go +++ b/bootstrap/kubeadm/controllers/kubeadmconfig_controller.go @@ -43,6 +43,7 @@ import ( "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" "sigs.k8s.io/cluster-api/util/secret" @@ -142,7 +143,7 @@ func (r *KubeadmConfigReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, re if apierrors.IsNotFound(err) { return ctrl.Result{}, nil } - log.Error(err, "failed to get config") + log.Error(err, "Failed to get config") return ctrl.Result{}, err } @@ -153,7 +154,7 @@ func (r *KubeadmConfigReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, re return ctrl.Result{}, nil } if err != nil { - log.Error(err, "failed to get owner") + log.Error(err, "Failed to get owner") return ctrl.Result{}, err } if configOwner == nil { @@ -173,7 +174,7 @@ func (r *KubeadmConfigReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, re log.Info("Cluster does not exist yet, waiting until it is created") return ctrl.Result{}, nil } - log.Error(err, "could not get cluster with metadata") + log.Error(err, "Could not get cluster with metadata") return ctrl.Result{}, err } @@ -195,23 +196,38 @@ func (r *KubeadmConfigReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, re return ctrl.Result{}, err } + // Attempt to Patch the KubeadmConfig object and status after each reconciliation if no error occurs. + defer func() { + // always update the readyCondition; the summary is represented using the "1 of x completed" notation. + conditions.SetSummary(config, conditions.WithStepCounter(1)) + + if err := patchHelper.Patch(ctx, config); err != nil { + log.Error(rerr, "Failed to patch config") + if rerr == nil { + rerr = err + } + } + }() + switch { // Wait for the infrastructure to be ready. case !cluster.Status.InfrastructureReady: log.Info("Cluster infrastructure is not ready, waiting") + conditions.MarkFalse(config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") return ctrl.Result{}, nil // Migrate plaintext data to secret. case config.Status.BootstrapData != nil && config.Status.DataSecretName == nil: if err := r.storeBootstrapData(ctx, scope, config.Status.BootstrapData); err != nil { return ctrl.Result{}, err } - return ctrl.Result{}, patchHelper.Patch(ctx, config) + return ctrl.Result{}, nil // Reconcile status for machines that already have a secret reference, but our status isn't up to date. // This case solves the pivoting scenario (or a backup restore) which doesn't preserve the status subresource on objects. case configOwner.DataSecretName() != nil && (!config.Status.Ready || config.Status.DataSecretName == nil): config.Status.Ready = true config.Status.DataSecretName = configOwner.DataSecretName() - return ctrl.Result{}, patchHelper.Patch(ctx, config) + conditions.MarkTrue(config, bootstrapv1.DataSecretAvailableCondition) + return ctrl.Result{}, nil // Status is ready means a config has been generated. case config.Status.Ready: // If the BootstrapToken has been generated for a join and the infrastructure is not ready. 
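A condensed sketch of the deferred patch flow introduced in the hunk above; names such as reconcileSketch and infraReady are illustrative. The sketch surfaces the patch error only when the reconciliation itself did not already fail, which is the intent of the rerr handling in the defer.

package example

import (
	"context"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3"
	"sigs.k8s.io/cluster-api/util/conditions"
	"sigs.k8s.io/cluster-api/util/patch"
)

// reconcileSketch sets conditions as reconciliation progresses and relies on a
// single deferred patch to persist them together with the summarized Ready condition.
func reconcileSketch(ctx context.Context, patchHelper *patch.Helper, config *bootstrapv1.KubeadmConfig, infraReady bool) (rerr error) {
	defer func() {
		// Roll the individual conditions up into Ready using the "1 of x completed" step counter notation.
		conditions.SetSummary(config, conditions.WithStepCounter(1))

		// Patch on the way out; keep the first error that occurred.
		if err := patchHelper.Patch(ctx, config); err != nil && rerr == nil {
			rerr = err
		}
	}()

	if !infraReady {
		conditions.MarkFalse(config, bootstrapv1.DataSecretAvailableCondition,
			bootstrapv1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "")
		return nil
	}

	conditions.MarkTrue(config, bootstrapv1.DataSecretAvailableCondition)
	return nil
}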
@@ -222,11 +238,11 @@ func (r *KubeadmConfigReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, re remoteClient, err := r.remoteClientGetter(ctx, r.Client, util.ObjectKey(cluster), r.scheme) if err != nil { - log.Error(err, "error creating remote cluster client") + log.Error(err, "Error creating remote cluster client") return ctrl.Result{}, err } - log.Info("refreshing token until the infrastructure has a chance to consume it") + log.Info("Refreshing token until the infrastructure has a chance to consume it") err = refreshToken(remoteClient, token) if err != nil { // It would be nice to re-create the bootstrap token if the error was "not found", but we have no way to update the Machine's bootstrap data @@ -241,15 +257,6 @@ func (r *KubeadmConfigReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, re return ctrl.Result{}, nil } - // Attempt to Patch the KubeadmConfig object and status after each reconciliation if no error occurs. - defer func() { - if rerr == nil { - if rerr = patchHelper.Patch(ctx, config); rerr != nil { - log.Error(rerr, "failed to patch config") - } - } - }() - if !cluster.Status.ControlPlaneInitialized { return r.handleClusterNotInitialized(ctx, scope) } @@ -276,6 +283,13 @@ func (r *KubeadmConfigReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, re } func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Context, scope *Scope) (_ ctrl.Result, reterr error) { + // initialize the DataSecretAvailableCondition if missing. + // this is required in order to avoid the condition's LastTransitionTime to flicker in case of errors surfacing + // using the DataSecretGeneratedFailedReason + if !(conditions.Has(scope.Config, bootstrapv1.DataSecretAvailableCondition)) { + conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "") + } + // if it's NOT a control plane machine, requeue if !scope.ConfigOwner.IsControlPlaneMachine() { return ctrl.Result{RequeueAfter: 30 * time.Second}, nil @@ -326,7 +340,7 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex } initdata, err := kubeadmv1beta1.ConfigurationToYAML(scope.Config.Spec.InitConfiguration) if err != nil { - scope.Error(err, "failed to marshal init configuration") + scope.Error(err, "Failed to marshal init configuration") return ctrl.Result{}, err } @@ -344,7 +358,7 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex clusterdata, err := kubeadmv1beta1.ConfigurationToYAML(scope.Config.Spec.ClusterConfiguration) if err != nil { - scope.Error(err, "failed to marshal cluster configuration") + scope.Error(err, "Failed to marshal cluster configuration") return ctrl.Result{}, err } @@ -356,9 +370,10 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex *metav1.NewControllerRef(scope.Config, bootstrapv1.GroupVersion.WithKind("KubeadmConfig")), ) if err != nil { - scope.Error(err, "unable to lookup or create cluster certificates") + conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) return ctrl.Result{}, err } + conditions.MarkTrue(scope.Config, bootstrapv1.CertificatesAvailableCondition) verbosityFlag := "" if scope.Config.Spec.Verbosity != nil { @@ -367,7 +382,7 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex files, err := r.resolveFiles(ctx, 
scope.Config, certificates.AsFiles()...) if err != nil { - scope.Error(err, "Failed to resolve files") + conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) return ctrl.Result{}, err } @@ -387,12 +402,12 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex Certificates: certificates, }) if err != nil { - scope.Error(err, "failed to generate cloud init for bootstrap control plane") + scope.Error(err, "Failed to generate cloud init for bootstrap control plane") return ctrl.Result{}, err } if err := r.storeBootstrapData(ctx, scope, cloudInitData); err != nil { - scope.Error(err, "failed to store bootstrap data") + scope.Error(err, "Failed to store bootstrap data") return ctrl.Result{}, err } @@ -407,13 +422,14 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) util.ObjectKey(scope.Cluster), ) if err != nil { - scope.Error(err, "unable to lookup cluster certificates") + conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesCorruptedReason, clusterv1.ConditionSeverityError, err.Error()) return ctrl.Result{}, err } if err := certificates.EnsureAllExist(); err != nil { - scope.Error(err, "Missing certificates") + conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesCorruptedReason, clusterv1.ConditionSeverityError, err.Error()) return ctrl.Result{}, err } + conditions.MarkTrue(scope.Config, bootstrapv1.CertificatesAvailableCondition) // ensure that joinConfiguration.Discovery is properly set for joining node on the current cluster if err := r.reconcileDiscovery(ctx, scope.Cluster, scope.Config, certificates); err != nil { @@ -426,7 +442,7 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) joinData, err := kubeadmv1beta1.ConfigurationToYAML(scope.Config.Spec.JoinConfiguration) if err != nil { - scope.Error(err, "failed to marshal join configuration") + scope.Error(err, "Failed to marshal join configuration") return ctrl.Result{}, err } @@ -443,7 +459,7 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) files, err := r.resolveFiles(ctx, scope.Config) if err != nil { - scope.Error(err, "Failed to resolve files") + conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) return ctrl.Result{}, err } @@ -462,12 +478,12 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) JoinConfiguration: joinData, }) if err != nil { - scope.Error(err, "failed to create a worker join configuration") + scope.Error(err, "Failed to create a worker join configuration") return ctrl.Result{}, err } if err := r.storeBootstrapData(ctx, scope, cloudJoinData); err != nil { - scope.Error(err, "failed to store bootstrap data") + scope.Error(err, "Failed to store bootstrap data") return ctrl.Result{}, err } return ctrl.Result{}, nil @@ -489,12 +505,14 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S util.ObjectKey(scope.Cluster), ) if err != nil { - scope.Error(err, "unable to lookup cluster certificates") + conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesCorruptedReason, clusterv1.ConditionSeverityError, err.Error()) return ctrl.Result{}, err } if err := 
certificates.EnsureAllExist(); err != nil { + conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesCorruptedReason, clusterv1.ConditionSeverityError, err.Error()) return ctrl.Result{}, err } + conditions.MarkTrue(scope.Config, bootstrapv1.CertificatesAvailableCondition) // ensure that joinConfiguration.Discovery is properly set for joining node on the current cluster if err := r.reconcileDiscovery(ctx, scope.Cluster, scope.Config, certificates); err != nil { @@ -507,7 +525,7 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S joinData, err := kubeadmv1beta1.ConfigurationToYAML(scope.Config.Spec.JoinConfiguration) if err != nil { - scope.Error(err, "failed to marshal join configuration") + scope.Error(err, "Failed to marshal join configuration") return ctrl.Result{}, err } @@ -520,7 +538,7 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S files, err := r.resolveFiles(ctx, scope.Config, certificates.AsFiles()...) if err != nil { - scope.Error(err, "Failed to resolve files") + conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) return ctrl.Result{}, err } @@ -540,12 +558,12 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S }, }) if err != nil { - scope.Error(err, "failed to create a control plane join configuration") + scope.Error(err, "Failed to create a control plane join configuration") return ctrl.Result{}, err } if err := r.storeBootstrapData(ctx, scope, cloudJoinData); err != nil { - scope.Error(err, "failed to store bootstrap data") + scope.Error(err, "Failed to store bootstrap data") return ctrl.Result{}, err } @@ -803,8 +821,8 @@ func (r *KubeadmConfigReconciler) storeBootstrapData(ctx context.Context, scope } r.Log.Info("bootstrap data secret for KubeadmConfig already exists", "secret", secret.Name, "KubeadmConfig", scope.Config.Name) } - scope.Config.Status.DataSecretName = pointer.StringPtr(secret.Name) scope.Config.Status.Ready = true + conditions.MarkTrue(scope.Config, bootstrapv1.DataSecretAvailableCondition) return nil } diff --git a/bootstrap/kubeadm/controllers/kubeadmconfig_controller_test.go b/bootstrap/kubeadm/controllers/kubeadmconfig_controller_test.go index 2d0745c562e5..f531d0a47d83 100644 --- a/bootstrap/kubeadm/controllers/kubeadmconfig_controller_test.go +++ b/bootstrap/kubeadm/controllers/kubeadmconfig_controller_test.go @@ -25,7 +25,6 @@ import ( "time" . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -39,11 +38,12 @@ import ( fakeremote "sigs.k8s.io/cluster-api/controllers/remote/fake" expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha3" "sigs.k8s.io/cluster-api/feature" + "sigs.k8s.io/cluster-api/test/helpers" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/secret" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -86,7 +86,7 @@ func TestKubeadmConfigReconciler_MachineToBootstrapMapFuncReturn(t *testing.T) { } machineObjs = append(machineObjs, m) } - fakeClient := fake.NewFakeClientWithScheme(setupScheme(), objs...) 
+ fakeClient := helpers.NewFakeClientWithScheme(setupScheme(), objs...) reconciler := &KubeadmConfigReconciler{ Log: log.Log, Client: fakeClient, @@ -114,7 +114,7 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfKubeadmConfigIsReady(t * objects := []runtime.Object{ config, } - myclient := fake.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) k := &KubeadmConfigReconciler{ Log: log.Log, @@ -144,7 +144,7 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnErrorIfReferencedMachineIsNotFo // intentionally omitting machine config, } - myclient := fake.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) k := &KubeadmConfigReconciler{ Log: log.Log, @@ -173,7 +173,7 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfMachineHasDataSecretName machine, config, } - myclient := fake.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) k := &KubeadmConfigReconciler{ Log: log.Log, @@ -207,7 +207,7 @@ func TestKubeadmConfigReconciler_Reconcile_MigrateToSecret(t *testing.T) { machine, config, } - myclient := fake.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) k := &KubeadmConfigReconciler{ Log: log.Log, @@ -254,7 +254,7 @@ func TestKubeadmConfigReconciler_ReturnEarlyIfClusterInfraNotReady(t *testing.T) machine, config, } - myclient := fake.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) k := &KubeadmConfigReconciler{ Log: log.Log, @@ -272,6 +272,7 @@ func TestKubeadmConfigReconciler_ReturnEarlyIfClusterInfraNotReady(t *testing.T) actualResult, actualError := k.Reconcile(request) g.Expect(actualResult).To(Equal(expectedResult)) g.Expect(actualError).NotTo(HaveOccurred()) + assertHasFalseCondition(g, myclient, request, bootstrapv1.DataSecretAvailableCondition, clusterv1.ConditionSeverityInfo, bootstrapv1.WaitingForClusterInfrastructureReason) } // Return early If the owning machine does not have an associated cluster @@ -285,7 +286,7 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfMachineHasNoCluster(t *t machine, config, } - myclient := fake.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) k := &KubeadmConfigReconciler{ Log: log.Log, @@ -313,7 +314,7 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnNilIfMachineDoesNotHaveAssociat machine, config, } - myclient := fake.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) k := &KubeadmConfigReconciler{ Log: log.Log, @@ -343,7 +344,7 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnNilIfAssociatedClusterIsNotFoun machine, config, } - myclient := fake.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) k := &KubeadmConfigReconciler{ Log: log.Log, @@ -409,7 +410,7 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueJoiningNodesIfControlPlaneNotI t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - myclient := fake.NewFakeClientWithScheme(setupScheme(), tc.objects...) + myclient := helpers.NewFakeClientWithScheme(setupScheme(), tc.objects...) 
k := &KubeadmConfigReconciler{ Log: log.Log, @@ -421,6 +422,7 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueJoiningNodesIfControlPlaneNotI g.Expect(err).NotTo(HaveOccurred()) g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(30 * time.Second)) + assertHasFalseCondition(g, myclient, tc.request, bootstrapv1.DataSecretAvailableCondition, clusterv1.ConditionSeverityInfo, bootstrapv1.WaitingForControlPlaneAvailableReason) }) } } @@ -442,7 +444,7 @@ func TestKubeadmConfigReconciler_Reconcile_GenerateCloudConfigData(t *testing.T) } objects = append(objects, createSecrets(t, cluster, controlPlaneInitConfig)...) - myclient := fake.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) k := &KubeadmConfigReconciler{ Log: log.Log, @@ -465,6 +467,8 @@ func TestKubeadmConfigReconciler_Reconcile_GenerateCloudConfigData(t *testing.T) g.Expect(err).NotTo(HaveOccurred()) g.Expect(cfg.Status.Ready).To(BeTrue()) g.Expect(cfg.Status.DataSecretName).NotTo(BeNil()) + assertHasTrueCondition(g, myclient, request, bootstrapv1.CertificatesAvailableCondition) + assertHasTrueCondition(g, myclient, request, bootstrapv1.DataSecretAvailableCondition) // Ensure that we don't fail trying to refresh any bootstrap tokens _, err = k.Reconcile(request) @@ -492,7 +496,7 @@ func TestKubeadmConfigReconciler_Reconcile_ErrorIfJoiningControlPlaneHasInvalidC controlPlaneJoinConfig, } objects = append(objects, createSecrets(t, cluster, controlPlaneInitConfig)...) - myclient := fake.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) k := &KubeadmConfigReconciler{ Log: log.Log, @@ -531,7 +535,7 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueIfControlPlaneIsMissingAPIEndp } objects = append(objects, createSecrets(t, cluster, controlPlaneInitConfig)...) - myclient := fake.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) k := &KubeadmConfigReconciler{ Log: log.Log, @@ -604,7 +608,7 @@ func TestReconcileIfJoinNodesAndControlPlaneIsReady(t *testing.T) { config, } objects = append(objects, createSecrets(t, cluster, config)...) - myclient := fake.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) k := &KubeadmConfigReconciler{ Log: log.Log, Client: myclient, @@ -627,6 +631,7 @@ func TestReconcileIfJoinNodesAndControlPlaneIsReady(t *testing.T) { g.Expect(err).NotTo(HaveOccurred()) g.Expect(cfg.Status.Ready).To(BeTrue()) g.Expect(cfg.Status.DataSecretName).NotTo(BeNil()) + assertHasTrueCondition(g, myclient, request, bootstrapv1.DataSecretAvailableCondition) l := &corev1.SecretList{} err = myclient.List(context.Background(), l, client.ListOption(client.InNamespace(metav1.NamespaceSystem))) @@ -680,7 +685,7 @@ func TestReconcileIfJoinNodePoolsAndControlPlaneIsReady(t *testing.T) { config, } objects = append(objects, createSecrets(t, cluster, config)...) - myclient := fake.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) k := &KubeadmConfigReconciler{ Log: log.Log, Client: myclient, @@ -735,7 +740,7 @@ func TestKubeadmConfigSecretCreatedStatusNotPatched(t *testing.T) { } objects = append(objects, createSecrets(t, cluster, initConfig)...) - myclient := fake.NewFakeClientWithScheme(setupScheme(), objects...) 
+ myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) k := &KubeadmConfigReconciler{ Log: log.Log, Client: myclient, @@ -808,7 +813,7 @@ func TestBootstrapTokenTTLExtension(t *testing.T) { } objects = append(objects, createSecrets(t, cluster, initConfig)...) - myclient := fake.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) k := &KubeadmConfigReconciler{ Log: log.Log, Client: myclient, @@ -937,7 +942,7 @@ func TestBootstrapTokenTTLExtension(t *testing.T) { func TestKubeadmConfigReconciler_Reconcile_DiscoveryReconcileBehaviors(t *testing.T) { k := &KubeadmConfigReconciler{ Log: log.Log, - Client: fake.NewFakeClientWithScheme(setupScheme()), + Client: helpers.NewFakeClientWithScheme(setupScheme()), KubeadmInitLock: &myInitLocker{}, remoteClientGetter: fakeremote.NewClusterClient, } @@ -1266,7 +1271,7 @@ func TestKubeadmConfigReconciler_Reconcile_AlwaysCheckCAVerificationUnlessReques t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - myclient := fake.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) reconciler := KubeadmConfigReconciler{ Client: myclient, KubeadmInitLock: &myInitLocker{}, @@ -1307,7 +1312,7 @@ func TestKubeadmConfigReconciler_ClusterToKubeadmConfigs(t *testing.T) { expectedNames = append(expectedNames, configName) objs = append(objs, m, c) } - fakeClient := fake.NewFakeClientWithScheme(setupScheme(), objs...) + fakeClient := helpers.NewFakeClientWithScheme(setupScheme(), objs...) reconciler := &KubeadmConfigReconciler{ Log: log.Log, Client: fakeClient, @@ -1351,7 +1356,7 @@ func TestKubeadmConfigReconciler_Reconcile_DoesNotFailIfCASecretsAlreadyExist(t "tls.key": []byte("hello world"), }, } - fakec := fake.NewFakeClientWithScheme(setupScheme(), []runtime.Object{cluster, m, c, scrt}...) + fakec := helpers.NewFakeClientWithScheme(setupScheme(), []runtime.Object{cluster, m, c, scrt}...) reconciler := &KubeadmConfigReconciler{ Log: log.Log, Client: fakec, @@ -1384,7 +1389,7 @@ func TestKubeadmConfigReconciler_Reconcile_ExactlyOneControlPlaneMachineInitiali controlPlaneInitMachineSecond, controlPlaneInitConfigSecond, } - myclient := fake.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) k := &KubeadmConfigReconciler{ Log: log.Log, Client: myclient, @@ -1414,8 +1419,8 @@ func TestKubeadmConfigReconciler_Reconcile_ExactlyOneControlPlaneMachineInitiali g.Expect(result.RequeueAfter).To(Equal(30 * time.Second)) } -// No patch should be applied if there is an error in reconcile -func TestKubeadmConfigReconciler_Reconcile_DoNotPatchWhenErrorOccurred(t *testing.T) { +// Patch should be applied if there is an error in reconcile +func TestKubeadmConfigReconciler_Reconcile_PatchWhenErrorOccurred(t *testing.T) { g := NewWithT(t) cluster := newCluster("cluster") @@ -1440,7 +1445,7 @@ func TestKubeadmConfigReconciler_Reconcile_DoNotPatchWhenErrorOccurred(t *testin objects = append(objects, s) } - myclient := fake.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) 
k := &KubeadmConfigReconciler{ Log: log.Log, Client: myclient, @@ -1462,7 +1467,7 @@ func TestKubeadmConfigReconciler_Reconcile_DoNotPatchWhenErrorOccurred(t *testin cfg, err := getKubeadmConfig(myclient, "control-plane-init-cfg") g.Expect(err).NotTo(HaveOccurred()) // check if the kubeadm config has been patched - g.Expect(cfg.Spec.InitConfiguration).To(BeNil()) + g.Expect(cfg.Spec.InitConfiguration).ToNot(BeNil()) } // test utils @@ -1676,3 +1681,35 @@ func (m *myInitLocker) Unlock(_ context.Context, _ *clusterv1.Cluster) bool { } return true } + +func assertHasFalseCondition(g *WithT, myclient client.Client, req ctrl.Request, t clusterv1.ConditionType, s clusterv1.ConditionSeverity, r string) { + config := &bootstrapv1.KubeadmConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: req.Name, + Namespace: req.Namespace, + }, + } + + configKey, _ := client.ObjectKeyFromObject(config) + g.Expect(myclient.Get(context.TODO(), configKey, config)).To(Succeed()) + c := conditions.Get(config, t) + g.Expect(c).ToNot(BeNil()) + g.Expect(c.Status).To(Equal(corev1.ConditionFalse)) + g.Expect(c.Severity).To(Equal(s)) + g.Expect(c.Reason).To(Equal(r)) +} + +func assertHasTrueCondition(g *WithT, myclient client.Client, req ctrl.Request, t clusterv1.ConditionType) { + config := &bootstrapv1.KubeadmConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: req.Name, + Namespace: req.Namespace, + }, + } + + configKey, _ := client.ObjectKeyFromObject(config) + g.Expect(myclient.Get(context.TODO(), configKey, config)).To(Succeed()) + c := conditions.Get(config, t) + g.Expect(c).ToNot(BeNil()) + g.Expect(c.Status).To(Equal(corev1.ConditionTrue)) +}
+}
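To tie the two patches together, a sketch of a test that exercises the new helpers end to end; it assumes only APIs already used in this series (helpers.NewFakeClientWithScheme from patch 1, util/patch, util/conditions) plus the generated AddToScheme functions. The fake client wrapper seeds ResourceVersion="1" precisely so that the patch helper can patch status conditions against the fake client.

package helpers_test

import (
	"context"
	"testing"

	. "github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3"
	"sigs.k8s.io/cluster-api/test/helpers"
	"sigs.k8s.io/cluster-api/util/conditions"
	"sigs.k8s.io/cluster-api/util/patch"
)

// TestPatchConditionsWithFakeClient marks a condition and patches it through
// the fake client created by the new test helper.
func TestPatchConditionsWithFakeClient(t *testing.T) {
	g := NewWithT(t)

	scheme := runtime.NewScheme()
	g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed())
	g.Expect(bootstrapv1.AddToScheme(scheme)).To(Succeed())

	config := &bootstrapv1.KubeadmConfig{
		ObjectMeta: metav1.ObjectMeta{Name: "cfg", Namespace: "default"},
	}
	myclient := helpers.NewFakeClientWithScheme(scheme, config)

	patchHelper, err := patch.NewHelper(config, myclient)
	g.Expect(err).NotTo(HaveOccurred())

	conditions.MarkTrue(config, bootstrapv1.DataSecretAvailableCondition)
	g.Expect(patchHelper.Patch(context.TODO(), config)).To(Succeed())
}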