From efda6712b7b8c517a74ebad7e8b734b143a9313c Mon Sep 17 00:00:00 2001
From: killianmuldoon
Date: Wed, 27 Jul 2022 16:02:21 +0100
Subject: [PATCH] Add k/v logging to bootstrap and controlPlane

Signed-off-by: killianmuldoon
---
 .../controllers/kubeadmconfig_controller.go   |  36 +++---
 .../locking/control_plane_init_mutex.go       |  26 ++--
 .../locking/control_plane_init_mutex_test.go  | 111 +-----------------
 bootstrap/util/configowner.go                 |   7 ++
 .../kubeadm/internal/controllers/helpers.go   |   3 +-
 util/util.go                                  |   7 ++
 6 files changed, 47 insertions(+), 143 deletions(-)

diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go
index 0982757ccc11..088970e1e8c2 100644
--- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go
+++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go
@@ -102,7 +102,7 @@ type Scope struct {
 // SetupWithManager sets up the reconciler with the Manager.
 func (r *KubeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
 	if r.KubeadmInitLock == nil {
-		r.KubeadmInitLock = locking.NewControlPlaneInitMutex(ctrl.LoggerFrom(ctx).WithName("init-locker"), mgr.GetClient())
+		r.KubeadmInitLock = locking.NewControlPlaneInitMutex(mgr.GetClient())
 	}
 	if r.remoteClientGetter == nil {
 		r.remoteClientGetter = remote.NewClusterClient
@@ -173,7 +173,7 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques
 	if configOwner == nil {
 		return ctrl.Result{}, nil
 	}
-	log = log.WithValues(configOwner.GetKind(), klog.KRef(configOwner.GetNamespace(), configOwner.GetName()), "resourceVersion", configOwner.GetResourceVersion())
+	log = log.WithValues(configOwner.LowerCamelCaseKind(), klog.KRef(configOwner.GetNamespace(), configOwner.GetName()), "resourceVersion", configOwner.GetResourceVersion())
 
 	// Lookup the cluster the config owner is associated with
 	cluster, err := util.GetClusterByName(ctx, r.Client, configOwner.GetNamespace(), configOwner.ClusterName())
@@ -324,14 +324,14 @@ func (r *KubeadmConfigReconciler) rotateMachinePoolBootstrapToken(ctx context.Co
 		return ctrl.Result{}, err
 	}
 	if shouldRotate {
-		log.V(2).Info("Creating new bootstrap token")
+		log.Info("Creating new bootstrap token, the existing one should be rotated")
 		token, err := createToken(ctx, remoteClient, r.TokenTTL)
 		if err != nil {
 			return ctrl.Result{}, errors.Wrapf(err, "failed to create new bootstrap token")
 		}
 
 		config.Spec.JoinConfiguration.Discovery.BootstrapToken.Token = token
-		log.Info("Altering JoinConfiguration.Discovery.BootstrapToken", "Token", token)
+		log.V(3).Info("Altering JoinConfiguration.Discovery.BootstrapToken.Token")
 
 		// update the bootstrap data
 		return r.joinWorker(ctx, scope)
@@ -381,7 +381,7 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex
 		}
 	}()
 
-	scope.Info("Creating BootstrapData for the init control plane")
+	scope.Info("Creating BootstrapData for the first control plane")
 
 	// Nb. in this case JoinConfiguration should not be defined by users, but in case of misconfigurations, CABPK simply ignore it
@@ -498,6 +498,8 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex
 }
 
 func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) (ctrl.Result, error) {
+	scope.Info("Creating BootstrapData for the worker node")
+
 	certificates := secret.NewCertificatesForWorker(scope.Config.Spec.JoinConfiguration.CACertPath)
 	err := certificates.Lookup(
 		ctx,
@@ -537,8 +539,6 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope)
 		return ctrl.Result{}, errors.New("Machine is a Worker, but JoinConfiguration.ControlPlane is set in the KubeadmConfig object")
 	}
 
-	scope.Info("Creating BootstrapData for the worker node")
-
 	verbosityFlag := ""
 	if scope.Config.Spec.Verbosity != nil {
 		verbosityFlag = fmt.Sprintf("--v %s", strconv.Itoa(int(*scope.Config.Spec.Verbosity)))
@@ -595,6 +595,8 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope)
 }
 
 func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *Scope) (ctrl.Result, error) {
+	scope.Info("Creating BootstrapData for the joining control plane")
+
 	if !scope.ConfigOwner.IsControlPlaneMachine() {
 		return ctrl.Result{}, fmt.Errorf("%s is not a valid control plane kind, only Machine is supported", scope.ConfigOwner.GetKind())
 	}
@@ -638,8 +640,6 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S
 		return ctrl.Result{}, err
 	}
 
-	scope.Info("Creating BootstrapData for the join control plane")
-
 	verbosityFlag := ""
 	if scope.Config.Spec.Verbosity != nil {
 		verbosityFlag = fmt.Sprintf("--v %s", strconv.Itoa(int(*scope.Config.Spec.Verbosity)))
@@ -892,7 +892,7 @@ func (r *KubeadmConfigReconciler) reconcileDiscovery(ctx context.Context, cluste
 		apiServerEndpoint = cluster.Spec.ControlPlaneEndpoint.String()
 		config.Spec.JoinConfiguration.Discovery.BootstrapToken.APIServerEndpoint = apiServerEndpoint
-		log.Info("Altering JoinConfiguration.Discovery.BootstrapToken", "APIServerEndpoint", apiServerEndpoint)
+		log.V(3).Info("Altering JoinConfiguration.Discovery.BootstrapToken.APIServerEndpoint", "APIServerEndpoint", apiServerEndpoint)
 	}
 
 	// if BootstrapToken already contains a token, respect it; otherwise create a new bootstrap token for the node to join
@@ -908,7 +908,7 @@ func (r *KubeadmConfigReconciler) reconcileDiscovery(ctx context.Context, cluste
 		}
 
 		config.Spec.JoinConfiguration.Discovery.BootstrapToken.Token = token
-		log.Info("Altering JoinConfiguration.Discovery.BootstrapToken")
+		log.V(3).Info("Altering JoinConfiguration.Discovery.BootstrapToken.Token")
 	}
 
 	// If the BootstrapToken does not contain any CACertHashes then force skip CA Verification
@@ -930,39 +930,39 @@ func (r *KubeadmConfigReconciler) reconcileTopLevelObjectSettings(ctx context.Co
 	// then use Cluster's ControlPlaneEndpoint as a control plane endpoint for the Kubernetes cluster.
 	if config.Spec.ClusterConfiguration.ControlPlaneEndpoint == "" && cluster.Spec.ControlPlaneEndpoint.IsValid() {
 		config.Spec.ClusterConfiguration.ControlPlaneEndpoint = cluster.Spec.ControlPlaneEndpoint.String()
-		log.Info("Altering ClusterConfiguration", "ControlPlaneEndpoint", config.Spec.ClusterConfiguration.ControlPlaneEndpoint)
+		log.V(3).Info("Altering ClusterConfiguration.ControlPlaneEndpoint", "ControlPlaneEndpoint", config.Spec.ClusterConfiguration.ControlPlaneEndpoint)
 	}
 
 	// If there are no ClusterName defined in ClusterConfiguration, use Cluster.Name
 	if config.Spec.ClusterConfiguration.ClusterName == "" {
 		config.Spec.ClusterConfiguration.ClusterName = cluster.Name
-		log.Info("Altering ClusterConfiguration", "ClusterName", config.Spec.ClusterConfiguration.ClusterName)
+		log.V(3).Info("Altering ClusterConfiguration.ClusterName", "ClusterName", config.Spec.ClusterConfiguration.ClusterName)
 	}
 
 	// If there are no Network settings defined in ClusterConfiguration, use ClusterNetwork settings, if defined
 	if cluster.Spec.ClusterNetwork != nil {
 		if config.Spec.ClusterConfiguration.Networking.DNSDomain == "" && cluster.Spec.ClusterNetwork.ServiceDomain != "" {
 			config.Spec.ClusterConfiguration.Networking.DNSDomain = cluster.Spec.ClusterNetwork.ServiceDomain
-			log.Info("Altering ClusterConfiguration", "DNSDomain", config.Spec.ClusterConfiguration.Networking.DNSDomain)
+			log.V(3).Info("Altering ClusterConfiguration.Networking.DNSDomain", "DNSDomain", config.Spec.ClusterConfiguration.Networking.DNSDomain)
 		}
 		if config.Spec.ClusterConfiguration.Networking.ServiceSubnet == "" &&
 			cluster.Spec.ClusterNetwork.Services != nil &&
 			len(cluster.Spec.ClusterNetwork.Services.CIDRBlocks) > 0 {
 			config.Spec.ClusterConfiguration.Networking.ServiceSubnet = cluster.Spec.ClusterNetwork.Services.String()
-			log.Info("Altering ClusterConfiguration", "ServiceSubnet", config.Spec.ClusterConfiguration.Networking.ServiceSubnet)
+			log.V(3).Info("Altering ClusterConfiguration.Networking.ServiceSubnet", "ServiceSubnet", config.Spec.ClusterConfiguration.Networking.ServiceSubnet)
 		}
 		if config.Spec.ClusterConfiguration.Networking.PodSubnet == "" &&
 			cluster.Spec.ClusterNetwork.Pods != nil &&
 			len(cluster.Spec.ClusterNetwork.Pods.CIDRBlocks) > 0 {
 			config.Spec.ClusterConfiguration.Networking.PodSubnet = cluster.Spec.ClusterNetwork.Pods.String()
-			log.Info("Altering ClusterConfiguration", "PodSubnet", config.Spec.ClusterConfiguration.Networking.PodSubnet)
+			log.V(3).Info("Altering ClusterConfiguration.Networking.PodSubnet", "PodSubnet", config.Spec.ClusterConfiguration.Networking.PodSubnet)
 		}
 	}
 
 	// If there are no KubernetesVersion settings defined in ClusterConfiguration, use Version from machine, if defined
 	if config.Spec.ClusterConfiguration.KubernetesVersion == "" && machine.Spec.Version != nil {
 		config.Spec.ClusterConfiguration.KubernetesVersion = *machine.Spec.Version
-		log.Info("Altering ClusterConfiguration", "KubernetesVersion", config.Spec.ClusterConfiguration.KubernetesVersion)
+		log.V(3).Info("Altering ClusterConfiguration.KubernetesVersion", "KubernetesVersion", config.Spec.ClusterConfiguration.KubernetesVersion)
 	}
 }
@@ -1001,7 +1001,7 @@ func (r *KubeadmConfigReconciler) storeBootstrapData(ctx context.Context, scope
 		if !apierrors.IsAlreadyExists(err) {
 			return errors.Wrapf(err, "failed to create bootstrap data secret for KubeadmConfig %s/%s", scope.Config.Namespace, scope.Config.Name)
 		}
-		log.Info("bootstrap data secret for KubeadmConfig already exists, updating", "secret", secret.Name, "KubeadmConfig", scope.Config.Name)
+		log.Info("bootstrap data secret for KubeadmConfig already exists, updating", "secret", klog.KObj(secret))
 		if err := r.Client.Update(ctx, secret); err != nil {
 			return errors.Wrapf(err, "failed to update bootstrap data secret for KubeadmConfig %s/%s", scope.Config.Namespace, scope.Config.Name)
 		}
diff --git a/bootstrap/kubeadm/internal/locking/control_plane_init_mutex.go b/bootstrap/kubeadm/internal/locking/control_plane_init_mutex.go
index be19bf7a5ab8..41bac632cc5a 100644
--- a/bootstrap/kubeadm/internal/locking/control_plane_init_mutex.go
+++ b/bootstrap/kubeadm/internal/locking/control_plane_init_mutex.go
@@ -22,11 +22,12 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"github.com/go-logr/logr"
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/klog/v2"
+	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -36,14 +37,12 @@ const semaphoreInformationKey = "lock-information"
 
 // ControlPlaneInitMutex uses a ConfigMap to synchronize cluster initialization.
 type ControlPlaneInitMutex struct {
-	log    logr.Logger
 	client client.Client
 }
 
 // NewControlPlaneInitMutex returns a lock that can be held by a control plane node before init.
-func NewControlPlaneInitMutex(log logr.Logger, client client.Client) *ControlPlaneInitMutex {
+func NewControlPlaneInitMutex(client client.Client) *ControlPlaneInitMutex {
 	return &ControlPlaneInitMutex{
-		log:    log,
 		client: client,
 	}
 }
@@ -52,7 +51,7 @@ func NewControlPlaneInitMutex(log logr.Logger, client client.Client) *ControlPla
 func (c *ControlPlaneInitMutex) Lock(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) bool {
 	sema := newSemaphore()
 	cmName := configMapName(cluster.Name)
-	log := c.log.WithValues("namespace", cluster.Namespace, "cluster-name", cluster.Name, "configmap-name", cmName, "machine-name", machine.Name)
+	log := ctrl.LoggerFrom(ctx, "configMap", klog.KRef(cluster.Namespace, cmName))
 	err := c.client.Get(ctx, client.ObjectKey{
 		Namespace: cluster.Namespace,
 		Name:      cmName,
@@ -61,12 +60,12 @@ func (c *ControlPlaneInitMutex) Lock(ctx context.Context, cluster *clusterv1.Clu
 	case apierrors.IsNotFound(err):
 		break
 	case err != nil:
-		log.Error(err, "Failed to acquire lock")
+		log.Error(err, "Failed to acquire init lock")
 		return false
 	default: // Successfully found an existing config map.
 		info, err := sema.information()
 		if err != nil {
-			log.Error(err, "Failed to get information about the existing lock")
+			log.Error(err, "Failed to get information about the existing init lock")
 			return false
 		}
 		// The machine requesting the lock is the machine that created the lock, therefore the lock is acquired.
@@ -79,12 +78,12 @@ func (c *ControlPlaneInitMutex) Lock(ctx context.Context, cluster *clusterv1.Clu
 			Namespace: cluster.Namespace,
 			Name:      info.MachineName,
 		}, &clusterv1.Machine{}); err != nil {
-			log.Error(err, "Failed to get machine holding ControlPlane lock")
+			log.Error(err, "Failed to get machine holding init lock")
 			if apierrors.IsNotFound(err) {
 				c.Unlock(ctx, cluster)
 			}
 		}
-		log.Info("Waiting on another machine to initialize", "init-machine", info.MachineName)
+		log.Info(fmt.Sprintf("Waiting for Machine %s to initialize", info.MachineName))
 		return false
 	}
 
@@ -92,7 +91,7 @@ func (c *ControlPlaneInitMutex) Lock(ctx context.Context, cluster *clusterv1.Clu
 	sema.setMetadata(cluster)
 	// Adds the additional information
 	if err := sema.setInformation(&information{MachineName: machine.Name}); err != nil {
-		log.Error(err, "Failed to acquire lock while setting semaphore information")
+		log.Error(err, "Failed to acquire init lock while setting semaphore information")
 		return false
 	}
 
@@ -100,10 +99,10 @@ func (c *ControlPlaneInitMutex) Lock(ctx context.Context, cluster *clusterv1.Clu
 	err = c.client.Create(ctx, sema.ConfigMap)
 	switch {
 	case apierrors.IsAlreadyExists(err):
-		log.Info("Cannot acquire the lock. The lock has been acquired by someone else")
+		log.Info("Cannot acquire the init lock. The init lock has been acquired by someone else")
 		return false
 	case err != nil:
-		log.Error(err, "Error acquiring the lock")
+		log.Error(err, "Error acquiring the init lock")
 		return false
 	default:
 		return true
@@ -114,8 +113,7 @@ func (c *ControlPlaneInitMutex) Lock(ctx context.Context, cluster *clusterv1.Clu
 func (c *ControlPlaneInitMutex) Unlock(ctx context.Context, cluster *clusterv1.Cluster) bool {
 	sema := newSemaphore()
 	cmName := configMapName(cluster.Name)
-	log := c.log.WithValues("namespace", cluster.Namespace, "cluster-name", cluster.Name, "configmap-name", cmName)
-	log.Info("Checking for lock")
+	log := ctrl.LoggerFrom(ctx, "configMap", klog.KRef(cluster.Namespace, cmName))
 	err := c.client.Get(ctx, client.ObjectKey{
 		Namespace: cluster.Namespace,
 		Name:      cmName,
diff --git a/bootstrap/kubeadm/internal/locking/control_plane_init_mutex_test.go b/bootstrap/kubeadm/internal/locking/control_plane_init_mutex_test.go
index cd953975a752..f0d7d665da90 100644
--- a/bootstrap/kubeadm/internal/locking/control_plane_init_mutex_test.go
+++ b/bootstrap/kubeadm/internal/locking/control_plane_init_mutex_test.go
@@ -18,12 +18,10 @@ package locking
 
 import (
 	"context"
-	"encoding/json"
 	"errors"
 	"fmt"
 	"testing"
 
-	"github.com/go-logr/logr"
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -31,12 +29,10 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
-	"sigs.k8s.io/controller-runtime/pkg/log"
-
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 )
 
 const (
@@ -107,7 +103,6 @@ func TestControlPlaneInitMutex_Lock(t *testing.T) {
 			gs := NewWithT(t)
 
 			l := &ControlPlaneInitMutex{
-				log:    log.Log,
 				client: tc.client,
 			}
 
@@ -182,7 +177,6 @@ func TestControlPlaneInitMutex_LockWithMachineDeletion(t *testing.T) {
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
 			l := &ControlPlaneInitMutex{
-				log:    log.Log,
 				client: tc.client,
 			}
 
@@ -269,7 +263,6 @@ func TestControlPlaneInitMutex_UnLock(t *testing.T) {
 			gs := NewWithT(t)
 
 			l := &ControlPlaneInitMutex{
-				log:    log.Log,
 				client: tc.client,
 			}
 
@@ -286,60 +279,6 @@ func TestControlPlaneInitMutex_UnLock(t *testing.T) {
 	}
 }
 
-func TestInfoLines_Lock(t *testing.T) {
-	g := NewWithT(t)
-
-	uid := types.UID("test-uid")
-	info := information{MachineName: "my-control-plane"}
-	b, err := json.Marshal(info)
-	g.Expect(err).NotTo(HaveOccurred())
-
-	c := &fakeClient{
-		Client: fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      configMapName(clusterName),
-				Namespace: clusterNamespace,
-			},
-			Data: map[string]string{semaphoreInformationKey: string(b)},
-		}).Build(),
-	}
-
-	logtester := &logtests{
-		InfoLog:  make([]line, 0),
-		ErrorLog: make([]line, 0),
-	}
-	l := &ControlPlaneInitMutex{
-		log:    logr.New(logtester),
-		client: c,
-	}
-
-	cluster := &clusterv1.Cluster{
-		ObjectMeta: metav1.ObjectMeta{
-			Namespace: clusterNamespace,
-			Name:      clusterName,
-			UID:       uid,
-		},
-	}
-	machine := &clusterv1.Machine{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: fmt.Sprintf("machine-%s", cluster.Name),
-		},
-	}
-
-	g.Expect(l.Lock(ctx, cluster, machine)).To(BeFalse())
-
-	foundLogLine := false
-	for _, line := range logtester.InfoLog {
-		for k, v := range line.data {
-			if k == "init-machine" && v.(string) == "my-control-plane" {
-				foundLogLine = true
-			}
-		}
-	}
-
-	g.Expect(foundLogLine).To(BeTrue())
-}
-
 type fakeClient struct {
 	client.Client
 	getError error
@@ -367,51 +306,3 @@ func (fc *fakeClient) Delete(ctx context.Context, obj client.Object, opts ...cli
 	}
 	return fc.Client.Delete(ctx, obj, opts...)
 }
-
-type logtests struct {
-	logr.Logger
-	InfoLog  []line
-	ErrorLog []line
-}
-
-type line struct {
-	line string
-	data map[string]interface{}
-}
-
-func (l *logtests) Init(info logr.RuntimeInfo) {
-}
-
-func (l *logtests) Enabled(level int) bool {
-	return true
-}
-
-func (l *logtests) Info(level int, msg string, keysAndValues ...interface{}) {
-	data := make(map[string]interface{})
-	for i := 0; i < len(keysAndValues); i += 2 {
-		data[keysAndValues[i].(string)] = keysAndValues[i+1]
-	}
-	l.InfoLog = append(l.InfoLog, line{
-		line: msg,
-		data: data,
-	})
-}
-
-func (l *logtests) Error(err error, msg string, keysAndValues ...interface{}) {
-	data := make(map[string]interface{})
-	for i := 0; i < len(keysAndValues); i += 2 {
-		data[keysAndValues[i].(string)] = keysAndValues[i+1]
-	}
-	l.ErrorLog = append(l.ErrorLog, line{
-		line: msg + err.Error(),
-		data: data,
-	})
-}
-
-func (l *logtests) WithValues(keysAndValues ...interface{}) logr.LogSink {
-	return l
-}
-
-func (l *logtests) WithName(name string) logr.LogSink {
-	return l
-}
diff --git a/bootstrap/util/configowner.go b/bootstrap/util/configowner.go
index 1d42e4c7e7c6..e40737e702a5 100644
--- a/bootstrap/util/configowner.go
+++ b/bootstrap/util/configowner.go
@@ -25,6 +25,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -123,6 +124,12 @@ func (co ConfigOwner) KubernetesVersion() string {
 	return version
 }
 
+// LowerCamelCaseKind mirrors how controller runtime formats the object's kind when used as a logging key
+// for the object being reconciled.
+func (co ConfigOwner) LowerCamelCaseKind() string {
+	return util.LowerCamelCaseKind(co.Unstructured)
+}
+
 // GetConfigOwner returns the Unstructured object owning the current resource.
 func GetConfigOwner(ctx context.Context, c client.Client, obj metav1.Object) (*ConfigOwner, error) {
 	allowedGKs := []schema.GroupKind{
diff --git a/controlplane/kubeadm/internal/controllers/helpers.go b/controlplane/kubeadm/internal/controllers/helpers.go
index 31034141ca9f..03e01f0ecde0 100644
--- a/controlplane/kubeadm/internal/controllers/helpers.go
+++ b/controlplane/kubeadm/internal/controllers/helpers.go
@@ -28,6 +28,7 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apiserver/pkg/storage/names"
+	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -103,7 +104,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileKubeconfig(ctx context.Context,
 
 func (r *KubeadmControlPlaneReconciler) adoptKubeconfigSecret(ctx context.Context, cluster *clusterv1.Cluster, configSecret *corev1.Secret, controllerOwnerRef metav1.OwnerReference) error {
 	log := ctrl.LoggerFrom(ctx)
-	log.Info("Adopting KubeConfig secret created by v1alpha2 controllers", "Name", configSecret.Name)
+	log.Info("Adopting KubeConfig secret created by v1alpha2 controllers", "secret", klog.KObj(configSecret))
 
 	patch, err := patch.NewHelper(configSecret, r.Client)
 	if err != nil {
diff --git a/util/util.go b/util/util.go
index 4836d7afcba4..cb2d4e1c2344 100644
--- a/util/util.go
+++ b/util/util.go
@@ -604,3 +604,10 @@ func IsNil(i interface{}) bool {
 	}
 	return false
 }
+
+// LowerCamelCaseKind mirrors how controller runtime formats the object's kind when used as a logging key
+// for the object being reconciled.
+func LowerCamelCaseKind(obj runtime.Object) string {
+	kind := obj.GetObjectKind().GroupVersionKind().Kind
+	return strings.ToLower(kind[:1]) + kind[1:]
+}
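
For context on the conventions this patch settles on, below is a minimal, self-contained Go sketch of the same logging pattern: the logger is pulled from the context with ctrl.LoggerFrom instead of being stored on a struct (as in the ControlPlaneInitMutex change), object references are logged with klog.KObj/klog.KRef, and routine spec mutations are demoted to verbosity 3. Every "demo" name below is a hypothetical stand-in for illustration, not code from this patch.

package main

import (
	"context"
	"strings"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
)

// demoReconcile is a hypothetical reconcile step illustrating the conventions
// adopted above: the logger travels in the context instead of living on a
// struct field, and object references are logged with klog helpers so every
// controller renders namespace/name pairs the same way.
func demoReconcile(ctx context.Context, secret *corev1.Secret) {
	// ctrl.LoggerFrom replaces stored loggers such as the removed
	// ControlPlaneInitMutex.log field; extra key/value pairs can be bound here.
	log := ctrl.LoggerFrom(ctx, "configMap", klog.KRef("default", "demo-cluster-lock"))

	// klog.KObj formats an object already in hand as "namespace/name".
	log.Info("Adopting KubeConfig secret created by v1alpha2 controllers", "secret", klog.KObj(secret))

	// Routine spec mutations are demoted to verbosity 3, matching the
	// log.V(3).Info calls introduced in the patch.
	log.V(3).Info("Altering ClusterConfiguration.ClusterName", "ClusterName", "demo-cluster")

	// The new util.LowerCamelCaseKind boils down to this one-liner; it keys
	// log lines by kind the way controller-runtime does, e.g. "MachinePool"
	// becomes "machinePool".
	kind := "MachinePool"
	log.Info("example owner reference", strings.ToLower(kind[:1])+kind[1:], klog.KRef("default", "demo-mp"))
}

func main() {
	// Seed the context with a real logger so LoggerFrom has something to find.
	ctx := ctrl.LoggerInto(context.Background(), klog.Background())
	demoReconcile(ctx, &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "demo-cluster-kubeconfig"},
	})
}

One design note: because ctrl.LoggerFrom reads the logger out of the context, the init-lock messages now automatically inherit whatever keys the calling reconciler has already bound (cluster, the config owner kind, resourceVersion, and so on), which the removed struct-held logr.Logger field could not do.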