logging: cleanup k/v pairs
Co-authored-by: Stefan Büringer [email protected]
Signed-off-by: killianmuldoon <[email protected]>
killianmuldoon committed Jul 27, 2022
1 parent ac42ba2 commit 1a286fd
Showing 37 changed files with 233 additions and 254 deletions.
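Editor's note: the diff below standardizes the structured logging key/value pairs used by the kubeadm bootstrap provider. Object identifiers move from separate kind/name/namespace keys to klog.KObj/klog.KRef references, enriched loggers are propagated through the context, and chatty field-mutation messages drop to verbosity 3. The sketch below is illustrative only and not part of the commit; the object names are hypothetical.

```go
package main

import "k8s.io/klog/v2"

func main() {
	log := klog.Background() // stand-in for the controller's logger

	// Before: separate ad-hoc keys for each identifying field.
	log.Info("Reconciling KubeadmConfig", "kind", "Machine", "namespace", "default", "name", "worker-0")

	// After: one canonical reference per object, rendered as "default/worker-0".
	log.Info("Reconciling KubeadmConfig", "machine", klog.KRef("default", "worker-0"))
}
```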
39 changes: 21 additions & 18 deletions bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go
@@ -31,6 +31,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -101,7 +102,7 @@ type Scope struct {
// SetupWithManager sets up the reconciler with the Manager.
func (r *KubeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
if r.KubeadmInitLock == nil {
r.KubeadmInitLock = locking.NewControlPlaneInitMutex(ctrl.LoggerFrom(ctx).WithName("init-locker"), mgr.GetClient())
r.KubeadmInitLock = locking.NewControlPlaneInitMutex(mgr.GetClient())
}
if r.remoteClientGetter == nil {
r.remoteClientGetter = remote.NewClusterClient
@@ -172,7 +173,7 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques
if configOwner == nil {
return ctrl.Result{}, nil
}
log = log.WithValues("kind", configOwner.GetKind(), "version", configOwner.GetResourceVersion(), "name", configOwner.GetName())
log = log.WithValues(configOwner.LowerCamelCaseKind(), klog.KRef(configOwner.GetNamespace(), configOwner.GetName()), "resourceVersion", configOwner.GetResourceVersion())

// Lookup the cluster the config owner is associated with
cluster, err := util.GetClusterByName(ctx, r.Client, configOwner.GetNamespace(), configOwner.ClusterName())
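Editor's note: the new WithValues call keys the owner reference by its lower-camel-case kind (via the configOwner.LowerCamelCaseKind() helper shown above) and uses klog.KRef, which builds a namespace/name reference from plain strings; klog.KObj derives the same reference from an object already in hand (used below for the cluster and the bootstrap secret). A hedged sketch with hypothetical objects, not taken from this commit:

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	log := klog.Background()

	// KRef: build a namespace/name reference from plain strings, e.g. before
	// the owner object itself has been fetched.
	log.Info("Reconciling", "machine", klog.KRef("default", "worker-0"), "resourceVersion", "12345")

	// KObj: derive the same reference from an object that is already in hand.
	secret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "worker-0-bootstrap"}}
	log.Info("Updating bootstrap data secret", "secret", klog.KObj(secret))
}
```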
@@ -190,6 +191,8 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques
return ctrl.Result{}, err
}

ctx = ctrl.LoggerInto(ctx, log.WithValues("cluster", klog.KObj(cluster)))

if annotations.IsPaused(cluster, config) {
log.Info("Reconciliation is paused for this object")
return ctrl.Result{}, nil
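Editor's note: the added ctrl.LoggerInto line stores the cluster-scoped logger back into the context, so helpers further down the reconcile that call ctrl.LoggerFrom(ctx) automatically carry the cluster key/value. A small self-contained sketch (names are hypothetical, not from this commit):

```go
package main

import (
	"context"

	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
)

func reconcileStep(ctx context.Context) {
	// Inherits the "cluster" key/value added by the caller.
	ctrl.LoggerFrom(ctx).Info("running one step of the reconcile")
}

func main() {
	ctx := ctrl.LoggerInto(context.Background(), klog.Background())

	// Enrich the logger with the cluster reference and store it back into the
	// context, so downstream ctrl.LoggerFrom(ctx) calls pick it up automatically.
	log := ctrl.LoggerFrom(ctx).WithValues("cluster", klog.KRef("default", "my-cluster"))
	ctx = ctrl.LoggerInto(ctx, log)

	reconcileStep(ctx)
}
```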
@@ -321,14 +324,14 @@ func (r *KubeadmConfigReconciler) rotateMachinePoolBootstrapToken(ctx context.Co
return ctrl.Result{}, err
}
if shouldRotate {
log.V(2).Info("Creating new bootstrap token")
log.Info("Creating new bootstrap token, the existing one should be rotated")
token, err := createToken(ctx, remoteClient, r.TokenTTL)
if err != nil {
return ctrl.Result{}, errors.Wrapf(err, "failed to create new bootstrap token")
}

config.Spec.JoinConfiguration.Discovery.BootstrapToken.Token = token
log.Info("Altering JoinConfiguration.Discovery.BootstrapToken", "Token", token)
log.V(3).Info("Altering JoinConfiguration.Discovery.BootstrapToken.Token")

// update the bootstrap data
return r.joinWorker(ctx, scope)
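Editor's note: three conventions show up in this hunk: the token-rotation message is promoted to the default level with a fuller explanation, the field-mutation message drops to V(3), and the bootstrap token value itself is no longer written to the log. The sketch below is my reading of that level split (operator-visible vs. debug detail), not something documented in the diff:

```go
package main

import "k8s.io/klog/v2"

func main() {
	log := klog.Background()

	token := "abcdef.0123456789abcdef" // hypothetical bootstrap token value

	// Default level: a state change an operator should normally see.
	log.Info("Creating new bootstrap token, the existing one should be rotated")

	// V(3): chatty mutation detail, emitted only when verbosity >= 3.
	// Note that the secret value itself is deliberately kept out of the log.
	log.V(3).Info("Altering JoinConfiguration.Discovery.BootstrapToken.Token")

	_ = token // the token is written into the join configuration, not the log
}
```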
@@ -378,7 +381,7 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex
}
}()

scope.Info("Creating BootstrapData for the init control plane")
scope.Info("Creating BootstrapData for the first control plane")

// Nb. in this case JoinConfiguration should not be defined by users, but in case of misconfigurations, CABPK simply ignore it

@@ -495,6 +498,8 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex
}

func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) (ctrl.Result, error) {
scope.Info("Creating BootstrapData for the worker node")

certificates := secret.NewCertificatesForWorker(scope.Config.Spec.JoinConfiguration.CACertPath)
err := certificates.Lookup(
ctx,
@@ -534,8 +539,6 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope)
return ctrl.Result{}, errors.New("Machine is a Worker, but JoinConfiguration.ControlPlane is set in the KubeadmConfig object")
}

scope.Info("Creating BootstrapData for the worker node")

verbosityFlag := ""
if scope.Config.Spec.Verbosity != nil {
verbosityFlag = fmt.Sprintf("--v %s", strconv.Itoa(int(*scope.Config.Spec.Verbosity)))
@@ -592,6 +595,8 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope)
}

func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *Scope) (ctrl.Result, error) {
scope.Info("Creating BootstrapData for the joining control plane")

if !scope.ConfigOwner.IsControlPlaneMachine() {
return ctrl.Result{}, fmt.Errorf("%s is not a valid control plane kind, only Machine is supported", scope.ConfigOwner.GetKind())
}
@@ -635,8 +640,6 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S
return ctrl.Result{}, err
}

scope.Info("Creating BootstrapData for the join control plane")

verbosityFlag := ""
if scope.Config.Spec.Verbosity != nil {
verbosityFlag = fmt.Sprintf("--v %s", strconv.Itoa(int(*scope.Config.Spec.Verbosity)))
@@ -889,7 +892,7 @@ func (r *KubeadmConfigReconciler) reconcileDiscovery(ctx context.Context, cluste

apiServerEndpoint = cluster.Spec.ControlPlaneEndpoint.String()
config.Spec.JoinConfiguration.Discovery.BootstrapToken.APIServerEndpoint = apiServerEndpoint
log.Info("Altering JoinConfiguration.Discovery.BootstrapToken", "APIServerEndpoint", apiServerEndpoint)
log.V(3).Info("Altering JoinConfiguration.Discovery.BootstrapToken.APIServerEndpoint", "APIServerEndpoint", apiServerEndpoint)
}

// if BootstrapToken already contains a token, respect it; otherwise create a new bootstrap token for the node to join
@@ -905,7 +908,7 @@ }
}

config.Spec.JoinConfiguration.Discovery.BootstrapToken.Token = token
log.Info("Altering JoinConfiguration.Discovery.BootstrapToken")
log.V(3).Info("Altering JoinConfiguration.Discovery.BootstrapToken.Token")
}

// If the BootstrapToken does not contain any CACertHashes then force skip CA Verification
@@ -927,39 +930,39 @@ func (r *KubeadmConfigReconciler) reconcileTopLevelObjectSettings(ctx context.Co
// then use Cluster's ControlPlaneEndpoint as a control plane endpoint for the Kubernetes cluster.
if config.Spec.ClusterConfiguration.ControlPlaneEndpoint == "" && cluster.Spec.ControlPlaneEndpoint.IsValid() {
config.Spec.ClusterConfiguration.ControlPlaneEndpoint = cluster.Spec.ControlPlaneEndpoint.String()
log.Info("Altering ClusterConfiguration", "ControlPlaneEndpoint", config.Spec.ClusterConfiguration.ControlPlaneEndpoint)
log.V(3).Info("Altering ClusterConfiguration.ControlPlaneEndpoint", "ControlPlaneEndpoint", config.Spec.ClusterConfiguration.ControlPlaneEndpoint)
}

// If there are no ClusterName defined in ClusterConfiguration, use Cluster.Name
if config.Spec.ClusterConfiguration.ClusterName == "" {
config.Spec.ClusterConfiguration.ClusterName = cluster.Name
log.Info("Altering ClusterConfiguration", "ClusterName", config.Spec.ClusterConfiguration.ClusterName)
log.V(3).Info("Altering ClusterConfiguration.ClusterName", "ClusterName", config.Spec.ClusterConfiguration.ClusterName)
}

// If there are no Network settings defined in ClusterConfiguration, use ClusterNetwork settings, if defined
if cluster.Spec.ClusterNetwork != nil {
if config.Spec.ClusterConfiguration.Networking.DNSDomain == "" && cluster.Spec.ClusterNetwork.ServiceDomain != "" {
config.Spec.ClusterConfiguration.Networking.DNSDomain = cluster.Spec.ClusterNetwork.ServiceDomain
log.Info("Altering ClusterConfiguration", "DNSDomain", config.Spec.ClusterConfiguration.Networking.DNSDomain)
log.V(3).Info("Altering ClusterConfiguration.Networking.DNSDomain", "DNSDomain", config.Spec.ClusterConfiguration.Networking.DNSDomain)
}
if config.Spec.ClusterConfiguration.Networking.ServiceSubnet == "" &&
cluster.Spec.ClusterNetwork.Services != nil &&
len(cluster.Spec.ClusterNetwork.Services.CIDRBlocks) > 0 {
config.Spec.ClusterConfiguration.Networking.ServiceSubnet = cluster.Spec.ClusterNetwork.Services.String()
log.Info("Altering ClusterConfiguration", "ServiceSubnet", config.Spec.ClusterConfiguration.Networking.ServiceSubnet)
log.V(3).Info("Altering ClusterConfiguration.Networking.ServiceSubnet", "ServiceSubnet", config.Spec.ClusterConfiguration.Networking.ServiceSubnet)
}
if config.Spec.ClusterConfiguration.Networking.PodSubnet == "" &&
cluster.Spec.ClusterNetwork.Pods != nil &&
len(cluster.Spec.ClusterNetwork.Pods.CIDRBlocks) > 0 {
config.Spec.ClusterConfiguration.Networking.PodSubnet = cluster.Spec.ClusterNetwork.Pods.String()
log.Info("Altering ClusterConfiguration", "PodSubnet", config.Spec.ClusterConfiguration.Networking.PodSubnet)
log.V(3).Info("Altering ClusterConfiguration.Networking.PodSubnet", "PodSubnet", config.Spec.ClusterConfiguration.Networking.PodSubnet)
}
}

// If there are no KubernetesVersion settings defined in ClusterConfiguration, use Version from machine, if defined
if config.Spec.ClusterConfiguration.KubernetesVersion == "" && machine.Spec.Version != nil {
config.Spec.ClusterConfiguration.KubernetesVersion = *machine.Spec.Version
log.Info("Altering ClusterConfiguration", "KubernetesVersion", config.Spec.ClusterConfiguration.KubernetesVersion)
log.V(3).Info("Altering ClusterConfiguration.KubernetesVersion", "KubernetesVersion", config.Spec.ClusterConfiguration.KubernetesVersion)
}
}
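Editor's note: throughout this hunk the defaulting messages now name the full field path (e.g. ClusterConfiguration.Networking.DNSDomain) and move to V(3), while the non-sensitive value is still attached as a key/value pair. A minimal sketch of that shape, with a hypothetical value:

```go
package main

import "k8s.io/klog/v2"

func main() {
	log := klog.Background()

	dnsDomain := "cluster.local" // hypothetical defaulted value

	// The message names the exact field being defaulted; the non-sensitive
	// value is still attached as a structured key/value pair.
	log.V(3).Info("Altering ClusterConfiguration.Networking.DNSDomain", "DNSDomain", dnsDomain)
}
```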

Expand Down Expand Up @@ -998,7 +1001,7 @@ func (r *KubeadmConfigReconciler) storeBootstrapData(ctx context.Context, scope
if !apierrors.IsAlreadyExists(err) {
return errors.Wrapf(err, "failed to create bootstrap data secret for KubeadmConfig %s/%s", scope.Config.Namespace, scope.Config.Name)
}
log.Info("bootstrap data secret for KubeadmConfig already exists, updating", "secret", secret.Name, "KubeadmConfig", scope.Config.Name)
log.Info("bootstrap data secret for KubeadmConfig already exists, updating", "secret", klog.KObj(secret))
if err := r.Client.Update(ctx, secret); err != nil {
return errors.Wrapf(err, "failed to update bootstrap data secret for KubeadmConfig %s/%s", scope.Config.Namespace, scope.Config.Name)
}
26 changes: 12 additions & 14 deletions bootstrap/kubeadm/internal/locking/control_plane_init_mutex.go
@@ -22,11 +22,12 @@ import (
"encoding/json"
"fmt"

"github.com/go-logr/logr"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -36,14 +37,12 @@ const semaphoreInformationKey = "lock-information"

// ControlPlaneInitMutex uses a ConfigMap to synchronize cluster initialization.
type ControlPlaneInitMutex struct {
log logr.Logger
client client.Client
}

// NewControlPlaneInitMutex returns a lock that can be held by a control plane node before init.
func NewControlPlaneInitMutex(log logr.Logger, client client.Client) *ControlPlaneInitMutex {
func NewControlPlaneInitMutex(client client.Client) *ControlPlaneInitMutex {
return &ControlPlaneInitMutex{
log: log,
client: client,
}
}
@@ -52,7 +51,7 @@ func NewControlPlaneInitMutex(log logr.Logger, client client.Client) *ControlPla
func (c *ControlPlaneInitMutex) Lock(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) bool {
sema := newSemaphore()
cmName := configMapName(cluster.Name)
log := c.log.WithValues("namespace", cluster.Namespace, "cluster-name", cluster.Name, "configmap-name", cmName, "machine-name", machine.Name)
log := ctrl.LoggerFrom(ctx, "configMap", klog.KRef(cluster.Namespace, cmName))
err := c.client.Get(ctx, client.ObjectKey{
Namespace: cluster.Namespace,
Name: cmName,
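Editor's note: in this file the ControlPlaneInitMutex stops carrying its own logr.Logger; the constructor loses the logger parameter, and Lock/Unlock now derive the logger from the request context, attaching the lock ConfigMap reference in the same call. A hedged sketch of that shape with a simplified stand-in type and a hypothetical naming scheme:

```go
package main

import (
	"context"
	"fmt"

	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
)

// initMutex is a stand-in for ControlPlaneInitMutex: it no longer stores a
// logger, only the dependencies it actually needs (the client, in the real type).
type initMutex struct{}

func (m *initMutex) Lock(ctx context.Context, namespace, clusterName string) bool {
	cmName := fmt.Sprintf("%s-lock", clusterName) // hypothetical ConfigMap naming scheme
	// ctrl.LoggerFrom accepts extra key/value pairs, so the lock ConfigMap
	// reference is attached in the same call that fetches the logger from ctx.
	log := ctrl.LoggerFrom(ctx, "configMap", klog.KRef(namespace, cmName))
	log.Info("Attempting to acquire the lock")
	return true
}

func main() {
	ctx := ctrl.LoggerInto(context.Background(), klog.Background())
	(&initMutex{}).Lock(ctx, "default", "my-cluster")
}
```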
@@ -61,12 +60,12 @@ func (c *ControlPlaneInitMutex) Lock(ctx context.Context, cluster *clusterv1.Clu
case apierrors.IsNotFound(err):
break
case err != nil:
log.Error(err, "Failed to acquire lock")
log.Error(err, "Failed to acquire init lock")
return false
default: // Successfully found an existing config map.
info, err := sema.information()
if err != nil {
log.Error(err, "Failed to get information about the existing lock")
log.Error(err, "Failed to get information about the existing init lock")
return false
}
// The machine requesting the lock is the machine that created the lock, therefore the lock is acquired.
@@ -79,31 +78,31 @@ func (c *ControlPlaneInitMutex) Lock(ctx context.Context, cluster *clusterv1.Clu
Namespace: cluster.Namespace,
Name: info.MachineName,
}, &clusterv1.Machine{}); err != nil {
log.Error(err, "Failed to get machine holding ControlPlane lock")
log.Error(err, "Failed to get machine holding init lock")
if apierrors.IsNotFound(err) {
c.Unlock(ctx, cluster)
}
}
log.Info("Waiting on another machine to initialize", "init-machine", info.MachineName)
log.Info(fmt.Sprintf("Waiting for Machine %s to initialize", info.MachineName))
return false
}

// Adds owner reference, namespace and name
sema.setMetadata(cluster)
// Adds the additional information
if err := sema.setInformation(&information{MachineName: machine.Name}); err != nil {
log.Error(err, "Failed to acquire lock while setting semaphore information")
log.Error(err, "Failed to acquire init lock while setting semaphore information")
return false
}

log.Info("Attempting to acquire the lock")
err = c.client.Create(ctx, sema.ConfigMap)
switch {
case apierrors.IsAlreadyExists(err):
log.Info("Cannot acquire the lock. The lock has been acquired by someone else")
log.Info("Cannot acquire the init lock. The init lock has been acquired by someone else")
return false
case err != nil:
log.Error(err, "Error acquiring the lock")
log.Error(err, "Error acquiring the init lock")
return false
default:
return true
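Editor's note: the waiting message above now puts the other machine's name directly into the human-readable sentence via fmt.Sprintf instead of an ad-hoc "init-machine" key. A small sketch of the same trade-off (readable message for a one-off identifier, structured keys reserved for canonical object references), with a hypothetical machine name:

```go
package main

import (
	"fmt"

	"k8s.io/klog/v2"
)

func main() {
	log := klog.Background()
	holder := "controlplane-0" // hypothetical Machine currently holding the init lock

	// Before: log.Info("Waiting on another machine to initialize", "init-machine", holder)
	// After: the one-off identifier becomes part of the readable sentence.
	log.Info(fmt.Sprintf("Waiting for Machine %s to initialize", holder))
}
```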
@@ -114,8 +113,8 @@ func (c *ControlPlaneInitMutex) Lock(ctx context.Context, cluster *clusterv1.Clu
func (c *ControlPlaneInitMutex) Unlock(ctx context.Context, cluster *clusterv1.Cluster) bool {
sema := newSemaphore()
cmName := configMapName(cluster.Name)
log := c.log.WithValues("namespace", cluster.Namespace, "cluster-name", cluster.Name, "configmap-name", cmName)
log.Info("Checking for lock")
log := ctrl.LoggerFrom(ctx, "configMap", klog.KRef(cluster.Namespace, cmName))
err := c.client.Get(ctx, client.ObjectKey{
Namespace: cluster.Namespace,
Name: cmName,
(Diffs for the remaining 35 changed files are not shown here.)
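Editor's note: many messages in this commit move to V(3), so whether they appear depends on the verbosity the controller binary is started with. Assuming the binary wires up klog's flag set (an assumption, not shown in this diff), verbosity can be raised as in this generic sketch:

```go
package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	// Register klog's flags (-v and friends) on the default flag set and raise
	// the verbosity so that V(3) messages are actually emitted.
	klog.InitFlags(nil)
	_ = flag.Set("v", "3")
	flag.Parse()

	log := klog.Background()
	log.V(3).Info("visible because verbosity is 3")
	log.V(5).Info("still suppressed at verbosity 3")
}
```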
