
Commit

logging: cleanup k/v pairs
Signed-off-by: Stefan Büringer [email protected]
sbueringer committed Jul 15, 2022
1 parent e60fd04 commit 35d4dd7
Showing 29 changed files with 137 additions and 88 deletions.
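The pattern repeated across these files: log keys change from ad-hoc lowercase strings carrying bare object names (which lose the namespace) to capitalized kind keys carrying `klog.KObj`/`klog.KRef` values, and enriched loggers are stored back into the context so callees inherit the k/v pairs. A minimal sketch of the before/after, assuming Cluster API's v1beta1 types (the function name is illustrative, not part of this commit):

```go
package example

import (
	"context"

	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

func logKeyConvention(ctx context.Context, cluster *clusterv1.Cluster) {
	log := ctrl.LoggerFrom(ctx)

	// Before: lowercase key, bare name; the namespace is not logged.
	log.Info("reconciling", "cluster", cluster.Name)

	// After: capitalized kind as key, klog.KObj as value. Text output renders
	// "namespace/name"; a JSON encoder renders {"namespace": ..., "name": ...}.
	log.Info("reconciling", "Cluster", klog.KObj(cluster))

	// klog.KRef builds the same structured reference from plain strings when
	// no full object is at hand.
	log.Info("reconciling", "Cluster", klog.KRef(cluster.Namespace, cluster.Name))
}
```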
1 change: 1 addition & 0 deletions .gitignore
@@ -67,6 +67,7 @@ tilt-settings.json
tilt-settings.yaml
tilt_modules
.tiltbuild
+ tilt_modules

# User-supplied clusterctl hacks settings
clusterctl-settings.json
bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go
@@ -31,6 +31,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
kerrors "k8s.io/apimachinery/pkg/util/errors"
+ "k8s.io/klog/v2"
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -172,7 +173,7 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques
if configOwner == nil {
return ctrl.Result{}, nil
}
- log = log.WithValues("kind", configOwner.GetKind(), "version", configOwner.GetResourceVersion(), "name", configOwner.GetName())
+ log = log.WithValues(configOwner.GetKind(), klog.KRef(configOwner.GetNamespace(), configOwner.GetName()), "resourceVersion", configOwner.GetResourceVersion())

// Lookup the cluster the config owner is associated with
cluster, err := util.GetClusterByName(ctx, r.Client, configOwner.GetNamespace(), configOwner.ClusterName())
@@ -190,6 +191,9 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques
return ctrl.Result{}, err
}

+ log = log.WithValues("Cluster", klog.KObj(cluster))
+ ctx = ctrl.LoggerInto(ctx, log)

if annotations.IsPaused(cluster, config) {
log.Info("Reconciliation is paused for this object")
return ctrl.Result{}, nil
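The `ctrl.LoggerInto(ctx, log)` call above is what allows later hunks in this commit to drop explicit `"cluster", cluster.Name` arguments: once the enriched logger is stored back into the context, every callee that uses `ctrl.LoggerFrom(ctx)` inherits the `Cluster` key. A sketch of the propagation, with hypothetical method names:

```go
func (r *KubeadmConfigReconciler) exampleReconcile(ctx context.Context, cluster *clusterv1.Cluster) {
	log := ctrl.LoggerFrom(ctx).WithValues("Cluster", klog.KObj(cluster))
	// Store the enriched logger in the context once...
	ctx = ctrl.LoggerInto(ctx, log)
	r.exampleStep(ctx)
}

func (r *KubeadmConfigReconciler) exampleStep(ctx context.Context) {
	// ...and every downstream LoggerFrom picks up the Cluster k/v pair
	// without re-adding it.
	ctrl.LoggerFrom(ctx).Info("doing work")
}
```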
11 changes: 6 additions & 5 deletions controllers/remote/cluster_cache.go
@@ -32,6 +32,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
+ "k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -235,11 +236,11 @@ func (t *ClusterCacheTracker) deleteAccessor(cluster client.ObjectKey) {
return
}

- t.log.V(2).Info("Deleting clusterAccessor", "cluster", cluster.String())
+ t.log.V(2).Info("Deleting clusterAccessor", "Cluster", cluster.String())

- t.log.V(4).Info("Stopping cache", "cluster", cluster.String())
+ t.log.V(4).Info("Stopping cache", "Cluster", cluster.String())
a.cache.Stop()
- t.log.V(4).Info("Cache stopped", "cluster", cluster.String())
+ t.log.V(4).Info("Cache stopped", "Cluster", cluster.String())

delete(t.clusterAccessors, cluster)
}
@@ -286,7 +287,7 @@ func (t *ClusterCacheTracker) Watch(ctx context.Context, input WatchInput) error
}

if a.watches.Has(input.Name) {
- t.log.V(6).Info("Watch already exists", "namespace", input.Cluster.Namespace, "cluster", input.Cluster.Name, "name", input.Name)
+ t.log.V(6).Info("Watch already exists", "Namespace", klog.KRef(input.Cluster.Namespace, ""), "Cluster", klog.KRef(input.Cluster.Namespace, input.Cluster.Name), "Name", input.Name)
return nil
}

@@ -391,7 +392,7 @@ func (t *ClusterCacheTracker) healthCheckCluster(ctx context.Context, in *health
// NB. we are ignoring ErrWaitTimeout because this error happens when the channel is close, that in this case
// happens when the cache is explicitly stopped.
if err != nil && err != wait.ErrWaitTimeout {
- t.log.Error(err, "Error health checking cluster", "cluster", in.cluster.String())
+ t.log.Error(err, "Error health checking cluster", "Cluster", klog.KRef(in.cluster.Namespace, in.cluster.Name))
t.deleteAccessor(in.cluster)
}
}
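Two value styles coexist in this file: `cluster.String()` logs a plain `"namespace/name"` string, while `klog.KRef` produces a structured reference that renders the same in text output but as an object under a JSON encoder. A small sketch of the difference, assuming the usual `logr`, `client`, and `klog` imports:

```go
func logClusterKey(log logr.Logger, key client.ObjectKey) {
	// Plain string value: renders as "default/my-cluster" with every encoder.
	log.Info("Stopping cache", "Cluster", key.String())

	// Structured value: identical text rendering, but a JSON encoder emits
	// {"Cluster": {"name": "my-cluster", "namespace": "default"}}.
	log.Info("Stopping cache", "Cluster", klog.KRef(key.Namespace, key.Name))
}
```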
6 changes: 0 additions & 6 deletions controlplane/kubeadm/internal/control_plane.go
@@ -19,7 +19,6 @@ package internal
import (
"context"

- "github.com/go-logr/logr"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -90,11 +89,6 @@ func NewControlPlane(ctx context.Context, client client.Client, cluster *cluster
}, nil
}

- // Logger returns a logger with useful context.
- func (c *ControlPlane) Logger() logr.Logger {
- 	return Log.WithValues("namespace", c.KCP.Namespace, "name", c.KCP.Name, "cluster-name", c.Cluster.Name)
- }

// FailureDomains returns a slice of failure domain objects synced from the infrastructure provider into Cluster.Status.
func (c *ControlPlane) FailureDomains() clusterv1.FailureDomains {
if c.Cluster.Status.FailureDomains == nil {
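The context logger set up in `Reconcile` (see `controller.go` below) already carries the reconciled KubeadmControlPlane reference added by controller-runtime, and now the `Cluster` as well, so this bespoke `Logger()` helper and its package-level `Log` dependency are redundant; its call sites in `scale.go` and `upgrade.go` switch to `ctrl.LoggerFrom(ctx)`.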
12 changes: 7 additions & 5 deletions controlplane/kubeadm/internal/controllers/controller.go
@@ -28,6 +28,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/tools/record"
+ "k8s.io/klog/v2"
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -142,7 +143,8 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.
log.Info("Cluster Controller has not yet set OwnerRef")
return ctrl.Result{}, nil
}
- log = log.WithValues("cluster", cluster.Name)
+ log = log.WithValues("Cluster", klog.KObj(cluster))
+ ctx = ctrl.LoggerInto(ctx, log)

if annotations.IsPaused(cluster, kcp) {
log.Info("Reconciliation is paused for this object")
@@ -241,7 +243,7 @@ func patchKubeadmControlPlane(ctx context.Context, patchHelper *patch.Helper, kc

// reconcile handles KubeadmControlPlane reconciliation.
func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane) (res ctrl.Result, reterr error) {
- log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
+ log := ctrl.LoggerFrom(ctx)
log.Info("Reconcile KubeadmControlPlane")

// Make sure to reconcile the external infrastructure reference.
@@ -407,7 +409,7 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
// The implementation does not take non-control plane workloads into consideration. This may or may not change in the future.
// Please see https://github.com/kubernetes-sigs/cluster-api/issues/2064.
func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane) (ctrl.Result, error) {
- log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
+ log := ctrl.LoggerFrom(ctx)
log.Info("Reconcile KubeadmControlPlane deletion")

// Gets all machines, not just control plane machines.
@@ -461,7 +463,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, clu
var errs []error
for i := range machinesToDelete {
m := machinesToDelete[i]
- logger := log.WithValues("machine", m)
+ logger := log.WithValues("Machine", klog.KObj(m))
if err := r.Client.Delete(ctx, machinesToDelete[i]); err != nil && !apierrors.IsNotFound(err) {
logger.Error(err, "Failed to cleanup owned machine")
errs = append(errs, err)
@@ -525,7 +527,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileControlPlaneConditions(ctx cont
//
// NOTE: this func uses KCP conditions, it is required to call reconcileControlPlaneConditions before this.
func (r *KubeadmControlPlaneReconciler) reconcileEtcdMembers(ctx context.Context, controlPlane *internal.ControlPlane) (ctrl.Result, error) {
- log := ctrl.LoggerFrom(ctx, "cluster", controlPlane.Cluster.Name)
+ log := ctrl.LoggerFrom(ctx)

// If etcd is not managed by KCP this is a no-op.
if !controlPlane.IsEtcdManaged() {
2 changes: 1 addition & 1 deletion controlplane/kubeadm/internal/controllers/remediation.go
@@ -97,7 +97,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C
if err := patchHelper.Patch(ctx, machineToBeRemediated, patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{
clusterv1.MachineOwnerRemediatedCondition,
}}); err != nil {
- log.Error(err, "Failed to patch control plane Machine", "machine", machineToBeRemediated.Name)
+ log.Error(err, "Failed to patch control plane Machine", "Machine", machineToBeRemediated.Name)
if retErr == nil {
retErr = errors.Wrapf(err, "failed to patch control plane Machine %s", machineToBeRemediated.Name)
}
15 changes: 8 additions & 7 deletions controlplane/kubeadm/internal/controllers/scale.go
@@ -25,6 +25,7 @@ import (
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
kerrors "k8s.io/apimachinery/pkg/util/errors"
+ "k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -36,7 +37,7 @@ import (
)

func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, controlPlane *internal.ControlPlane) (ctrl.Result, error) {
- logger := controlPlane.Logger()
+ logger := ctrl.LoggerFrom(ctx)

// Perform an uncached read of all the owned machines. This check is in place to make sure
// that the controller cache is not misbehaving and we end up initializing the cluster more than once.
@@ -65,7 +66,7 @@ func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Conte
}

func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, controlPlane *internal.ControlPlane) (ctrl.Result, error) {
- logger := controlPlane.Logger()
+ logger := ctrl.LoggerFrom(ctx)

// Run preflight checks to ensure that the control plane is stable before proceeding with a scale up/scale down operation; if not, wait.
if result, err := r.preflightChecks(ctx, controlPlane); err != nil || !result.IsZero() {
@@ -92,7 +93,7 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane(
controlPlane *internal.ControlPlane,
outdatedMachines collections.Machines,
) (ctrl.Result, error) {
- logger := controlPlane.Logger()
+ logger := ctrl.LoggerFrom(ctx)

// Pick the Machine that we should scale down.
machineToDelete, err := selectMachineForScaleDown(controlPlane, outdatedMachines)
@@ -140,7 +141,7 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane(
return ctrl.Result{}, err
}

- logger = logger.WithValues("machine", machineToDelete.Name)
+ logger = logger.WithValues("Machine", klog.KObj(machineToDelete))
if err := r.Client.Delete(ctx, machineToDelete); err != nil && !apierrors.IsNotFound(err) {
logger.Error(err, "Failed to delete control plane machine")
r.recorder.Eventf(kcp, corev1.EventTypeWarning, "FailedScaleDown",
@@ -160,8 +161,8 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane(
// If the control plane is not passing preflight checks, it requeue.
//
// NOTE: this func uses KCP conditions, it is required to call reconcileControlPlaneConditions before this.
- func (r *KubeadmControlPlaneReconciler) preflightChecks(_ context.Context, controlPlane *internal.ControlPlane, excludeFor ...*clusterv1.Machine) (ctrl.Result, error) { //nolint:unparam
- 	logger := controlPlane.Logger()
+ func (r *KubeadmControlPlaneReconciler) preflightChecks(ctx context.Context, controlPlane *internal.ControlPlane, excludeFor ...*clusterv1.Machine) (ctrl.Result, error) { //nolint:unparam
+ 	logger := ctrl.LoggerFrom(ctx)

// If there is no KCP-owned control-plane machines, then control-plane has not been initialized yet,
// so it is considered ok to proceed.
@@ -200,7 +201,7 @@ loopmachines:
}

for _, condition := range allMachineHealthConditions {
- if err := preflightCheckCondition("machine", machine, condition); err != nil {
+ if err := preflightCheckCondition("Machine", machine, condition); err != nil {
machineErrors = append(machineErrors, err)
}
}
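Note the signature change above: `preflightChecks` previously ignored its context argument (`_ context.Context`); it now takes `ctx` so it can fetch the contextual logger with `ctrl.LoggerFrom`, replacing the deleted `controlPlane.Logger()` helper.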
2 changes: 1 addition & 1 deletion controlplane/kubeadm/internal/controllers/status.go
@@ -33,7 +33,7 @@ import (
// updateStatus is called after every reconcilitation loop in a defer statement to always make sure we have the
// resource status subresourcs up-to-date.
func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster) error {
- log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
+ log := ctrl.LoggerFrom(ctx, "Cluster", cluster.Name)

selector := collections.ControlPlaneSelectorForCluster(cluster.Name)
// Copy label selector to its status counterpart in string format.
2 changes: 1 addition & 1 deletion controlplane/kubeadm/internal/controllers/upgrade.go
@@ -37,7 +37,7 @@ func (r *KubeadmControlPlaneReconciler) upgradeControlPlane(
controlPlane *internal.ControlPlane,
machinesRequireUpgrade collections.Machines,
) (ctrl.Result, error) {
- logger := controlPlane.Logger()
+ logger := ctrl.LoggerFrom(ctx)

if kcp.Spec.RolloutStrategy == nil || kcp.Spec.RolloutStrategy.RollingUpdate == nil {
return ctrl.Result{}, errors.New("rolloutStrategy is not set")
exp/addons/internal/controllers/clusterresourceset_controller.go
@@ -236,7 +236,7 @@ func (r *ClusterResourceSetReconciler) getClustersByClusterResourceSetSelector(c
// It applies resources best effort and continue on scenarios like: unsupported resource types, failure during creation, missing resources.
// TODO: If a resource already exists in the cluster but not applied by ClusterResourceSet, the resource will be updated ?
func (r *ClusterResourceSetReconciler) ApplyClusterResourceSet(ctx context.Context, cluster *clusterv1.Cluster, clusterResourceSet *addonsv1.ClusterResourceSet) error {
- log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
+ log := ctrl.LoggerFrom(ctx, "Cluster", cluster.Name)

remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster))
if err != nil {
4 changes: 2 additions & 2 deletions exp/internal/controllers/machinepool_controller_noderef.go
@@ -47,7 +47,7 @@ type getNodeReferencesResult struct {
}

func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, cluster *clusterv1.Cluster, mp *expv1.MachinePool) (ctrl.Result, error) {
- log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
+ log := ctrl.LoggerFrom(ctx, "Cluster", cluster.Name)
// Check that the MachinePool hasn't been deleted or in the process.
if !mp.DeletionTimestamp.IsZero() {
return ctrl.Result{}, nil
@@ -65,7 +65,7 @@ func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, cluster *
return ctrl.Result{}, nil
}

- log = log.WithValues("cluster", cluster.Name)
+ log = log.WithValues("Cluster", cluster.Name)

// Check that the MachinePool has valid ProviderIDList.
if len(mp.Spec.ProviderIDList) == 0 && (mp.Spec.Replicas == nil || *mp.Spec.Replicas != 0) {
4 changes: 2 additions & 2 deletions exp/internal/controllers/machinepool_controller_phases.go
@@ -166,7 +166,7 @@ func (r *MachinePoolReconciler) reconcileExternal(ctx context.Context, cluster *

// reconcileBootstrap reconciles the Spec.Bootstrap.ConfigRef object on a MachinePool.
func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, cluster *clusterv1.Cluster, m *expv1.MachinePool) (ctrl.Result, error) {
- log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
+ log := ctrl.LoggerFrom(ctx, "Cluster", cluster.Name)

// Call generic external reconciler if we have an external reference.
var bootstrapConfig *unstructured.Unstructured
@@ -226,7 +226,7 @@ func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, cluster

// reconcileInfrastructure reconciles the Spec.InfrastructureRef object on a MachinePool.
func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, cluster *clusterv1.Cluster, mp *expv1.MachinePool) (ctrl.Result, error) {
- log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
+ log := ctrl.LoggerFrom(ctx, "Cluster", cluster.Name)

// Call generic external reconciler.
infraReconcileResult, err := r.reconcileExternal(ctx, cluster, mp, &mp.Spec.Template.Spec.InfrastructureRef)
14 changes: 14 additions & 0 deletions hack/observability/promtail/values.yaml
@@ -3,3 +3,17 @@
config:
# publish data to loki
lokiAddress: http://loki:3100/loki/api/v1/push

+  snippets:
+    pipelineStages:
+      # Parse cluster and machine to make them available as labels.
+      - cri: { }
+      - json:
+          expressions:
+            controller:
+            cluster: join('/',[cluster.namespace,cluster.name])
+            machine: join('/',[machine.namespace,machine.name])
+      - labels:
+          controller:
+          cluster:
+          machine:
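The `json` stage expressions are JMESPath: `join('/',[cluster.namespace,cluster.name])` flattens the structured reference written by `klog.KObj` back into a single `namespace/name` string, which the `labels` stage then promotes to a Loki label. Assuming a JSON log encoder and lowercase field names (which is what these expressions match), an input line would look roughly like:

```json
{
  "controller": "kubeadmcontrolplane",
  "cluster": {"namespace": "default", "name": "my-cluster"},
  "machine": {"namespace": "default", "name": "my-cluster-abcde"},
  "msg": "Reconcile KubeadmControlPlane"
}
```

yielding labels such as `cluster="default/my-cluster"` for LogQL queries.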
2 changes: 1 addition & 1 deletion internal/controllers/cluster/cluster_controller.go
@@ -179,7 +179,7 @@ func patchCluster(ctx context.Context, patchHelper *patch.Helper, cluster *clust

// reconcile handles cluster reconciliation.
func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster) (ctrl.Result, error) {
- log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
+ log := ctrl.LoggerFrom(ctx)

if cluster.Spec.Topology != nil {
if cluster.Spec.ControlPlaneRef == nil || cluster.Spec.InfrastructureRef == nil {
internal/controllers/clusterclass/clusterclass_controller.go
@@ -59,7 +59,7 @@ type Reconciler struct {
func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
err := ctrl.NewControllerManagedBy(mgr).
For(&clusterv1.ClusterClass{}).
- Named("topology/clusterclass").
+ Named("clusterclass").
WithOptions(options).
WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
Complete(r)