diff --git a/.dockerignore b/.dockerignore
index e12d8e1b4f40..7612be8db10a 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -9,6 +9,7 @@ logos/
 scripts/
 **/*.md
 tilt-settings.json
+tilt-settings.yaml
 tilt.d/
 Tiltfile
 **/.tiltbuild
diff --git a/.gitignore b/.gitignore
index db922aa39439..43480017389e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -75,4 +75,4 @@ out
 _releasenotes
 
 # Helm
-.helm
\ No newline at end of file
+.helm
diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go
index 7bb951f6fd20..587816e78198 100644
--- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go
+++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go
@@ -20,6 +20,7 @@ import (
 	"context"
 	"fmt"
 	"strconv"
+	"strings"
 	"time"
 
 	"github.com/blang/semver"
@@ -31,6 +32,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/klog/v2"
 	"k8s.io/utils/pointer"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -48,6 +50,7 @@ import (
 	"sigs.k8s.io/cluster-api/controllers/remote"
 	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api/feature"
+	tlog "sigs.k8s.io/cluster-api/internal/log"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/conditions"
@@ -110,9 +113,11 @@ func (r *KubeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl
 		r.TokenTTL = DefaultTokenTTL
 	}
 
+	tr := tlog.Reconciler(r)
 	b := ctrl.NewControllerManagedBy(mgr).
 		For(&bootstrapv1.KubeadmConfig{}).
 		WithOptions(options).
+		WithLoggerCustomizer(tlog.LoggerCustomizer(mgr.GetLogger(), "kubeadmconfig", "kubeadmconfig")).
 		Watches(
 			&source.Kind{Type: &clusterv1.Machine{}},
 			handler.EnqueueRequestsFromMapFunc(r.MachineToBootstrapMapFunc),
@@ -125,7 +130,7 @@ func (r *KubeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl
 		).WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue))
 	}
 
-	c, err := b.Build(r)
+	c, err := b.Build(tr)
 	if err != nil {
 		return errors.Wrap(err, "failed setting up with a controller manager")
 	}
@@ -172,7 +177,7 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques
 	if configOwner == nil {
 		return ctrl.Result{}, nil
 	}
-	log = log.WithValues("kind", configOwner.GetKind(), "version", configOwner.GetResourceVersion(), "name", configOwner.GetName())
+	log = log.WithValues(strings.ToLower(configOwner.GetKind()), klog.KRef(configOwner.GetNamespace(), configOwner.GetName()).String(), "version", configOwner.GetResourceVersion())
 
 	// Lookup the cluster the config owner is associated with
 	cluster, err := util.GetClusterByName(ctx, r.Client, configOwner.GetNamespace(), configOwner.ClusterName())
@@ -190,6 +195,9 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques
 		return ctrl.Result{}, err
 	}
 
+	log = log.WithValues("cluster", klog.KObj(cluster).String())
+	ctx = ctrl.LoggerInto(ctx, log)
+
 	if annotations.IsPaused(cluster, config) {
 		log.Info("Reconciliation is paused for this object")
 		return ctrl.Result{}, nil
diff --git a/controlplane/kubeadm/internal/control_plane.go b/controlplane/kubeadm/internal/control_plane.go
index 7293a5074ed6..e649959c9925 100644
--- a/controlplane/kubeadm/internal/control_plane.go
+++ b/controlplane/kubeadm/internal/control_plane.go
@@ -19,7 +19,6 @@ package internal
 import (
 	"context"
 
-	"github.com/go-logr/logr"
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -90,11 +89,6 @@ func NewControlPlane(ctx context.Context, client client.Client, cluster *cluster
 	}, nil
 }
 
-// Logger returns a logger with useful context.
-func (c *ControlPlane) Logger() logr.Logger {
-	return Log.WithValues("namespace", c.KCP.Namespace, "name", c.KCP.Name, "cluster-name", c.Cluster.Name)
-}
-
 // FailureDomains returns a slice of failure domain objects synced from the infrastructure provider into Cluster.Status.
 func (c *ControlPlane) FailureDomains() clusterv1.FailureDomains {
 	if c.Cluster.Status.FailureDomains == nil {
diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go
index fd9b5ad93175..04fcb49cf5f7 100644
--- a/controlplane/kubeadm/internal/controllers/controller.go
+++ b/controlplane/kubeadm/internal/controllers/controller.go
@@ -28,6 +28,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/client-go/tools/record"
+	"k8s.io/klog/v2"
 	"k8s.io/utils/pointer"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -43,6 +44,7 @@ import (
 	"sigs.k8s.io/cluster-api/controlplane/kubeadm/internal"
 	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api/feature"
+	tlog "sigs.k8s.io/cluster-api/internal/log"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/collections"
@@ -77,12 +79,14 @@ type KubeadmControlPlaneReconciler struct {
 }
 
 func (r *KubeadmControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
+	tr := tlog.Reconciler(r)
 	c, err := ctrl.NewControllerManagedBy(mgr).
 		For(&controlplanev1.KubeadmControlPlane{}).
 		Owns(&clusterv1.Machine{}).
 		WithOptions(options).
+		WithLoggerCustomizer(tlog.LoggerCustomizer(mgr.GetLogger(), "kubeadmcontrolplane", "kubeadmcontrolplane")).
 		WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
-		Build(r)
+		Build(tr)
 	if err != nil {
 		return errors.Wrap(err, "failed setting up with a controller manager")
 	}
@@ -142,7 +146,8 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.
 		log.Info("Cluster Controller has not yet set OwnerRef")
 		return ctrl.Result{}, nil
 	}
-	log = log.WithValues("cluster", cluster.Name)
+	log = log.WithValues("cluster", klog.KObj(cluster).String())
+	ctx = ctrl.LoggerInto(ctx, log)
 
 	if annotations.IsPaused(cluster, kcp) {
 		log.Info("Reconciliation is paused for this object")
@@ -247,7 +252,7 @@ func patchKubeadmControlPlane(ctx context.Context, patchHelper *patch.Helper, kc
 
 // reconcile handles KubeadmControlPlane reconciliation.
 func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane) (res ctrl.Result, reterr error) {
-	log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
+	log := ctrl.LoggerFrom(ctx)
 	log.Info("Reconcile KubeadmControlPlane")
 
 	// Make sure to reconcile the external infrastructure reference.
@@ -407,7 +412,7 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 // The implementation does not take non-control plane workloads into consideration. This may or may not change in the future.
 // Please see https://github.com/kubernetes-sigs/cluster-api/issues/2064.
 func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane) (ctrl.Result, error) {
-	log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
+	log := ctrl.LoggerFrom(ctx)
 	log.Info("Reconcile KubeadmControlPlane deletion")
 
 	// Gets all machines, not just control plane machines.
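As an aside on the pattern used throughout these hunks: klog.KObj and klog.KRef render an object reference as "namespace/name" (or just "name" when the namespace is empty), which is what makes the new "cluster", "machine" and "machineset" values uniform and greppable. A minimal sketch, assuming klog v2's ObjectRef semantics; the object name is made up for illustration:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	// KObj reads namespace/name from any object implementing klog.KMetadata.
	m := &metav1.ObjectMeta{Namespace: "default", Name: "kcp-4kb9w"}
	fmt.Println(klog.KObj(m).String()) // prints "default/kcp-4kb9w"

	// KRef builds the same reference from raw strings; cluster-scoped objects
	// such as Nodes pass an empty namespace, so only the name is printed.
	fmt.Println(klog.KRef("", "worker-node-1").String()) // prints "worker-node-1"
}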
@@ -461,7 +466,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, clu
 	var errs []error
 	for i := range machinesToDelete {
 		m := machinesToDelete[i]
-		logger := log.WithValues("machine", m)
+		logger := log.WithValues("machine", klog.KObj(m).String())
 		if err := r.Client.Delete(ctx, machinesToDelete[i]); err != nil && !apierrors.IsNotFound(err) {
 			logger.Error(err, "Failed to cleanup owned machine")
 			errs = append(errs, err)
@@ -525,7 +530,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileControlPlaneConditions(ctx cont
 //
 // NOTE: this func uses KCP conditions, it is required to call reconcileControlPlaneConditions before this.
 func (r *KubeadmControlPlaneReconciler) reconcileEtcdMembers(ctx context.Context, controlPlane *internal.ControlPlane) (ctrl.Result, error) {
-	log := ctrl.LoggerFrom(ctx, "cluster", controlPlane.Cluster.Name)
+	log := ctrl.LoggerFrom(ctx)
 
 	// If etcd is not managed by KCP this is a no-op.
 	if !controlPlane.IsEtcdManaged() {
diff --git a/controlplane/kubeadm/internal/controllers/scale.go b/controlplane/kubeadm/internal/controllers/scale.go
index 0fcf1d022d38..6d03a9698f1e 100644
--- a/controlplane/kubeadm/internal/controllers/scale.go
+++ b/controlplane/kubeadm/internal/controllers/scale.go
@@ -25,6 +25,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -36,7 +37,7 @@ import (
 )
 
 func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, controlPlane *internal.ControlPlane) (ctrl.Result, error) {
-	logger := controlPlane.Logger()
+	logger := ctrl.LoggerFrom(ctx)
 
 	// Perform an uncached read of all the owned machines. This check is in place to make sure
 	// that the controller cache is not misbehaving and we end up initializing the cluster more than once.
@@ -65,7 +66,7 @@ func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Conte
 }
 
 func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, controlPlane *internal.ControlPlane) (ctrl.Result, error) {
-	logger := controlPlane.Logger()
+	logger := ctrl.LoggerFrom(ctx)
 
 	// Run preflight checks to ensure that the control plane is stable before proceeding with a scale up/scale down operation; if not, wait.
 	if result, err := r.preflightChecks(ctx, controlPlane); err != nil || !result.IsZero() {
@@ -92,7 +93,7 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane(
 	controlPlane *internal.ControlPlane,
 	outdatedMachines collections.Machines,
 ) (ctrl.Result, error) {
-	logger := controlPlane.Logger()
+	logger := ctrl.LoggerFrom(ctx)
 
 	// Pick the Machine that we should scale down.
 	machineToDelete, err := selectMachineForScaleDown(controlPlane, outdatedMachines)
@@ -140,7 +141,7 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane(
 		return ctrl.Result{}, err
 	}
 
-	logger = logger.WithValues("machine", machineToDelete.Name)
+	logger = logger.WithValues("machine", klog.KObj(machineToDelete).String())
 	if err := r.Client.Delete(ctx, machineToDelete); err != nil && !apierrors.IsNotFound(err) {
 		logger.Error(err, "Failed to delete control plane machine")
 		r.recorder.Eventf(kcp, corev1.EventTypeWarning, "FailedScaleDown",
@@ -160,8 +161,8 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane(
 // If the control plane is not passing preflight checks, it requeue.
 //
 // NOTE: this func uses KCP conditions, it is required to call reconcileControlPlaneConditions before this.
-func (r *KubeadmControlPlaneReconciler) preflightChecks(_ context.Context, controlPlane *internal.ControlPlane, excludeFor ...*clusterv1.Machine) (ctrl.Result, error) { //nolint:unparam
-	logger := controlPlane.Logger()
+func (r *KubeadmControlPlaneReconciler) preflightChecks(ctx context.Context, controlPlane *internal.ControlPlane, excludeFor ...*clusterv1.Machine) (ctrl.Result, error) { //nolint:unparam
+	logger := ctrl.LoggerFrom(ctx)
 
 	// If there is no KCP-owned control-plane machines, then control-plane has not been initialized yet,
 	// so it is considered ok to proceed.
diff --git a/controlplane/kubeadm/internal/controllers/upgrade.go b/controlplane/kubeadm/internal/controllers/upgrade.go
index 9913bb6ad576..df268b5b9fee 100644
--- a/controlplane/kubeadm/internal/controllers/upgrade.go
+++ b/controlplane/kubeadm/internal/controllers/upgrade.go
@@ -37,7 +37,7 @@ func (r *KubeadmControlPlaneReconciler) upgradeControlPlane(
 	controlPlane *internal.ControlPlane,
 	machinesRequireUpgrade collections.Machines,
 ) (ctrl.Result, error) {
-	logger := controlPlane.Logger()
+	logger := ctrl.LoggerFrom(ctx)
 
 	if kcp.Spec.RolloutStrategy == nil || kcp.Spec.RolloutStrategy.RollingUpdate == nil {
 		return ctrl.Result{}, errors.New("rolloutStrategy is not set")
diff --git a/go.mod b/go.mod
index f4ac8fb2a9eb..0bde6a648371 100644
--- a/go.mod
+++ b/go.mod
@@ -154,3 +154,9 @@ require (
 	sigs.k8s.io/kustomize/kyaml v0.13.0 // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect
 )
+
+// pick up a klog version with the "v" fix
+replace k8s.io/klog/v2 => k8s.io/klog/v2 v2.40.2-0.20220216121836-84f3ebd27f1b
+
+// pick up a CR version with the LoggerCustomizer
+replace sigs.k8s.io/controller-runtime => github.com/sbueringer/controller-runtime v0.2.0-beta.1.0.20220216141822-42d8935efb64
diff --git a/go.sum b/go.sum
index a9ec7b532967..b31ea1e3ae9d 100644
--- a/go.sum
+++ b/go.sum
@@ -221,7 +221,6 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE=
 github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk=
@@ -566,6 +565,8 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
 github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE=
+github.com/sbueringer/controller-runtime v0.2.0-beta.1.0.20220216141822-42d8935efb64 h1:96vdkD/aJhUesygierntSRE1w3oPmNIiB8MQT3bfY4k=
+github.com/sbueringer/controller-runtime v0.2.0-beta.1.0.20220216141822-42d8935efb64/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
 github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
@@ -1190,10 +1191,8 @@ k8s.io/component-base v0.23.0/go.mod h1:DHH5uiFvLC1edCpvcTDV++NKULdYYU6pR9Tt3HIK
 k8s.io/component-helpers v0.23.0/go.mod h1:liXMh6FZS4qamKtMJQ7uLHnFe3tlC86RX5mJEk/aerg=
 k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
-k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
-k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw=
-k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.40.2-0.20220216121836-84f3ebd27f1b h1:WNBS7yqSdxZAO15Jp1e35mnWkEM/9LS2FAjlpVOQipI=
+k8s.io/klog/v2 v2.40.2-0.20220216121836-84f3ebd27f1b/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
 k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
 k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4=
 k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
@@ -1207,8 +1206,6 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25/go.mod h1:Mlj9PNLmG9bZ6BHFwFKDo5afkpWyUISkb9Me0GnK66I=
-sigs.k8s.io/controller-runtime v0.11.1 h1:7YIHT2QnHJArj/dk9aUkYhfqfK5cIxPOX5gPECfdZLU=
-sigs.k8s.io/controller-runtime v0.11.1/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA=
 sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
 sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
 sigs.k8s.io/kustomize/api v0.10.1 h1:KgU7hfYoscuqag84kxtzKdEC3mKMb99DPI3a0eaV1d0=
diff --git a/hack/observability/grafana/values.yaml b/hack/observability/grafana/values.yaml
index 3c9a09271603..117b8a8e6dd8 100644
--- a/hack/observability/grafana/values.yaml
+++ b/hack/observability/grafana/values.yaml
@@ -24,4 +24,4 @@ datasources:
 
 # Disable grafana test framework
 testFramework:
-  enabled: false
\ No newline at end of file
+  enabled: false
diff --git a/hack/observability/promtail/values.yaml b/hack/observability/promtail/values.yaml
index 0058b19ce826..6786f6d98f0d 100644
--- a/hack/observability/promtail/values.yaml
+++ b/hack/observability/promtail/values.yaml
@@ -2,4 +2,16 @@
 config:
 
   # publish data to loki
-  lokiAddress: http://loki:3100/loki/api/v1/push
\ No newline at end of file
+  lokiAddress: http://loki:3100/loki/api/v1/push
+
+  snippets:
+    pipelineStages:
+      # Parse cluster and machine to make them available as labels.
+      - cri: { }
+      - json:
+          expressions:
+            cluster: cluster
+            machine: machine
+      - labels:
+          cluster:
+          machine:
diff --git a/hack/tools/go.mod b/hack/tools/go.mod
index 6dfc91c41583..d054d313f385 100644
--- a/hack/tools/go.mod
+++ b/hack/tools/go.mod
@@ -175,3 +175,9 @@ require (
 	sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect
 	sigs.k8s.io/yaml v1.3.0 // indirect
 )
+
+// pick up a klog version with the "v" fix
+replace k8s.io/klog/v2 => k8s.io/klog/v2 v2.40.2-0.20220216121836-84f3ebd27f1b
+
+// pick up a CR version with the LoggerCustomizer
+replace sigs.k8s.io/controller-runtime => github.com/sbueringer/controller-runtime v0.2.0-beta.1.0.20220216141822-42d8935efb64
diff --git a/hack/tools/go.sum b/hack/tools/go.sum
index 49b2843e6e2e..2299bb14fb77 100644
--- a/hack/tools/go.sum
+++ b/hack/tools/go.sum
@@ -1039,6 +1039,8 @@ github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYI
 github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sbueringer/controller-runtime v0.2.0-beta.1.0.20220216141822-42d8935efb64 h1:96vdkD/aJhUesygierntSRE1w3oPmNIiB8MQT3bfY4k=
+github.com/sbueringer/controller-runtime v0.2.0-beta.1.0.20220216141822-42d8935efb64/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA=
 github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
@@ -1900,11 +1902,8 @@ k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
 k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c h1:GohjlNKauSai7gN4wsJkeZ3WAJx4Sh+oT/b5IYn5suA=
 k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
-k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
-k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw=
-k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.40.2-0.20220216121836-84f3ebd27f1b h1:WNBS7yqSdxZAO15Jp1e35mnWkEM/9LS2FAjlpVOQipI=
+k8s.io/klog/v2 v2.40.2-0.20220216121836-84f3ebd27f1b/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
 k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
 k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
 k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4=
@@ -1925,8 +1924,6 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25/go.mod h1:Mlj9PNLmG9bZ6BHFwFKDo5afkpWyUISkb9Me0GnK66I=
-sigs.k8s.io/controller-runtime v0.11.1 h1:7YIHT2QnHJArj/dk9aUkYhfqfK5cIxPOX5gPECfdZLU=
-sigs.k8s.io/controller-runtime v0.11.1/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA=
 sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20211110210527-619e6b92dab9 h1:ylYUI5uaq/guUFerFRVG81FHSA5/3+fERCE1RQbQUZ4=
 sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20211110210527-619e6b92dab9/go.mod h1:+sJcI1F0QI0Cv+8fp5rH5B2fK1LxzrAQqYnaPx9nY8I=
 sigs.k8s.io/controller-tools v0.8.0 h1:uUkfTGEwrguqYYfcI2RRGUnC8mYdCFDqfwPKUcNJh1o=
diff --git a/internal/controllers/cluster/cluster_controller.go b/internal/controllers/cluster/cluster_controller.go
index f987e76069d8..5706282a23a9 100644
--- a/internal/controllers/cluster/cluster_controller.go
+++ b/internal/controllers/cluster/cluster_controller.go
@@ -41,6 +41,7 @@ import (
 	"sigs.k8s.io/cluster-api/controllers/external"
 	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api/feature"
+	tlog "sigs.k8s.io/cluster-api/internal/log"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/collections"
@@ -75,6 +76,7 @@ type Reconciler struct {
 }
 
 func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
+	tr := tlog.Reconciler(r)
 	controller, err := ctrl.NewControllerManagedBy(mgr).
 		For(&clusterv1.Cluster{}).
 		Watches(
@@ -82,8 +84,9 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt
 			handler.EnqueueRequestsFromMapFunc(r.controlPlaneMachineToCluster),
 		).
 		WithOptions(options).
+		WithLoggerCustomizer(tlog.LoggerCustomizer(mgr.GetLogger(), "cluster", "cluster")).
 		WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
-		Build(r)
+		Build(tr)
 
 	if err != nil {
 		return errors.Wrap(err, "failed setting up with a controller manager")
@@ -178,7 +181,7 @@ func patchCluster(ctx context.Context, patchHelper *patch.Helper, cluster *clust
 
 // reconcile handles cluster reconciliation.
 func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster) (ctrl.Result, error) {
-	log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
+	log := ctrl.LoggerFrom(ctx)
 
 	if cluster.Spec.Topology != nil {
 		if cluster.Spec.ControlPlaneRef == nil || cluster.Spec.InfrastructureRef == nil {
diff --git a/internal/controllers/clusterclass/clusterclass_controller.go b/internal/controllers/clusterclass/clusterclass_controller.go
index b9798e4e7592..0cb4f5d539c9 100644
--- a/internal/controllers/clusterclass/clusterclass_controller.go
+++ b/internal/controllers/clusterclass/clusterclass_controller.go
@@ -57,12 +57,14 @@ type Reconciler struct {
 }
 
 func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
+	tr := tlog.Reconciler(r)
 	err := ctrl.NewControllerManagedBy(mgr).
 		For(&clusterv1.ClusterClass{}).
-		Named("topology/clusterclass").
+		Named("clusterclass").
 		WithOptions(options).
+		WithLoggerCustomizer(tlog.LoggerCustomizer(mgr.GetLogger(), "clusterclass", "clusterclass")).
 		WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
-		Complete(r)
+		Complete(tr)
 	if err != nil {
 		return errors.Wrap(err, "failed setting up with a controller manager")
 	}
diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go
index 9da2cd58c854..481480403fdc 100644
--- a/internal/controllers/machine/machine_controller.go
+++ b/internal/controllers/machine/machine_controller.go
@@ -46,6 +46,7 @@ import (
 	"sigs.k8s.io/cluster-api/controllers/external"
 	"sigs.k8s.io/cluster-api/controllers/noderefutil"
 	"sigs.k8s.io/cluster-api/controllers/remote"
+	"sigs.k8s.io/cluster-api/internal/log"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/collections"
@@ -102,11 +103,13 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt
 		r.nodeDeletionRetryTimeout = 10 * time.Second
 	}
 
+	tr := log.Reconciler(r)
 	controller, err := ctrl.NewControllerManagedBy(mgr).
 		For(&clusterv1.Machine{}).
 		WithOptions(options).
+		WithLoggerCustomizer(log.LoggerCustomizer(mgr.GetLogger(), "machine", "machine")).
 		WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
-		Build(r)
+		Build(tr)
 	if err != nil {
 		return errors.Wrap(err, "failed setting up with a controller manager")
 	}
@@ -155,6 +158,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
 			m.Spec.ClusterName, m.Name, m.Namespace)
 	}
 
+	log = log.WithValues("cluster", klog.KObj(cluster).String())
+	ctx = ctrl.LoggerInto(ctx, log)
+
 	// Return early if the object or Cluster is paused.
 	if annotations.IsPaused(cluster, m) {
 		log.Info("Reconciliation is paused for this object")
@@ -283,15 +289,19 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster,
 	return res, kerrors.NewAggregate(errs)
 }
 
-func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, m *clusterv1.Machine) (ctrl.Result, error) {
-	log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
+func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, m *clusterv1.Machine) (ctrl.Result, error) { //nolint:gocyclo
+	log := ctrl.LoggerFrom(ctx)
 
 	err := r.isDeleteNodeAllowed(ctx, cluster, m)
 	isDeleteNodeAllowed := err == nil
 	if err != nil {
 		switch err {
 		case errNoControlPlaneNodes, errLastControlPlaneNode, errNilNodeRef, errClusterIsBeingDeleted, errControlPlaneIsBeingDeleted:
-			log.Info("Deleting Kubernetes Node associated with Machine is not allowed", "node", m.Status.NodeRef, "cause", err.Error())
+			var nodeName = ""
+			if m.Status.NodeRef != nil {
+				nodeName = m.Status.NodeRef.Name
+			}
+			log.Info("Deleting Kubernetes Node associated with Machine is not allowed", "node", klog.KRef("", nodeName).String(), "cause", err.Error())
 		default:
 			return ctrl.Result{}, errors.Wrapf(err, "failed to check if Kubernetes Node deletion is allowed")
 		}
@@ -313,7 +323,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu
 		return ctrl.Result{}, err
 	}
 
-	log.Info("Draining node", "node", m.Status.NodeRef.Name)
+	log.Info("Draining node", "node", klog.KRef("", m.Status.NodeRef.Name).String())
 	// The DrainingSucceededCondition never exists before the node is drained for the first time,
 	// so its transition time can be used to record the first time draining.
 	// This `if` condition prevents the transition time to be changed more than once.
@@ -340,12 +350,12 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu
 		if conditions.Get(m, clusterv1.VolumeDetachSucceededCondition) == nil {
 			conditions.MarkFalse(m, clusterv1.VolumeDetachSucceededCondition, clusterv1.WaitingForVolumeDetachReason, clusterv1.ConditionSeverityInfo, "Waiting for node volumes to be detached")
 		}
-		if ok, err := r.shouldWaitForNodeVolumes(ctx, cluster, m.Status.NodeRef.Name, m.Name); ok || err != nil {
+		if ok, err := r.shouldWaitForNodeVolumes(ctx, cluster, m.Status.NodeRef.Name); ok || err != nil {
 			if err != nil {
 				r.recorder.Eventf(m, corev1.EventTypeWarning, "FailedWaitForVolumeDetach", "error wait for volume detach, node %q: %v", m.Status.NodeRef.Name, err)
 				return ctrl.Result{}, err
 			}
-			log.Info("Waiting for node volumes to be detached", "node", m.Status.NodeRef.Name)
+			log.Info("Waiting for node volumes to be detached", "node", klog.KRef("", m.Status.NodeRef.Name).String())
 			return ctrl.Result{}, nil
 		}
 		conditions.MarkTrue(m, clusterv1.VolumeDetachSucceededCondition)
@@ -385,7 +395,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu
 	// We only delete the node after the underlying infrastructure is gone.
 	// https://github.com/kubernetes-sigs/cluster-api/issues/2565
 	if isDeleteNodeAllowed {
-		log.Info("Deleting node", "node", m.Status.NodeRef.Name)
+		log.Info("Deleting node", "node", klog.KRef("", m.Status.NodeRef.Name).String())
 
 		var deleteNodeErr error
 		waitErr := wait.PollImmediate(2*time.Second, r.nodeDeletionRetryTimeout, func() (bool, error) {
@@ -395,7 +405,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu
 			return true, nil
 		})
 		if waitErr != nil {
-			log.Error(deleteNodeErr, "Timed out deleting node", "node", m.Status.NodeRef.Name)
+			log.Error(deleteNodeErr, "Timed out deleting node", "node", klog.KRef("", m.Status.NodeRef.Name).String())
 			conditions.MarkFalse(m, clusterv1.MachineNodeHealthyCondition, clusterv1.DeletionFailedReason, clusterv1.ConditionSeverityWarning, "")
 			r.recorder.Eventf(m, corev1.EventTypeWarning, "FailedDeleteNode", "error deleting Machine's node: %v", deleteNodeErr)
@@ -443,7 +453,7 @@ func (r *Reconciler) nodeDrainTimeoutExceeded(machine *clusterv1.Machine) bool {
 
 // isDeleteNodeAllowed returns nil only if the Machine's NodeRef is not nil
 // and if the Machine is not the last control plane node in the cluster.
 func (r *Reconciler) isDeleteNodeAllowed(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error {
-	log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
+	log := ctrl.LoggerFrom(ctx)
 
 	// Return early if the cluster is being deleted.
 	if !cluster.DeletionTimestamp.IsZero() {
 		return errClusterIsBeingDeleted
@@ -503,7 +513,7 @@ func (r *Reconciler) isDeleteNodeAllowed(ctx context.Context, cluster *clusterv1
 }
 
 func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster, nodeName string) (ctrl.Result, error) {
-	log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name, "node", nodeName)
+	log := ctrl.LoggerFrom(ctx, "node", klog.KRef("", nodeName).String())
 
 	restConfig, err := remote.RESTConfig(ctx, controllerName, r.Client, util.ObjectKey(cluster))
 	if err != nil {
@@ -542,7 +552,7 @@ func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster,
 				verbStr = "Evicted"
 			}
 			log.Info(fmt.Sprintf("%s pod from Node", verbStr),
-				"pod", fmt.Sprintf("%s/%s", pod.Name, pod.Namespace))
+				"pod", klog.KObj(pod).String())
 		},
 		Out:    writer{klog.Info},
 		ErrOut: writer{klog.Error},
@@ -574,8 +584,8 @@ func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster,
 // this could cause issue for some storage provisioner, for example, vsphere-volume this is problematic
 // because if the node is deleted before detach success, then the underline VMDK will be deleted together with the Machine
 // so after node draining we need to check if all volumes are detached before deleting the node.
-func (r *Reconciler) shouldWaitForNodeVolumes(ctx context.Context, cluster *clusterv1.Cluster, nodeName string, machineName string) (bool, error) {
-	log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name, "node", nodeName, "machine", machineName)
+func (r *Reconciler) shouldWaitForNodeVolumes(ctx context.Context, cluster *clusterv1.Cluster, nodeName string) (bool, error) {
+	log := ctrl.LoggerFrom(ctx, "node", klog.KRef("", nodeName).String())
 
 	remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster))
 	if err != nil {
@@ -595,7 +605,7 @@ func (r *Reconciler) shouldWaitForNodeVolumes(ctx context.Context, cluster *clus
 }
 
 func (r *Reconciler) deleteNode(ctx context.Context, cluster *clusterv1.Cluster, name string) error {
-	log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
+	log := ctrl.LoggerFrom(ctx)
 
 	remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster))
 	if err != nil {
diff --git a/internal/controllers/machine/machine_controller_noderef.go b/internal/controllers/machine/machine_controller_noderef.go
index 3641258a385f..b3a4db756057 100644
--- a/internal/controllers/machine/machine_controller_noderef.go
+++ b/internal/controllers/machine/machine_controller_noderef.go
@@ -23,6 +23,7 @@ import (
 
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -41,8 +42,7 @@ var (
 )
 
 func (r *Reconciler) reconcileNode(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) (ctrl.Result, error) {
-	log := ctrl.LoggerFrom(ctx, "machine", machine.Name, "namespace", machine.Namespace)
-	log = log.WithValues("cluster", cluster.Name)
+	log := ctrl.LoggerFrom(ctx)
 
 	// Check that the Machine has a valid ProviderID.
 	if machine.Spec.ProviderID == nil || *machine.Spec.ProviderID == "" {
@@ -189,7 +189,7 @@ func (r *Reconciler) getNode(ctx context.Context, c client.Reader, providerID *n
 	for key, node := range nl.Items {
 		nodeProviderID, err := noderefutil.NewProviderID(node.Spec.ProviderID)
 		if err != nil {
-			log.Error(err, "Failed to parse ProviderID", "node", client.ObjectKeyFromObject(&nl.Items[key]).String())
+			log.Error(err, "Failed to parse ProviderID", "node", klog.KRef("", nl.Items[key].GetName()).String())
 			continue
 		}
diff --git a/internal/controllers/machine/machine_controller_phases.go b/internal/controllers/machine/machine_controller_phases.go
index 8ffe77e9a2cc..60208c15705d 100644
--- a/internal/controllers/machine/machine_controller_phases.go
+++ b/internal/controllers/machine/machine_controller_phases.go
@@ -27,6 +27,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/klog/v2"
 	"k8s.io/utils/pointer"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
@@ -88,7 +89,7 @@ func (r *Reconciler) reconcilePhase(_ context.Context, m *clusterv1.Machine) {
 
 // reconcileExternal handles generic unstructured objects referenced by a Machine.
 func (r *Reconciler) reconcileExternal(ctx context.Context, cluster *clusterv1.Cluster, m *clusterv1.Machine, ref *corev1.ObjectReference) (external.ReconcileOutput, error) {
-	log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
+	log := ctrl.LoggerFrom(ctx, "cluster", klog.KObj(cluster).String())
 
 	if err := utilconversion.UpdateReferenceAPIContract(ctx, r.Client, r.APIReader, ref); err != nil {
 		return external.ReconcileOutput{}, err
@@ -172,7 +173,7 @@ func (r *Reconciler) reconcileExternal(ctx context.Context, cluster *clusterv1.C
 
 // reconcileBootstrap reconciles the Spec.Bootstrap.ConfigRef object on a Machine.
 func (r *Reconciler) reconcileBootstrap(ctx context.Context, cluster *clusterv1.Cluster, m *clusterv1.Machine) (ctrl.Result, error) {
-	log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
+	log := ctrl.LoggerFrom(ctx)
 
 	// If the bootstrap data is populated, set ready and return.
 	if m.Spec.Bootstrap.DataSecretName != nil {
@@ -237,7 +238,7 @@ func (r *Reconciler) reconcileBootstrap(ctx context.Context, cluster *clusterv1.
 
 // reconcileInfrastructure reconciles the Spec.InfrastructureRef object on a Machine.
 func (r *Reconciler) reconcileInfrastructure(ctx context.Context, cluster *clusterv1.Cluster, m *clusterv1.Machine) (ctrl.Result, error) {
-	log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
+	log := ctrl.LoggerFrom(ctx)
 
 	// Call generic external reconciler.
 	infraReconcileResult, err := r.reconcileExternal(ctx, cluster, m, &m.Spec.InfrastructureRef)
diff --git a/internal/controllers/machinedeployment/machinedeployment_controller.go b/internal/controllers/machinedeployment/machinedeployment_controller.go
index 2ba6ee903ea1..ca18e87fea47 100644
--- a/internal/controllers/machinedeployment/machinedeployment_controller.go
+++ b/internal/controllers/machinedeployment/machinedeployment_controller.go
@@ -28,6 +28,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/client-go/tools/record"
+	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
@@ -36,6 +37,7 @@ import (
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/controllers/external"
+	tlog "sigs.k8s.io/cluster-api/internal/log"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/conditions"
@@ -72,6 +74,7 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt
 		return err
 	}
 
+	tr := tlog.Reconciler(r)
 	c, err := ctrl.NewControllerManagedBy(mgr).
 		For(&clusterv1.MachineDeployment{}).
 		Owns(&clusterv1.MachineSet{}).
@@ -80,8 +83,9 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt
 			handler.EnqueueRequestsFromMapFunc(r.MachineSetToDeployments),
 		).
 		WithOptions(options).
+		WithLoggerCustomizer(tlog.LoggerCustomizer(mgr.GetLogger(), "machinedeployment", "machinedeployment")).
 		WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
-		Build(r)
+		Build(tr)
 	if err != nil {
 		return errors.Wrap(err, "failed setting up with a controller manager")
 	}
@@ -118,11 +122,17 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
 		return ctrl.Result{}, err
 	}
 
+	log = log.WithValues("machinedeployment", klog.KObj(deployment).String())
+	ctx = ctrl.LoggerInto(ctx, log)
+
 	cluster, err := util.GetClusterByName(ctx, r.Client, deployment.Namespace, deployment.Spec.ClusterName)
 	if err != nil {
 		return ctrl.Result{}, err
 	}
 
+	log = log.WithValues("cluster", klog.KObj(cluster).String())
+	ctx = ctrl.LoggerInto(ctx, log)
+
 	// Return early if the object or Cluster is paused.
 	if annotations.IsPaused(cluster, deployment) {
 		log.Info("Reconciliation is paused for this object")
@@ -260,19 +270,19 @@ func (r *Reconciler) getMachineSetsForDeployment(ctx context.Context, d *cluster
 
 		selector, err := metav1.LabelSelectorAsSelector(&d.Spec.Selector)
 		if err != nil {
-			log.Error(err, "Skipping MachineSet, failed to get label selector from spec selector", "machineset", ms.Name)
+			log.Error(err, "Skipping MachineSet, failed to get label selector from spec selector", "machineset", klog.KObj(ms).String())
 			continue
 		}
 
 		// If a MachineDeployment with a nil or empty selector creeps in, it should match nothing, not everything.
 		if selector.Empty() {
-			log.Info("Skipping MachineSet as the selector is empty", "machineset", ms.Name)
+			log.Info("Skipping MachineSet as the selector is empty", "machineset", klog.KObj(ms).String())
 			continue
 		}
 
 		// Skip this MachineSet unless either selector matches or it has a controller ref pointing to this MachineDeployment
 		if !selector.Matches(labels.Set(ms.Labels)) && !metav1.IsControlledBy(ms, d) {
-			log.V(4).Info("Skipping MachineSet, label mismatch", "machineset", ms.Name)
+			log.V(4).Info("Skipping MachineSet, label mismatch", "machineset", klog.KObj(ms).String())
 			continue
 		}
 
@@ -283,7 +293,7 @@ func (r *Reconciler) getMachineSetsForDeployment(ctx context.Context, d *cluster
 			r.recorder.Eventf(d, corev1.EventTypeWarning, "FailedAdopt", "Failed to adopt MachineSet %q: %v", ms.Name, err)
 			continue
 		}
-		log.Info("Adopted MachineSet into MachineDeployment", "machineset", ms.Name)
+		log.Info("Adopted MachineSet into MachineDeployment", "machineset", klog.KObj(ms).String())
 		r.recorder.Eventf(d, corev1.EventTypeNormal, "SuccessfulAdopt", "Adopted MachineSet %q", ms.Name)
 	}
 
diff --git a/internal/controllers/machinedeployment/mdutil/util.go b/internal/controllers/machinedeployment/mdutil/util.go
index 751ad34c0656..9cd7cf6e62d1 100644
--- a/internal/controllers/machinedeployment/mdutil/util.go
+++ b/internal/controllers/machinedeployment/mdutil/util.go
@@ -33,6 +33,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	intstrutil "k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/klog/v2"
 	"k8s.io/utils/integer"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -168,7 +169,7 @@ func getMaxReplicasAnnotation(ms *clusterv1.MachineSet, logger logr.Logger) (int
 }
 
 func getIntFromAnnotation(ms *clusterv1.MachineSet, annotationKey string, logger logr.Logger) (int32, bool) {
-	logger = logger.WithValues("machineset", ms.Name, "annotationKey", annotationKey)
+	logger = logger.WithValues("machineset", klog.KObj(ms).String(), "annotationKey", annotationKey)
 
 	annotationValue, ok := ms.Annotations[annotationKey]
 	if !ok {
@@ -185,7 +186,7 @@ func getIntFromAnnotation(ms *clusterv1.MachineSet, annotationKey string, logger
 
 // SetNewMachineSetAnnotations sets new machine set's annotations appropriately by updating its revision and
 // copying required deployment annotations to it; it returns true if machine set's annotation is changed.
 func SetNewMachineSetAnnotations(deployment *clusterv1.MachineDeployment, newMS *clusterv1.MachineSet, newRevision string, exists bool, logger logr.Logger) bool {
-	logger = logger.WithValues("machineset", newMS.Name)
+	logger = logger.WithValues("machineset", klog.KObj(newMS).String())
 
 	// First, copy deployment's annotations (except for apply and revision annotations)
 	annotationChanged := copyDeploymentAnnotationsToMachineSet(deployment, newMS)
diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go
index 91ddee5f46b6..b4c324e046b0 100644
--- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go
+++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go
@@ -34,6 +34,7 @@ import (
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/client-go/tools/record"
+	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
@@ -46,6 +47,7 @@ import (
 	"sigs.k8s.io/cluster-api/controllers/external"
 	"sigs.k8s.io/cluster-api/controllers/remote"
 	"sigs.k8s.io/cluster-api/internal/controllers/machine"
+	tlog "sigs.k8s.io/cluster-api/internal/log"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/conditions"
@@ -84,6 +86,7 @@ type Reconciler struct {
 }
 
 func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
+	tr := tlog.Reconciler(r)
 	controller, err := ctrl.NewControllerManagedBy(mgr).
 		For(&clusterv1.MachineHealthCheck{}).
 		Watches(
@@ -91,8 +94,9 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt
 			handler.EnqueueRequestsFromMapFunc(r.machineToMachineHealthCheck),
 		).
 		WithOptions(options).
+		WithLoggerCustomizer(tlog.LoggerCustomizer(mgr.GetLogger(), "machinehealthcheck", "machinehealthcheck")).
 		WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
-		Build(r)
+		Build(tr)
 	if err != nil {
 		return errors.Wrap(err, "failed setting up with a controller manager")
 	}
@@ -132,7 +136,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
 		return ctrl.Result{}, err
 	}
 
-	log = log.WithValues("cluster", m.Spec.ClusterName)
+	log = log.WithValues("cluster", klog.KRef(m.Namespace, m.Spec.ClusterName).String())
 	ctx = ctrl.LoggerInto(ctx, log)
 
 	cluster, err := util.GetClusterByName(ctx, r.Client, m.Namespace, m.Spec.ClusterName)
diff --git a/internal/controllers/machineset/machineset_controller.go b/internal/controllers/machineset/machineset_controller.go
index 5d85a596c0e6..5d3d302084a5 100644
--- a/internal/controllers/machineset/machineset_controller.go
+++ b/internal/controllers/machineset/machineset_controller.go
@@ -29,6 +29,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/client-go/tools/record"
+	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
@@ -40,6 +41,7 @@ import (
 	"sigs.k8s.io/cluster-api/controllers/noderefutil"
 	"sigs.k8s.io/cluster-api/controllers/remote"
 	"sigs.k8s.io/cluster-api/internal/controllers/machine"
+	tlog "sigs.k8s.io/cluster-api/internal/log"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/collections"
@@ -85,6 +87,7 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt
 		return err
 	}
 
+	tr := tlog.Reconciler(r)
 	c, err := ctrl.NewControllerManagedBy(mgr).
 		For(&clusterv1.MachineSet{}).
 		Owns(&clusterv1.Machine{}).
@@ -93,8 +96,9 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt
 			handler.EnqueueRequestsFromMapFunc(r.MachineToMachineSets),
 		).
 		WithOptions(options).
+		WithLoggerCustomizer(tlog.LoggerCustomizer(mgr.GetLogger(), "machineset", "machineset")).
 		WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
-		Build(r)
+		Build(tr)
 	if err != nil {
 		return errors.Wrap(err, "failed setting up with a controller manager")
 	}
@@ -135,6 +139,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
 		return ctrl.Result{}, err
 	}
 
+	log = log.WithValues("cluster", klog.KObj(cluster).String())
+	ctx = ctrl.LoggerInto(ctx, log)
+
 	// Return early if the object or Cluster is paused.
 	if annotations.IsPaused(cluster, machineSet) {
 		log.Info("Reconciliation is paused for this object")
@@ -259,11 +266,11 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster,
 		// Attempt to adopt machine if it meets previous conditions and it has no controller references.
 		if metav1.GetControllerOf(machine) == nil {
 			if err := r.adoptOrphan(ctx, machineSet, machine); err != nil {
-				log.Error(err, "Failed to adopt Machine", "machine", machine.Name)
+				log.Error(err, "Failed to adopt Machine", "machine", klog.KObj(machine).String())
 				r.recorder.Eventf(machineSet, corev1.EventTypeWarning, "FailedAdopt", "Failed to adopt Machine %q: %v", machine.Name, err)
 				continue
 			}
-			log.Info("Adopted Machine", "machine", machine.Name)
+			log.Info("Adopted Machine", "machine", klog.KObj(machine).String())
 			r.recorder.Eventf(machineSet, corev1.EventTypeNormal, "SuccessfulAdopt", "Adopted Machine %q", machine.Name)
 		}
 
@@ -278,7 +285,7 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster,
 			continue
 		}
 		if conditions.IsFalse(machine, clusterv1.MachineOwnerRemediatedCondition) {
-			log.Info("Deleting unhealthy machine", "machine", machine.GetName())
+			log.Info("Deleting unhealthy machine", "machine", klog.KObj(machine).String())
 			patch := client.MergeFrom(machine.DeepCopy())
 			if err := r.Client.Delete(ctx, machine); err != nil {
 				errs = append(errs, errors.Wrap(err, "failed to delete"))
@@ -566,7 +573,7 @@ func (r *Reconciler) waitForMachineDeletion(ctx context.Context, machineList []*
 func (r *Reconciler) MachineToMachineSets(o client.Object) []ctrl.Request {
 	ctx := context.Background()
 	// This won't log unless the global logger is set
-	log := ctrl.LoggerFrom(ctx, "object", client.ObjectKeyFromObject(o))
+	log := ctrl.LoggerFrom(ctx, "object", klog.KObj(o).String())
 	result := []ctrl.Request{}
 
 	m, ok := o.(*clusterv1.Machine)
@@ -689,7 +696,7 @@ func (r *Reconciler) updateStatus(ctx context.Context, cluster *clusterv1.Cluste
 	newStatus.ObservedGeneration = ms.Generation
 	newStatus.DeepCopyInto(&ms.Status)
 
-	log.V(4).Info(fmt.Sprintf("Updating status for %v: %s/%s, ", ms.Kind, ms.Namespace, ms.Name) +
+	log.V(4).Info("Updating status: " +
 		fmt.Sprintf("replicas %d->%d (need %d), ", ms.Status.Replicas, newStatus.Replicas, desiredReplicas) +
 		fmt.Sprintf("fullyLabeledReplicas %d->%d, ", ms.Status.FullyLabeledReplicas, newStatus.FullyLabeledReplicas) +
 		fmt.Sprintf("readyReplicas %d->%d, ", ms.Status.ReadyReplicas, newStatus.ReadyReplicas) +
diff --git a/internal/controllers/topology/cluster/cluster_controller.go b/internal/controllers/topology/cluster/cluster_controller.go
index 883a9aa896ac..8018c179f9a1 100644
--- a/internal/controllers/topology/cluster/cluster_controller.go
+++ b/internal/controllers/topology/cluster/cluster_controller.go
@@ -25,6 +25,7 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/client-go/tools/record"
+	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/builder"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -37,6 +38,7 @@ import (
 	"sigs.k8s.io/cluster-api/controllers/external"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope"
+	tlog "sigs.k8s.io/cluster-api/internal/log"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/patch"
@@ -73,6 +75,7 @@ type Reconciler struct {
 }
 
 func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
+	tr := tlog.Reconciler(r)
 	c, err := ctrl.NewControllerManagedBy(mgr).
 		For(&clusterv1.Cluster{}, builder.WithPredicates(
 			// Only reconcile Cluster with topology.
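The WithLoggerCustomizer calls in these SetupWithManager hunks take a controller name and a kind key; LoggerCustomizer, added in internal/log/log.go further down in this diff, turns them into per-request key/value pairs. A hypothetical wiring sketch of what the customized logger carries (the name "ms-1" is made up, and the exact rendering depends on the configured logr sink):

package main

import (
	"k8s.io/apimachinery/pkg/types"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	tlog "sigs.k8s.io/cluster-api/internal/log"
)

func main() {
	// ctrl.Log stands in for mgr.GetLogger() so the snippet is self-contained;
	// outside cluster-api itself this import would be blocked by Go's
	// internal-package rule, so this only illustrates the call shape.
	customize := tlog.LoggerCustomizer(ctrl.Log, "machineset", "machineset")
	log := customize(ctrl.Log, reconcile.Request{
		NamespacedName: types.NamespacedName{Namespace: "default", Name: "ms-1"},
	})
	// Emits key/value pairs roughly like:
	//   "controller"="machineset" "reconcileID"="<32 hex chars>" "machineset"="default/ms-1"
	log.Info("Reconciling")
}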
@@ -90,8 +93,9 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt
 			builder.WithPredicates(predicates.ResourceIsTopologyOwned(ctrl.LoggerFrom(ctx))),
 		).
 		WithOptions(options).
+		WithLoggerCustomizer(tlog.LoggerCustomizer(mgr.GetLogger(), "topology/cluster", "cluster")).
 		WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
-		Build(r)
+		Build(tr)
 
 	if err != nil {
 		return errors.Wrap(err, "failed setting up with a controller manager")
@@ -129,6 +133,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
 	}
 	cluster.Kind = "Cluster"
 
+	log = log.WithValues("cluster", klog.KObj(cluster).String())
+	ctx = ctrl.LoggerInto(ctx, log)
+
 	// Return early, if the Cluster does not use a managed topology.
 	// NOTE: We're already filtering events, but this is a safeguard for cases like e.g. when
 	// there are MachineDeployments which have the topology owned label, but the corresponding
diff --git a/internal/controllers/topology/machinedeployment/machinedeployment_controller.go b/internal/controllers/topology/machinedeployment/machinedeployment_controller.go
index 5910f4ab1951..358450fa0462 100644
--- a/internal/controllers/topology/machinedeployment/machinedeployment_controller.go
+++ b/internal/controllers/topology/machinedeployment/machinedeployment_controller.go
@@ -53,15 +53,17 @@ type Reconciler struct {
 }
 
 func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
+	tr := tlog.Reconciler(r)
 	err := ctrl.NewControllerManagedBy(mgr).
 		For(&clusterv1.MachineDeployment{}).
 		Named("topology/machinedeployment").
 		WithOptions(options).
+		WithLoggerCustomizer(tlog.LoggerCustomizer(mgr.GetLogger(), "topology/machinedeployment", "machinedeployment")).
 		WithEventFilter(predicates.All(ctrl.LoggerFrom(ctx),
 			predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue),
 			predicates.ResourceIsTopologyOwned(ctrl.LoggerFrom(ctx)),
 		)).
-		Complete(r)
+		Complete(tr)
 	if err != nil {
 		return errors.Wrap(err, "failed setting up with a controller manager")
 	}
diff --git a/internal/controllers/topology/machineset/machineset_controller.go b/internal/controllers/topology/machineset/machineset_controller.go
index 09d129f2b113..f36cbec0725b 100644
--- a/internal/controllers/topology/machineset/machineset_controller.go
+++ b/internal/controllers/topology/machineset/machineset_controller.go
@@ -54,15 +54,17 @@ type Reconciler struct {
 }
 
 func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
+	tr := tlog.Reconciler(r)
 	err := ctrl.NewControllerManagedBy(mgr).
 		For(&clusterv1.MachineSet{}).
 		Named("topology/machineset").
 		WithOptions(options).
+		WithLoggerCustomizer(tlog.LoggerCustomizer(mgr.GetLogger(), "topology/machineset", "machineset")).
 		WithEventFilter(predicates.All(ctrl.LoggerFrom(ctx),
 			predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue),
 			predicates.ResourceIsTopologyOwned(ctrl.LoggerFrom(ctx)),
 		)).
-		Complete(r)
+		Complete(tr)
 	if err != nil {
 		return errors.Wrap(err, "failed setting up with a controller manager")
 	}
diff --git a/internal/log/log.go b/internal/log/log.go
index b946000d9b59..64e1fd5eef4d 100644
--- a/internal/log/log.go
+++ b/internal/log/log.go
@@ -18,16 +18,82 @@ package log
 
 import (
 	"context"
+	crand "crypto/rand"
+	"encoding/binary"
+	"encoding/hex"
 	"fmt"
+	"math/rand"
 
 	"github.com/go-logr/logr"
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 )
 
+// LoggerCustomizer returns a logger customizer func which enriches the per-request
+// logger with the controller name, a reconcileID and a reference to the reconciled object.
+func LoggerCustomizer(log logr.Logger, controllerName, kind string) func(_ logr.Logger, req reconcile.Request) logr.Logger {
+	// FIXME: We need further discussion on how we want to customize the CR logger/context,
+	// e.g. to make it future-proof for tracing.
+	var rngSeed int64
+	_ = binary.Read(crand.Reader, binary.LittleEndian, &rngSeed)
+	randSource := rand.New(rand.NewSource(rngSeed)) //nolint:gosec // math/rand is enough, we don't need crypto/rand.
+
+	return func(_ logr.Logger, req reconcile.Request) logr.Logger {
+		return log.
+			WithValues("controller", controllerName).
+			WithValues("reconcileID", generateReconcileID(randSource)).
+			WithValues(kind, klog.KRef(req.Namespace, req.Name).String())
+	}
+}
+
+// logReconciler wraps a reconcile.Reconciler to add logging.
+type logReconciler struct {
+	Reconciler reconcile.Reconciler
+	randSource *rand.Rand
+}
+
+// Reconciler creates a reconciler which wraps the given reconciler and adds logs & traces.
+func Reconciler(reconciler reconcile.Reconciler) reconcile.Reconciler {
+	var rngSeed int64
+	_ = binary.Read(crand.Reader, binary.LittleEndian, &rngSeed)
+	return &logReconciler{
+		Reconciler: reconciler,
+		randSource: rand.New(rand.NewSource(rngSeed)), //nolint:gosec // math/rand is enough, we don't need crypto/rand.
+	}
+}
+
+// Reconcile delegates to the wrapped reconciler.
+// FIXME: We should really make sure the log.Error in CR gets our k/v pairs too (currently reconcileID), e.g. by:
+// * creating this span in CR.
+// * disabling the error log in CR and doing it ourselves.
+func (r *logReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
+	//// Add reconcileID to logger.
+	//log := ctrl.LoggerFrom(ctx)
+	//log = log.WithValues("reconcileID", generateReconcileID(r.randSource))
+	//ctx = ctrl.LoggerInto(ctx, log)
+	//
+	//log.Info("Reconciliation started")
+	//
+	res, err := r.Reconciler.Reconcile(ctx, req)
+	//if err != nil {
+	//	log.Error(err, "Reconciliation finished with error")
+	//} else {
+	//	log.Info("Reconciliation finished")
+	//}
+	//
+	return res, err
+}
+
+func generateReconcileID(randSource *rand.Rand) string {
+	id := [16]byte{}
+	_, _ = randSource.Read(id[:])
+	return hex.EncodeToString(id[:])
+}
+
 // LoggerFrom returns a logger with predefined values from a context.Context.
 // The logger, when used with controllers, can be expected to contain basic information about the object
 // that's being reconciled like:
diff --git a/test/e2e/config/docker.yaml b/test/e2e/config/docker.yaml
index 8a1ab2babacd..b30a3c0618d7 100644
--- a/test/e2e/config/docker.yaml
+++ b/test/e2e/config/docker.yaml
@@ -60,7 +60,7 @@ providers:
       value: ../../../config/default
       replacements:
         - old: --metrics-bind-addr=localhost:8080
-          new: --metrics-bind-addr=:8080
+          new: "--metrics-bind-addr=:8080\n - --logging-format=json"
     files:
       - sourcePath: "../data/shared/v1beta1/metadata.yaml"
 
@@ -98,7 +98,7 @@ providers:
       value: ../../../bootstrap/kubeadm/config/default
       replacements:
        - old: --metrics-bind-addr=localhost:8080
-          new: --metrics-bind-addr=:8080
+          new: "--metrics-bind-addr=:8080\n - --logging-format=json"
     files:
       - sourcePath: "../data/shared/v1beta1/metadata.yaml"
 
@@ -136,7 +136,7 @@ providers:
       value: ../../../controlplane/kubeadm/config/default
       replacements:
        - old: --metrics-bind-addr=localhost:8080
-          new: --metrics-bind-addr=:8080
+          new: "--metrics-bind-addr=:8080\n - --logging-format=json"
     files:
       - sourcePath: "../data/shared/v1beta1/metadata.yaml"
 
@@ -177,7 +177,7 @@ providers:
       value: ../../../test/infrastructure/docker/config/default
       replacements:
        - old: --metrics-bind-addr=localhost:8080
-          new: --metrics-bind-addr=:8080
+          new: "--metrics-bind-addr=:8080\n - --logging-format=json"
     files:
       # Add cluster templates
       - sourcePath: "../data/infrastructure-docker/v1beta1/cluster-template.yaml"
diff --git a/test/framework/deployment_helpers.go b/test/framework/deployment_helpers.go
index 405a6d57edfb..4789b4ddba73 100644
--- a/test/framework/deployment_helpers.go
+++ b/test/framework/deployment_helpers.go
@@ -19,6 +19,7 @@ package framework
 import (
 	"bufio"
 	"context"
+	"encoding/json"
 	"fmt"
 	"io"
 	"os"
@@ -98,6 +99,17 @@ type WatchDeploymentLogsInput struct {
 	LogPath string
 }
 
+type Metadata struct {
+	Job       string `json:"job"`
+	Namespace string `json:"namespace"`
+	App       string `json:"app"`
+	Pod       string `json:"pod"`
+	Container string `json:"container"`
+	NodeName  string `json:"node_name"`
+	Stream    string `json:"stream"`
+	// FIXME: cluster/machine? (per log line)
+}
+
 // WatchDeploymentLogs streams logs for all containers for all pods belonging to a deployment. Each container's logs are streamed
 // in a separate goroutine so they can all be streamed concurrently. This only causes a test failure if there are errors
 // retrieving the deployment, its pods, or setting up a log file. If there is an error with the log streaming itself,
@@ -121,6 +133,23 @@ func WatchDeploymentLogs(ctx context.Context, input WatchDeploymentLogsInput) {
 		for _, container := range deployment.Spec.Template.Spec.Containers {
 			log.Logf("Creating log watcher for controller %s/%s, pod %s, container %s", input.Deployment.Namespace, input.Deployment.Name, pod.Name, container.Name)
 
+			logMetadataFile := filepath.Clean(path.Join(input.LogPath, input.Deployment.Name, pod.Name, container.Name+".json"))
+			Expect(os.MkdirAll(filepath.Dir(logMetadataFile), 0750)).To(Succeed())
+
+			// FIXME: verify where promtail takes the data from
+			metadata := Metadata{
+				Job:       input.Deployment.Namespace + "/" + input.Deployment.Name,
+				Namespace: input.Deployment.Namespace,
+				App:       input.Deployment.Name,
+				Pod:       pod.Name,
+				Container: container.Name,
+				NodeName:  pod.Spec.NodeName,
+				Stream:    "stderr",
+			}
+			metadataBytes, err := json.Marshal(&metadata)
+			Expect(err).To(BeNil())
+			Expect(os.WriteFile(logMetadataFile, metadataBytes, 0600)).To(Succeed())
+
 			// Watch each container's logs in a goroutine so we can stream them all concurrently.
 			go func(pod corev1.Pod, container corev1.Container) {
 				defer GinkgoRecover()
diff --git a/test/go.mod b/test/go.mod
index 335f912dd9e7..84c76464ef11 100644
--- a/test/go.mod
+++ b/test/go.mod
@@ -123,3 +123,9 @@ require (
 	sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect
 )
+
+// pick up a klog version with the "v" fix
+replace k8s.io/klog/v2 => k8s.io/klog/v2 v2.40.2-0.20220216121836-84f3ebd27f1b
+
+// pick up a CR version with the LoggerCustomizer
+replace sigs.k8s.io/controller-runtime => github.com/sbueringer/controller-runtime v0.2.0-beta.1.0.20220216141822-42d8935efb64
diff --git a/test/go.sum b/test/go.sum
index b89563b61082..6ccd79b7cf30 100644
--- a/test/go.sum
+++ b/test/go.sum
@@ -808,6 +808,8 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb
 github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
 github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE=
 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sbueringer/controller-runtime v0.2.0-beta.1.0.20220216141822-42d8935efb64 h1:96vdkD/aJhUesygierntSRE1w3oPmNIiB8MQT3bfY4k=
+github.com/sbueringer/controller-runtime v0.2.0-beta.1.0.20220216141822-42d8935efb64/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
 github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
@@ -1541,11 +1543,8 @@ k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
 k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
 k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
-k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
-k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw=
-k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.40.2-0.20220216121836-84f3ebd27f1b h1:WNBS7yqSdxZAO15Jp1e35mnWkEM/9LS2FAjlpVOQipI= +k8s.io/klog/v2 v2.40.2-0.20220216121836-84f3ebd27f1b/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= @@ -1563,8 +1562,6 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25/go.mod h1:Mlj9PNLmG9bZ6BHFwFKDo5afkpWyUISkb9Me0GnK66I= -sigs.k8s.io/controller-runtime v0.11.1 h1:7YIHT2QnHJArj/dk9aUkYhfqfK5cIxPOX5gPECfdZLU= -sigs.k8s.io/controller-runtime v0.11.1/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= sigs.k8s.io/kind v0.11.1 h1:pVzOkhUwMBrCB0Q/WllQDO3v14Y+o2V0tFgjTqIUjwA= diff --git a/test/infrastructure/docker/internal/controllers/dockercluster_controller.go b/test/infrastructure/docker/internal/controllers/dockercluster_controller.go index 189aa401c7e0..92a5db3f9d3d 100644 --- a/test/infrastructure/docker/internal/controllers/dockercluster_controller.go +++ b/test/infrastructure/docker/internal/controllers/dockercluster_controller.go @@ -22,6 +22,7 @@ import ( "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -30,6 +31,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + tlog "sigs.k8s.io/cluster-api/internal/log" "sigs.k8s.io/cluster-api/test/infrastructure/container" infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" "sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/docker" @@ -73,7 +75,8 @@ func (r *DockerClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, nil } - log = log.WithValues("cluster", cluster.Name) + log = log.WithValues("cluster", klog.KObj(cluster).String()) + ctx = ctrl.LoggerInto(ctx, log) // Create a helper for managing a docker container hosting the loadbalancer. externalLoadBalancer, err := docker.NewLoadBalancer(ctx, cluster, dockerCluster) @@ -190,11 +193,13 @@ func (r *DockerClusterReconciler) reconcileDelete(ctx context.Context, dockerClu // SetupWithManager will add watches for this controller. func (r *DockerClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { + tr := tlog.Reconciler(r) c, err := ctrl.NewControllerManagedBy(mgr). For(&infrav1.DockerCluster{}). WithOptions(options). + WithLoggerCustomizer(tlog.LoggerCustomizer(mgr.GetLogger(), "dockercluster", "dockercluster")). WithEventFilter(predicates.ResourceNotPaused(ctrl.LoggerFrom(ctx))). 
- Build(r) + Build(tr) if err != nil { return err } diff --git a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go index 24e365414516..5054377da94b 100644 --- a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go +++ b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go @@ -25,6 +25,7 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -35,6 +36,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + tlog "sigs.k8s.io/cluster-api/internal/log" "sigs.k8s.io/cluster-api/test/infrastructure/container" infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" "sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/docker" @@ -80,7 +82,8 @@ func (r *DockerMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, nil } - log = log.WithValues("machine", machine.Name) + log = log.WithValues("machine", klog.KObj(machine).String()) + ctx = ctrl.LoggerInto(ctx, log) // Fetch the Cluster. cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta) @@ -93,7 +96,8 @@ func (r *DockerMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, nil } - log = log.WithValues("cluster", cluster.Name) + log = log.WithValues("cluster", klog.KObj(cluster).String()) + ctx = ctrl.LoggerInto(ctx, log) // Return early if the object or Cluster is paused. if annotations.IsPaused(cluster, dockerMachine) { @@ -112,8 +116,6 @@ func (r *DockerMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, nil } - log = log.WithValues("docker-cluster", dockerCluster.Name) - // Initialize the patch helper patchHelper, err := patch.NewHelper(dockerMachine, r) if err != nil { @@ -362,9 +364,11 @@ func (r *DockerMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl return err } + tr := tlog.Reconciler(r) c, err := ctrl.NewControllerManagedBy(mgr). For(&infrav1.DockerMachine{}). WithOptions(options). + WithLoggerCustomizer(tlog.LoggerCustomizer(mgr.GetLogger(), "dockermachine", "dockermachine")). WithEventFilter(predicates.ResourceNotPaused(ctrl.LoggerFrom(ctx))). Watches( &source.Kind{Type: &clusterv1.Machine{}}, @@ -374,7 +378,7 @@ func (r *DockerMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl &source.Kind{Type: &infrav1.DockerCluster{}}, handler.EnqueueRequestsFromMapFunc(r.DockerClusterToDockerMachines), ). - Build(r) + Build(tr) if err != nil { return err } diff --git a/test/infrastructure/docker/internal/docker/loadbalancer.go b/test/infrastructure/docker/internal/docker/loadbalancer.go index badf1079d870..3130aff08880 100644 --- a/test/infrastructure/docker/internal/docker/loadbalancer.go +++ b/test/infrastructure/docker/internal/docker/loadbalancer.go @@ -107,7 +107,7 @@ func (s *LoadBalancer) containerName() string { // Create creates a docker container hosting a load balancer for the cluster. 
func (s *LoadBalancer) Create(ctx context.Context) error { log := ctrl.LoggerFrom(ctx) - log = log.WithValues("cluster", s.name, "ipFamily", s.ipFamily) + log = log.WithValues("ipFamily", s.ipFamily) listenAddr := "0.0.0.0" if s.ipFamily == clusterv1.IPv6IPFamily {
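// ---------------------------------------------------------------------------
// Editor's note (illustrative, not part of the patch): the recurring change in
// this diff replaces bare object names in log key/value pairs with
// klog.KObj/klog.KRef, so values render as "namespace/name" rather than just
// "name". Dropping the redundant "cluster" value in Create above works because
// callers now store the enriched logger in the context via ctrl.LoggerInto.
// A minimal, self-contained demonstration; the pod and its namespace/name are
// hypothetical:
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "lb"}}
	// KObj derives the reference from the object; KRef takes namespace and name
	// explicitly. Both print "default/lb".
	fmt.Println(klog.KObj(pod).String())
	fmt.Println(klog.KRef("default", "lb").String())
}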