diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go index 183f99c1e288..dfc1e9e75646 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go @@ -991,7 +991,7 @@ func (r *KubeadmConfigReconciler) reconcileDiscovery(ctx context.Context, cluste apiServerEndpoint = cluster.Spec.ControlPlaneEndpoint.String() config.Spec.JoinConfiguration.Discovery.BootstrapToken.APIServerEndpoint = apiServerEndpoint - log.V(3).Info("Altering JoinConfiguration.Discovery.BootstrapToken.APIServerEndpoint", "APIServerEndpoint", apiServerEndpoint) + log.V(3).Info("Altering JoinConfiguration.Discovery.BootstrapToken.APIServerEndpoint", "apiServerEndpoint", apiServerEndpoint) } // if BootstrapToken already contains a token, respect it; otherwise create a new bootstrap token for the node to join @@ -1029,39 +1029,39 @@ func (r *KubeadmConfigReconciler) reconcileTopLevelObjectSettings(ctx context.Co // then use Cluster's ControlPlaneEndpoint as a control plane endpoint for the Kubernetes cluster. if config.Spec.ClusterConfiguration.ControlPlaneEndpoint == "" && cluster.Spec.ControlPlaneEndpoint.IsValid() { config.Spec.ClusterConfiguration.ControlPlaneEndpoint = cluster.Spec.ControlPlaneEndpoint.String() - log.V(3).Info("Altering ClusterConfiguration.ControlPlaneEndpoint", "ControlPlaneEndpoint", config.Spec.ClusterConfiguration.ControlPlaneEndpoint) + log.V(3).Info("Altering ClusterConfiguration.ControlPlaneEndpoint", "controlPlaneEndpoint", config.Spec.ClusterConfiguration.ControlPlaneEndpoint) } // If there are no ClusterName defined in ClusterConfiguration, use Cluster.Name if config.Spec.ClusterConfiguration.ClusterName == "" { config.Spec.ClusterConfiguration.ClusterName = cluster.Name - log.V(3).Info("Altering ClusterConfiguration.ClusterName", "ClusterName", config.Spec.ClusterConfiguration.ClusterName) + log.V(3).Info("Altering ClusterConfiguration.ClusterName", "clusterName", config.Spec.ClusterConfiguration.ClusterName) } // If there are no Network settings defined in ClusterConfiguration, use ClusterNetwork settings, if defined if cluster.Spec.ClusterNetwork != nil { if config.Spec.ClusterConfiguration.Networking.DNSDomain == "" && cluster.Spec.ClusterNetwork.ServiceDomain != "" { config.Spec.ClusterConfiguration.Networking.DNSDomain = cluster.Spec.ClusterNetwork.ServiceDomain - log.V(3).Info("Altering ClusterConfiguration.Networking.DNSDomain", "DNSDomain", config.Spec.ClusterConfiguration.Networking.DNSDomain) + log.V(3).Info("Altering ClusterConfiguration.Networking.DNSDomain", "dnsDomain", config.Spec.ClusterConfiguration.Networking.DNSDomain) } if config.Spec.ClusterConfiguration.Networking.ServiceSubnet == "" && cluster.Spec.ClusterNetwork.Services != nil && len(cluster.Spec.ClusterNetwork.Services.CIDRBlocks) > 0 { config.Spec.ClusterConfiguration.Networking.ServiceSubnet = cluster.Spec.ClusterNetwork.Services.String() - log.V(3).Info("Altering ClusterConfiguration.Networking.ServiceSubnet", "ServiceSubnet", config.Spec.ClusterConfiguration.Networking.ServiceSubnet) + log.V(3).Info("Altering ClusterConfiguration.Networking.ServiceSubnet", "serviceSubnet", config.Spec.ClusterConfiguration.Networking.ServiceSubnet) } if config.Spec.ClusterConfiguration.Networking.PodSubnet == "" && cluster.Spec.ClusterNetwork.Pods != nil && len(cluster.Spec.ClusterNetwork.Pods.CIDRBlocks) > 0 { 
config.Spec.ClusterConfiguration.Networking.PodSubnet = cluster.Spec.ClusterNetwork.Pods.String() - log.V(3).Info("Altering ClusterConfiguration.Networking.PodSubnet", "PodSubnet", config.Spec.ClusterConfiguration.Networking.PodSubnet) + log.V(3).Info("Altering ClusterConfiguration.Networking.PodSubnet", "podSubnet", config.Spec.ClusterConfiguration.Networking.PodSubnet) } } // If there are no KubernetesVersion settings defined in ClusterConfiguration, use Version from machine, if defined if config.Spec.ClusterConfiguration.KubernetesVersion == "" && machine.Spec.Version != nil { config.Spec.ClusterConfiguration.KubernetesVersion = *machine.Spec.Version - log.V(3).Info("Altering ClusterConfiguration.KubernetesVersion", "KubernetesVersion", config.Spec.ClusterConfiguration.KubernetesVersion) + log.V(3).Info("Altering ClusterConfiguration.KubernetesVersion", "kubernetesVersion", config.Spec.ClusterConfiguration.KubernetesVersion) } } diff --git a/bootstrap/kubeadm/main.go b/bootstrap/kubeadm/main.go index 4ef2ed5bf08c..847f9733ce23 100644 --- a/bootstrap/kubeadm/main.go +++ b/bootstrap/kubeadm/main.go @@ -273,7 +273,7 @@ func main() { setupWebhooks(mgr) setupReconcilers(ctx, mgr) - setupLog.Info("starting manager", "version", version.Get().String()) + setupLog.Info("Starting manager", "version", version.Get().String()) if err := mgr.Start(ctx); err != nil { setupLog.Error(err, "problem running manager") os.Exit(1) diff --git a/cmd/clusterctl/client/alpha/machinedeployment.go b/cmd/clusterctl/client/alpha/machinedeployment.go index 301eee10ba5a..dbbaac049b6c 100644 --- a/cmd/clusterctl/client/alpha/machinedeployment.go +++ b/cmd/clusterctl/client/alpha/machinedeployment.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -134,26 +135,27 @@ func getMachineSetsForDeployment(ctx context.Context, proxy cluster.Proxy, md *c filtered := make([]*clusterv1.MachineSet, 0, len(machineSets.Items)) for idx := range machineSets.Items { ms := &machineSets.Items[idx] + log := log.WithValues("MachineSet", klog.KObj(ms)) // Skip this MachineSet if its controller ref is not pointing to this MachineDeployment if !metav1.IsControlledBy(ms, md) { - log.V(5).Info("Skipping MachineSet, controller ref does not match MachineDeployment", "machineset", ms.Name) + log.V(5).Info("Skipping MachineSet, controller ref does not match MachineDeployment") continue } selector, err := metav1.LabelSelectorAsSelector(&md.Spec.Selector) if err != nil { - log.V(5).Info("Skipping MachineSet, failed to get label selector from spec selector", "machineset", ms.Name) + log.V(5).Info("Skipping MachineSet, failed to get label selector from spec selector") continue } // If a MachineDeployment with a nil or empty selector creeps in, it should match nothing, not everything. 
if selector.Empty() { - log.V(5).Info("Skipping MachineSet as the selector is empty", "machineset", ms.Name) + log.V(5).Info("Skipping MachineSet as the selector is empty") continue } // Skip this MachineSet if selector does not match if !selector.Matches(labels.Set(ms.Labels)) { - log.V(5).Info("Skipping MachineSet, label mismatch", "machineset", ms.Name) + log.V(5).Info("Skipping MachineSet, label mismatch") continue } filtered = append(filtered, ms) diff --git a/cmd/clusterctl/client/cluster/cert_manager.go b/cmd/clusterctl/client/cluster/cert_manager.go index 637dbffc5c7e..b387398891df 100644 --- a/cmd/clusterctl/client/cluster/cert_manager.go +++ b/cmd/clusterctl/client/cluster/cert_manager.go @@ -177,7 +177,7 @@ func (cm *certManagerClient) EnsureInstalled(ctx context.Context) error { func (cm *certManagerClient) install(ctx context.Context, version string, objs []unstructured.Unstructured) error { log := logf.Log - log.Info("Installing cert-manager", "Version", version) + log.Info("Installing cert-manager", "version", version) // Install all cert-manager manifests createCertManagerBackoff := newWriteBackoff() @@ -282,7 +282,7 @@ func (cm *certManagerClient) EnsureLatestVersion(ctx context.Context) error { // delete the cert-manager version currently installed (because it should be upgraded); // NOTE: CRDs, and namespace are preserved in order to avoid deletion of user objects; // web-hooks are preserved to avoid a user attempting to CREATE a cert-manager resource while the upgrade is in progress. - log.Info("Deleting cert-manager", "Version", currentVersion) + log.Info("Deleting cert-manager", "version", currentVersion) if err := cm.deleteObjs(ctx, objs); err != nil { return err } diff --git a/cmd/clusterctl/client/cluster/client.go b/cmd/clusterctl/client/cluster/client.go index 9cc824fc1a5a..a6c0e3b87868 100644 --- a/cmd/clusterctl/client/cluster/client.go +++ b/cmd/clusterctl/client/cluster/client.go @@ -227,7 +227,7 @@ func retryWithExponentialBackoff(ctx context.Context, opts wait.Backoff, operati i++ if err := operation(ctx); err != nil { if i < opts.Steps { - log.V(5).Info("Retrying with backoff", "Cause", err.Error()) + log.V(5).Info("Retrying with backoff", "cause", err.Error()) return false, nil } return false, err diff --git a/cmd/clusterctl/client/cluster/components.go b/cmd/clusterctl/client/cluster/components.go index d8bee25f1fa1..bf5a7be3870b 100644 --- a/cmd/clusterctl/client/cluster/components.go +++ b/cmd/clusterctl/client/cluster/components.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -135,7 +136,7 @@ func (p *providerComponents) createObj(ctx context.Context, obj unstructured.Uns func (p *providerComponents) Delete(ctx context.Context, options DeleteOptions) error { log := logf.Log - log.Info("Deleting", "Provider", options.Provider.Name, "Version", options.Provider.Version, "Namespace", options.Provider.Namespace) + log.Info("Deleting", "Provider", klog.KObj(&options.Provider), "providerVersion", options.Provider.Version) // Fetch all the components belonging to a provider. // We want that the delete operation is able to clean-up everything. 
@@ -264,7 +265,7 @@ func (p *providerComponents) DeleteWebhookNamespace(ctx context.Context) error { func (p *providerComponents) ValidateNoObjectsExist(ctx context.Context, provider clusterctlv1.Provider) error { log := logf.Log - log.Info("Checking for CRs", "Provider", provider.Name, "Version", provider.Version, "Namespace", provider.Namespace) + log.Info("Checking for CRs", "Provider", klog.KObj(&provider), "providerVersion", provider.Version) proxyClient, err := p.proxy.NewClient(ctx) if err != nil { diff --git a/cmd/clusterctl/client/cluster/crd_migration.go b/cmd/clusterctl/client/cluster/crd_migration.go index 98098f911461..08a62a85df76 100644 --- a/cmd/clusterctl/client/cluster/crd_migration.go +++ b/cmd/clusterctl/client/cluster/crd_migration.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/cluster-api/cmd/clusterctl/internal/scheme" @@ -117,7 +118,7 @@ func (m *crdMigrator) run(ctx context.Context, newCRD *apiextensionsv1.CustomRes // Note: We want to migrate objects to new storage versions as soon as possible // to prevent unnecessary conversion webhook calls. if currentStatusStoredVersions.Len() == 1 && currentCRD.Status.StoredVersions[0] == currentStorageVersion { - log.V(2).Info("CRD migration check passed", "name", newCRD.Name) + log.V(2).Info("CRD migration check passed", "CustomResourceDefinition", klog.KObj(newCRD)) return false, nil } @@ -141,8 +142,8 @@ func (m *crdMigrator) run(ctx context.Context, newCRD *apiextensionsv1.CustomRes } func (m *crdMigrator) migrateResourcesForCRD(ctx context.Context, crd *apiextensionsv1.CustomResourceDefinition, currentStorageVersion string) error { - log := logf.Log - log.Info("Migrating CRs, this operation may take a while...", "kind", crd.Spec.Names.Kind) + log := logf.Log.WithValues("CustomResourceDefinition", klog.KObj(crd)) + log.Info("Migrating CRs, this operation may take a while...") list := &unstructured.UnstructuredList{} list.SetGroupVersionKind(schema.GroupVersionKind{ @@ -182,7 +183,7 @@ func (m *crdMigrator) migrateResourcesForCRD(ctx context.Context, crd *apiextens } } - log.V(2).Info(fmt.Sprintf("CR migration completed: migrated %d objects", i), "kind", crd.Spec.Names.Kind) + log.V(2).Info(fmt.Sprintf("CR migration completed: migrated %d objects", i)) return nil } diff --git a/cmd/clusterctl/client/cluster/installer.go b/cmd/clusterctl/client/cluster/installer.go index 80cb0ce6bfe2..6e428e64777f 100644 --- a/cmd/clusterctl/client/cluster/installer.go +++ b/cmd/clusterctl/client/cluster/installer.go @@ -108,16 +108,16 @@ func (i *providerInstaller) Install(ctx context.Context, opts InstallOptions) ([ func installComponentsAndUpdateInventory(ctx context.Context, components repository.Components, providerComponents ComponentsClient, providerInventory InventoryClient) error { log := logf.Log - log.Info("Installing", "Provider", components.ManifestLabel(), "Version", components.Version(), "TargetNamespace", components.TargetNamespace()) + log.Info("Installing", "provider", components.ManifestLabel(), "version", components.Version(), "targetNamespace", components.TargetNamespace()) inventoryObject := components.InventoryObject() - log.V(1).Info("Creating objects", "Provider", components.ManifestLabel(), "Version", components.Version(), "TargetNamespace", components.TargetNamespace()) + log.V(1).Info("Creating objects", "provider", components.ManifestLabel(), 
"version", components.Version(), "targetNamespace", components.TargetNamespace()) if err := providerComponents.Create(ctx, components.Objs()); err != nil { return err } - log.V(1).Info("Creating inventory entry", "Provider", components.ManifestLabel(), "Version", components.Version(), "TargetNamespace", components.TargetNamespace()) + log.V(1).Info("Creating inventory entry", "provider", components.ManifestLabel(), "version", components.Version(), "targetNamespace", components.TargetNamespace()) return providerInventory.Create(ctx, inventoryObject) } diff --git a/cmd/clusterctl/client/cluster/objectgraph.go b/cmd/clusterctl/client/cluster/objectgraph.go index 942d1fc090ca..90f072e20d19 100644 --- a/cmd/clusterctl/client/cluster/objectgraph.go +++ b/cmd/clusterctl/client/cluster/objectgraph.go @@ -462,7 +462,7 @@ func (o *objectGraph) Discovery(ctx context.Context, namespace string) error { continue } - log.V(5).Info(typeMeta.Kind, "Count", len(objList.Items)) + log.V(5).Info(typeMeta.Kind, "count", len(objList.Items)) for i := range objList.Items { obj := objList.Items[i] if err := o.addObj(&obj); err != nil { @@ -471,7 +471,7 @@ func (o *objectGraph) Discovery(ctx context.Context, namespace string) error { } } - log.V(1).Info("Total objects", "Count", len(o.uidToNode)) + log.V(1).Info("Total objects", "count", len(o.uidToNode)) // Completes the graph by searching for soft ownership relations such as secrets linked to the cluster // by a naming convention (without any explicit OwnerReference). diff --git a/cmd/clusterctl/client/cluster/upgrader.go b/cmd/clusterctl/client/cluster/upgrader.go index 9f1842671b84..7e188564524b 100644 --- a/cmd/clusterctl/client/cluster/upgrader.go +++ b/cmd/clusterctl/client/cluster/upgrader.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -452,7 +453,7 @@ func (u *providerUpgrader) doUpgrade(ctx context.Context, upgradePlan *UpgradePl func (u *providerUpgrader) scaleDownProvider(ctx context.Context, provider clusterctlv1.Provider) error { log := logf.Log - log.Info("Scaling down", "Provider", provider.Name, "Version", provider.Version, "Namespace", provider.Namespace) + log.Info("Scaling down", "Provider", klog.KObj(&provider), "providerVersion", &provider.Version) cs, err := u.proxy.NewClient(ctx) if err != nil { @@ -473,7 +474,8 @@ func (u *providerUpgrader) scaleDownProvider(ctx context.Context, provider clust // Scale down provider Deployments. 
for _, deployment := range deploymentList.Items { - log.V(5).Info("Scaling down", "Deployment", deployment.Name, "Namespace", deployment.Namespace) + deployment := deployment + log.V(5).Info("Scaling down", "Deployment", klog.KObj(&deployment)) if err := scaleDownDeployment(ctx, cs, deployment); err != nil { return err } diff --git a/cmd/clusterctl/client/config/reader_viper.go b/cmd/clusterctl/client/config/reader_viper.go index 96b3ef78351c..02bb984c5876 100644 --- a/cmd/clusterctl/client/config/reader_viper.go +++ b/cmd/clusterctl/client/config/reader_viper.go @@ -137,7 +137,7 @@ func (v *viperReader) Init(ctx context.Context, path string) error { if err := viper.ReadInConfig(); err != nil { return err } - log.V(5).Info("Using configuration", "File", viper.ConfigFileUsed()) + log.V(5).Info("Using configuration", "file", viper.ConfigFileUsed()) return nil } diff --git a/cmd/clusterctl/client/repository/clusterclass_client.go b/cmd/clusterctl/client/repository/clusterclass_client.go index dbd595d0fc5e..7b199d81a5e9 100644 --- a/cmd/clusterctl/client/repository/clusterclass_client.go +++ b/cmd/clusterctl/client/repository/clusterclass_client.go @@ -81,13 +81,13 @@ func (cc *clusterClassClient) Get(ctx context.Context, name, targetNamespace str } if rawArtifact == nil { - log.V(5).Info("Fetching", "File", filename, "Provider", cc.provider.Name(), "Type", cc.provider.Type(), "Version", version) + log.V(5).Info("Fetching", "file", filename, "provider", cc.provider.Name(), "type", cc.provider.Type(), "version", version) rawArtifact, err = cc.repository.GetFile(ctx, version, filename) if err != nil { return nil, errors.Wrapf(err, "failed to read %q from provider's repository %q", filename, cc.provider.ManifestLabel()) } } else { - log.V(1).Info("Using", "Override", filename, "Provider", cc.provider.ManifestLabel(), "Version", version) + log.V(1).Info("Using", "override", filename, "provider", cc.provider.ManifestLabel(), "version", version) } return NewTemplate(TemplateInput{ diff --git a/cmd/clusterctl/client/repository/components_client.go b/cmd/clusterctl/client/repository/components_client.go index ca002a23dcbc..1613f3e7124e 100644 --- a/cmd/clusterctl/client/repository/components_client.go +++ b/cmd/clusterctl/client/repository/components_client.go @@ -91,13 +91,13 @@ func (f *componentsClient) getRawBytes(ctx context.Context, options *ComponentsO } if file == nil { - log.V(5).Info("Fetching", "File", path, "Provider", f.provider.Name(), "Type", f.provider.Type(), "Version", options.Version) + log.V(5).Info("Fetching", "file", path, "provider", f.provider.Name(), "type", f.provider.Type(), "version", options.Version) file, err = f.repository.GetFile(ctx, options.Version, path) if err != nil { return nil, errors.Wrapf(err, "failed to read %q from provider's repository %q", path, f.provider.ManifestLabel()) } } else { - log.Info("Using", "Override", path, "Provider", f.provider.ManifestLabel(), "Version", options.Version) + log.Info("Using", "override", path, "provider", f.provider.ManifestLabel(), "version", options.Version) } return file, nil } diff --git a/cmd/clusterctl/client/repository/metadata_client.go b/cmd/clusterctl/client/repository/metadata_client.go index 25d95098d91c..84a7897b575a 100644 --- a/cmd/clusterctl/client/repository/metadata_client.go +++ b/cmd/clusterctl/client/repository/metadata_client.go @@ -75,13 +75,13 @@ func (f *metadataClient) Get(ctx context.Context) (*clusterctlv1.Metadata, error return nil, err } if file == nil { - log.V(5).Info("Fetching", "File", 
metadataFile, "Provider", f.provider.Name(), "Type", f.provider.Type(), "Version", version) + log.V(5).Info("Fetching", "file", metadataFile, "provider", f.provider.Name(), "type", f.provider.Type(), "version", version) file, err = f.repository.GetFile(ctx, version, metadataFile) if err != nil { return nil, errors.Wrapf(err, "failed to read %q from the repository for provider %q", metadataFile, f.provider.ManifestLabel()) } } else { - log.V(1).Info("Using", "Override", metadataFile, "Provider", f.provider.ManifestLabel(), "Version", version) + log.V(1).Info("Using", "override", metadataFile, "provider", f.provider.ManifestLabel(), "version", version) } // Convert the yaml into a typed object diff --git a/cmd/clusterctl/client/repository/overrides.go b/cmd/clusterctl/client/repository/overrides.go index bd14f035d61b..a791c055f190 100644 --- a/cmd/clusterctl/client/repository/overrides.go +++ b/cmd/clusterctl/client/repository/overrides.go @@ -111,7 +111,7 @@ func getLocalOverride(info *newOverrideInput) ([]byte, error) { log := logf.Log overridePath, err := newOverride(info).Path() - log.V(5).Info("Potential override file", "SearchFile", overridePath, "Provider", info.provider.ManifestLabel(), "Version", info.version) + log.V(5).Info("Potential override file", "searchFile", overridePath, "provider", info.provider.ManifestLabel(), "version", info.version) if err != nil { return nil, err diff --git a/cmd/clusterctl/client/repository/template_client.go b/cmd/clusterctl/client/repository/template_client.go index a427c47086a1..ea6a7d8a41d4 100644 --- a/cmd/clusterctl/client/repository/template_client.go +++ b/cmd/clusterctl/client/repository/template_client.go @@ -90,13 +90,13 @@ func (c *templateClient) Get(ctx context.Context, flavor, targetNamespace string } if rawArtifact == nil { - log.V(5).Info("Fetching", "File", name, "Provider", c.provider.Name(), "Type", c.provider.Type(), "Version", version) + log.V(5).Info("Fetching", "file", name, "provider", c.provider.Name(), "type", c.provider.Type(), "version", version) rawArtifact, err = c.repository.GetFile(ctx, version, name) if err != nil { return nil, errors.Wrapf(err, "failed to read %q from provider's repository %q", name, c.provider.ManifestLabel()) } } else { - log.V(1).Info("Using", "Override", name, "Provider", c.provider.ManifestLabel(), "Version", version) + log.V(1).Info("Using", "override", name, "provider", c.provider.ManifestLabel(), "version", version) } return NewTemplate(TemplateInput{ diff --git a/cmd/clusterctl/cmd/version_checker.go b/cmd/clusterctl/cmd/version_checker.go index 5bea4db6f396..7876e4e33a6d 100644 --- a/cmd/clusterctl/cmd/version_checker.go +++ b/cmd/clusterctl/cmd/version_checker.go @@ -124,14 +124,14 @@ func (v *versionChecker) Check(ctx context.Context) (string, error) { // if we are using a dirty dev build, just log it out if strings.HasSuffix(cliVer.String(), "-dirty") { - log.V(1).Info("⚠️ Using a development build of clusterctl.", "CLIVersion", cliVer.String(), "LatestGithubRelease", release.Version) + log.V(1).Info("⚠️ Using a development build of clusterctl.", "cliVersion", cliVer.String(), "latestGithubRelease", release.Version) return "", nil } // if the cli version is a dev build off of the latest available release, // the just log it out as informational. 
if strings.HasPrefix(cliVer.String(), latestVersion.String()) && gitVersionRegEx.MatchString(cliVer.String()) { - log.V(1).Info("⚠️ Using a development build of clusterctl.", "CLIVersion", cliVer.String(), "LatestGithubRelease", release.Version) + log.V(1).Info("⚠️ Using a development build of clusterctl.", "cliVersion", cliVer.String(), "latestGithubRelease", release.Version) return "", nil } diff --git a/controllers/remote/cluster_cache_tracker.go b/controllers/remote/cluster_cache_tracker.go index 4b0d889d3ce9..b2f68387ad62 100644 --- a/controllers/remote/cluster_cache_tracker.go +++ b/controllers/remote/cluster_cache_tracker.go @@ -580,7 +580,7 @@ func (t *ClusterCacheTracker) Watch(ctx context.Context, input WatchInput) error if accessor.watches.Has(input.Name) { log := ctrl.LoggerFrom(ctx) - log.V(6).Info("Watch already exists", "Cluster", klog.KRef(input.Cluster.Namespace, input.Cluster.Name), "name", input.Name) + log.V(6).Info(fmt.Sprintf("Watch %s already exists", input.Name), "Cluster", klog.KRef(input.Cluster.Namespace, input.Cluster.Name)) return nil } diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go index ae6f8325e318..030c7880e6a9 100644 --- a/controlplane/kubeadm/internal/controllers/controller.go +++ b/controlplane/kubeadm/internal/controllers/controller.go @@ -206,7 +206,7 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl. if err := r.updateStatus(ctx, controlPlane); err != nil { var connFailure *internal.RemoteClusterConnectionError if errors.As(err, &connFailure) { - log.Info("Could not connect to workload cluster to fetch status", "err", err.Error()) + log.Error(err, "Could not connect to workload cluster to fetch status") } else { log.Error(err, "Failed to update KubeadmControlPlane Status") reterr = kerrors.NewAggregate([]error{reterr, err}) @@ -273,7 +273,7 @@ func (r *KubeadmControlPlaneReconciler) initControlPlaneScope(ctx context.Contex if !cluster.Status.InfrastructureReady || !cluster.Spec.ControlPlaneEndpoint.IsValid() { controlPlane, err := internal.NewControlPlane(ctx, r.managementCluster, r.Client, cluster, kcp, collections.Machines{}) if err != nil { - log.Error(err, "failed to initialize control plane scope") + log.Error(err, "Failed to initialize control plane scope") return nil, false, err } return controlPlane, false, nil @@ -282,7 +282,7 @@ func (r *KubeadmControlPlaneReconciler) initControlPlaneScope(ctx context.Contex // Read control plane machines controlPlaneMachines, err := r.managementClusterUncached.GetMachinesForCluster(ctx, cluster, collections.ControlPlaneMachines(cluster.Name)) if err != nil { - log.Error(err, "failed to retrieve control plane machines for cluster") + log.Error(err, "Failed to retrieve control plane machines for cluster") return nil, false, err } @@ -301,7 +301,7 @@ func (r *KubeadmControlPlaneReconciler) initControlPlaneScope(ctx context.Contex controlPlane, err := internal.NewControlPlane(ctx, r.managementCluster, r.Client, cluster, kcp, ownedMachines) if err != nil { - log.Error(err, "failed to initialize control plane scope") + log.Error(err, "Failed to initialize control plane scope") return nil, false, err } return controlPlane, false, nil @@ -367,7 +367,7 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, controlPl // Generate Cluster Kubeconfig if needed if result, err := r.reconcileKubeconfig(ctx, controlPlane); !result.IsZero() || err != nil { if err != nil { - log.Error(err, 
"failed to reconcile Kubeconfig") + log.Error(err, "Failed to reconcile Kubeconfig") } return result, err } @@ -426,17 +426,17 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, controlPl // We are creating the first replica case numMachines < desiredReplicas && numMachines == 0: // Create new Machine w/ init - log.Info("Initializing control plane", "Desired", desiredReplicas, "Existing", numMachines) + log.Info("Initializing control plane", "desired", desiredReplicas, "existing", numMachines) conditions.MarkFalse(controlPlane.KCP, controlplanev1.AvailableCondition, controlplanev1.WaitingForKubeadmInitReason, clusterv1.ConditionSeverityInfo, "") return r.initializeControlPlane(ctx, controlPlane) // We are scaling up case numMachines < desiredReplicas && numMachines > 0: // Create a new Machine w/ join - log.Info("Scaling up control plane", "Desired", desiredReplicas, "Existing", numMachines) + log.Info("Scaling up control plane", "desired", desiredReplicas, "existing", numMachines) return r.scaleUpControlPlane(ctx, controlPlane) // We are scaling down case numMachines > desiredReplicas: - log.Info("Scaling down control plane", "Desired", desiredReplicas, "Existing", numMachines) + log.Info("Scaling down control plane", "desired", desiredReplicas, "existing", numMachines) // The last parameter (i.e. machines needing to be rolled out) should always be empty here. return r.scaleDownControlPlane(ctx, controlPlane, collections.Machines{}) } @@ -462,7 +462,7 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, controlPl // Update kube-proxy daemonset. if err := workloadCluster.UpdateKubeProxyImageInfo(ctx, controlPlane.KCP, parsedVersion); err != nil { - log.Error(err, "failed to update kube-proxy daemonset") + log.Error(err, "Failed to update kube-proxy daemonset") return ctrl.Result{}, err } @@ -495,7 +495,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileClusterCertificates(ctx context certificates := secret.NewCertificatesForInitialControlPlane(config.ClusterConfiguration) controllerRef := metav1.NewControllerRef(controlPlane.KCP, controlplanev1.GroupVersion.WithKind(kubeadmControlPlaneKind)) if err := certificates.LookupOrGenerateCached(ctx, r.SecretCachingClient, r.Client, util.ObjectKey(controlPlane.Cluster), *controllerRef); err != nil { - log.Error(err, "unable to lookup or create cluster certificates") + log.Error(err, "Unable to lookup or create cluster certificates") conditions.MarkFalse(controlPlane.KCP, controlplanev1.CertificatesAvailableCondition, controlplanev1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) return err } @@ -524,7 +524,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, con // Updates conditions reporting the status of static pods and the status of the etcd cluster. 
// NOTE: Ignoring failures given that we are deleting if err := r.reconcileControlPlaneConditions(ctx, controlPlane); err != nil { - log.Info("failed to reconcile conditions", "error", err.Error()) + log.Error(err, "Failed to reconcile conditions") } // Aggregate the operational state of all the machines; while aggregating we are adding the diff --git a/controlplane/kubeadm/internal/controllers/helpers.go b/controlplane/kubeadm/internal/controllers/helpers.go index 45def5207fbd..f7e3a93f758d 100644 --- a/controlplane/kubeadm/internal/controllers/helpers.go +++ b/controlplane/kubeadm/internal/controllers/helpers.go @@ -91,7 +91,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, } if needsRotation { - log.Info("rotating kubeconfig secret") + log.Info("Rotating kubeconfig secret") if err := kubeconfig.RegenerateSecret(ctx, r.Client, configSecret); err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to regenerate kubeconfig") } diff --git a/controlplane/kubeadm/internal/controllers/remediation.go b/controlplane/kubeadm/internal/controllers/remediation.go index 28f54b5538a3..98f9abea3482 100644 --- a/controlplane/kubeadm/internal/controllers/remediation.go +++ b/controlplane/kubeadm/internal/controllers/remediation.go @@ -143,7 +143,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C // The cluster MUST have more than one replica, because this is the smallest cluster size that allows any etcd failure tolerance. if controlPlane.Machines.Len() <= 1 { - log.Info("A control plane machine needs remediation, but the number of current replicas is less or equal to 1. Skipping remediation", "Replicas", controlPlane.Machines.Len()) + log.Info("A control plane machine needs remediation, but the number of current replicas is less or equal to 1. Skipping remediation", "replicas", controlPlane.Machines.Len()) conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP can't remediate if current replicas are less or equal to 1") return ctrl.Result{}, nil } @@ -312,7 +312,7 @@ func (r *KubeadmControlPlaneReconciler) checkRetryLimits(log logr.Logger, machin var retryForSameMachineInProgress bool if lastRemediationTime.Add(minHealthyPeriod).After(reconciliationTime) { retryForSameMachineInProgress = true - log = log.WithValues("RemediationRetryFor", klog.KRef(machineToBeRemediated.Namespace, lastRemediationData.Machine)) + log = log.WithValues("remediationRetryFor", klog.KRef(machineToBeRemediated.Namespace, lastRemediationData.Machine)) } // If the retry for the same machine is not in progress, this is the first try of a new retry sequence. @@ -412,7 +412,7 @@ func (r *KubeadmControlPlaneReconciler) canSafelyRemoveEtcdMember(ctx context.Co // // NOTE: This should not happen given that KCP is running reconcileEtcdMembers before calling this method. 
if machine == nil { - log.Info("An etcd member does not have a corresponding machine, assuming this member is unhealthy", "MemberName", etcdMember) + log.Info("An etcd member does not have a corresponding machine, assuming this member is unhealthy", "memberName", etcdMember) targetUnhealthyMembers++ unhealthyMembers = append(unhealthyMembers, fmt.Sprintf("%s (no machine)", etcdMember)) continue diff --git a/controlplane/kubeadm/internal/controllers/scale.go b/controlplane/kubeadm/internal/controllers/scale.go index 8dc44889fe41..c68f894ae155 100644 --- a/controlplane/kubeadm/internal/controllers/scale.go +++ b/controlplane/kubeadm/internal/controllers/scale.go @@ -155,7 +155,7 @@ func (r *KubeadmControlPlaneReconciler) preflightChecks(ctx context.Context, con // If there are deleting machines, wait for the operation to complete. if controlPlane.HasDeletingMachine() { - logger.Info("Waiting for machines to be deleted", "Machines", strings.Join(controlPlane.Machines.Filter(collections.HasDeletionTimestamp).Names(), ", ")) + logger.Info("Waiting for machines to be deleted", "machines", strings.Join(controlPlane.Machines.Filter(collections.HasDeletionTimestamp).Names(), ", ")) return ctrl.Result{RequeueAfter: deleteRequeueAfter}, nil } diff --git a/controlplane/kubeadm/internal/controllers/upgrade.go b/controlplane/kubeadm/internal/controllers/upgrade.go index 651d0c2a798c..ae9823d6154d 100644 --- a/controlplane/kubeadm/internal/controllers/upgrade.go +++ b/controlplane/kubeadm/internal/controllers/upgrade.go @@ -21,12 +21,12 @@ import ( "github.com/blang/semver/v4" "github.com/pkg/errors" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" - "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/collections" "sigs.k8s.io/cluster-api/util/version" ) @@ -46,7 +46,7 @@ func (r *KubeadmControlPlaneReconciler) upgradeControlPlane( workloadCluster, err := controlPlane.GetWorkloadCluster(ctx) if err != nil { - logger.Error(err, "failed to get remote client for workload cluster", "cluster key", util.ObjectKey(controlPlane.Cluster)) + logger.Error(err, "failed to get remote client for workload cluster", "Cluster", klog.KObj(controlPlane.Cluster)) return ctrl.Result{}, err } diff --git a/controlplane/kubeadm/main.go b/controlplane/kubeadm/main.go index 71215f9df0ef..34c84c077927 100644 --- a/controlplane/kubeadm/main.go +++ b/controlplane/kubeadm/main.go @@ -288,7 +288,7 @@ func main() { setupReconcilers(ctx, mgr) setupWebhooks(mgr) - setupLog.Info("starting manager", "version", version.Get().String()) + setupLog.Info("Starting manager", "version", version.Get().String()) if err := mgr.Start(ctx); err != nil { setupLog.Error(err, "problem running manager") os.Exit(1) diff --git a/docs/book/src/developer/logging.md b/docs/book/src/developer/logging.md index da39f7531c65..5ab476189baa 100644 --- a/docs/book/src/developer/logging.md +++ b/docs/book/src/developer/logging.md @@ -84,15 +84,20 @@ one of the above practices is really important for Cluster API developers - Developers MUST use `klog.KObj` or `klog.KRef` functions when logging key value pairs for Kubernetes objects, thus ensuring a key value pair representing a Kubernetes object is formatted consistently in all the logs. +- Developers MUST use consistent log keys: + - kinds should be written in upper camel case, e.g. 
`MachineDeployment`, `MachineSet` + - Note: we cannot use lower camel case for kinds consistently because there is no way to + automatically calculate the correct log key for provider CRDs like `AWSCluster` + - all other keys should use lower camel case, e.g. `resourceVersion`, `oldReplicas` to align to Kubernetes log conventions Please note that, in order to ensure logs can be easily searched it is important to ensure consistency for the following key value pairs (in order of importance): -- Key value pairs identifying the object being reconciled, e.g. a Machine Deployment. -- Key value pairs identifying the hierarchy of objects being reconciled, e.g. the Cluster a Machine Deployment belongs +- Key value pairs identifying the object being reconciled, e.g. a MachineDeployment. +- Key value pairs identifying the hierarchy of objects being reconciled, e.g. the Cluster a MachineDeployment belongs to. - Key value pairs identifying side effects on other objects, e.g. while reconciling a MachineDeployment, the controller - creates a MachinesSet. + creates a MachineSet. - Other Key value pairs. ## Log Messages diff --git a/docs/book/src/developer/providers/implementers-guide/controllers_and_reconciliation.md b/docs/book/src/developer/providers/implementers-guide/controllers_and_reconciliation.md index 836532204490..2ae285e3cfa7 100644 --- a/docs/book/src/developer/providers/implementers-guide/controllers_and_reconciliation.md +++ b/docs/book/src/developer/providers/implementers-guide/controllers_and_reconciliation.md @@ -232,7 +232,7 @@ if err = (&controllers.MailgunClusterReconciler{ Client: mgr.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("MailgunCluster"), }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "MailgunCluster") + setupLog.Error(err, "Unable to create controller", "controller", "MailgunCluster") os.Exit(1) } ``` @@ -267,7 +267,7 @@ if err = (&controllers.MailgunClusterReconciler{ Mailgun: mg, Recipient: recipient, }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "MailgunCluster") + setupLog.Error(err, "Unable to create controller", "controller", "MailgunCluster") os.Exit(1) } ``` diff --git a/docs/book/src/tasks/experimental-features/runtime-sdk/implement-extensions.md b/docs/book/src/tasks/experimental-features/runtime-sdk/implement-extensions.md index d583d9ca0488..1265600970c6 100644 --- a/docs/book/src/tasks/experimental-features/runtime-sdk/implement-extensions.md +++ b/docs/book/src/tasks/experimental-features/runtime-sdk/implement-extensions.md @@ -97,14 +97,14 @@ func main() { pflag.CommandLine.AddGoFlagSet(flag.CommandLine) // Set log level 2 as default. 
if err := pflag.CommandLine.Set("v", "2"); err != nil { - setupLog.Error(err, "failed to set default log level") + setupLog.Error(err, "Failed to set default log level") os.Exit(1) } pflag.Parse() // Validates logs flags using Kubernetes component-base machinery and applies them if err := logsv1.ValidateAndApply(logOptions, nil); err != nil { - setupLog.Error(err, "unable to start extension") + setupLog.Error(err, "Unable to start extension") os.Exit(1) } @@ -126,7 +126,7 @@ func main() { CertDir: webhookCertDir, }) if err != nil { - setupLog.Error(err, "error creating webhook server") + setupLog.Error(err, "Error creating webhook server") os.Exit(1) } @@ -136,7 +136,7 @@ func main() { Name: "before-cluster-create", HandlerFunc: DoBeforeClusterCreate, }); err != nil { - setupLog.Error(err, "error adding handler") + setupLog.Error(err, "Error adding handler") os.Exit(1) } if err := webhookServer.AddExtensionHandler(server.ExtensionHandler{ @@ -144,7 +144,7 @@ func main() { Name: "before-cluster-upgrade", HandlerFunc: DoBeforeClusterUpgrade, }); err != nil { - setupLog.Error(err, "error adding handler") + setupLog.Error(err, "Error adding handler") os.Exit(1) } @@ -154,7 +154,7 @@ func main() { // Start the https server. setupLog.Info("Starting Runtime Extension server") if err := webhookServer.Start(ctx); err != nil { - setupLog.Error(err, "error running webhook server") + setupLog.Error(err, "Error running webhook server") os.Exit(1) } } diff --git a/docs/proposals/20220221-runtime-SDK.md b/docs/proposals/20220221-runtime-SDK.md index cc924ef9de64..096b0088b62d 100644 --- a/docs/proposals/20220221-runtime-SDK.md +++ b/docs/proposals/20220221-runtime-SDK.md @@ -579,7 +579,7 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { UnstructuredCachingClient: unstructuredCachingClient, WatchFilterValue: watchFilterValue, }).SetupWithManager(ctx, mgr, concurrency(clusterTopologyConcurrency)); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "ClusterTopology") + setupLog.Error(err, "Unable to create controller", "controller", "ClusterTopology") os.Exit(1) } ... diff --git a/exp/addons/internal/controllers/clusterresourceset_controller.go b/exp/addons/internal/controllers/clusterresourceset_controller.go index a9febf3242f9..187f20b6e335 100644 --- a/exp/addons/internal/controllers/clusterresourceset_controller.go +++ b/exp/addons/internal/controllers/clusterresourceset_controller.go @@ -209,7 +209,7 @@ func (r *ClusterResourceSetReconciler) reconcileDelete(ctx context.Context, clus // attempt to Patch the ClusterResourceSetBinding object after delete reconciliation if there is at least 1 binding left. if len(clusterResourceSetBinding.Spec.Bindings) == 0 { if r.Client.Delete(ctx, clusterResourceSetBinding) != nil { - log.Error(err, "failed to delete empty ClusterResourceSetBinding") + log.Error(err, "Failed to delete empty ClusterResourceSetBinding") } } else if err := patchHelper.Patch(ctx, clusterResourceSetBinding); err != nil { return err @@ -289,7 +289,7 @@ func (r *ClusterResourceSetReconciler) ApplyClusterResourceSet(ctx context.Conte defer func() { // Always attempt to Patch the ClusterResourceSetBinding object after each reconciliation. 
if err := patchHelper.Patch(ctx, clusterResourceSetBinding); err != nil { - log.Error(err, "failed to patch config") + log.Error(err, "Failed to patch config") } }() @@ -359,7 +359,7 @@ func (r *ClusterResourceSetReconciler) ApplyClusterResourceSet(ctx context.Conte isSuccessful := true if err := resourceScope.apply(ctx, remoteClient); err != nil { isSuccessful = false - log.Error(err, "failed to apply ClusterResourceSet resource", "Resource kind", resource.Kind, "Resource name", resource.Name) + log.Error(err, "Failed to apply ClusterResourceSet resource", resource.Kind, klog.KRef(clusterResourceSet.Namespace, resource.Name)) conditions.MarkFalse(clusterResourceSet, addonsv1.ResourcesAppliedCondition, addonsv1.ApplyFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) errList = append(errList, err) } diff --git a/exp/addons/internal/controllers/clusterresourcesetbinding_controller.go b/exp/addons/internal/controllers/clusterresourcesetbinding_controller.go index 93ac69079ca9..ab593428a073 100644 --- a/exp/addons/internal/controllers/clusterresourcesetbinding_controller.go +++ b/exp/addons/internal/controllers/clusterresourcesetbinding_controller.go @@ -85,7 +85,7 @@ func (r *ClusterResourceSetBindingReconciler) Reconcile(ctx context.Context, req if err != nil { if apierrors.IsNotFound(err) { // If the owner cluster is already deleted, delete its ClusterResourceSetBinding - log.Info("deleting ClusterResourceSetBinding because the owner Cluster no longer exists") + log.Info("Deleting ClusterResourceSetBinding because the owner Cluster no longer exists") return ctrl.Result{}, r.Client.Delete(ctx, binding) } return ctrl.Result{}, err @@ -98,7 +98,7 @@ func (r *ClusterResourceSetBindingReconciler) Reconcile(ctx context.Context, req return ctrl.Result{}, nil } } - log.Info("deleting ClusterResourceSetBinding because the owner Cluster is currently being deleted") + log.Info("Deleting ClusterResourceSetBinding because the owner Cluster is currently being deleted") return ctrl.Result{}, r.Client.Delete(ctx, binding) } diff --git a/exp/internal/controllers/machinepool_controller_noderef.go b/exp/internal/controllers/machinepool_controller_noderef.go index c6caee55fe03..e45fe3d6b76a 100644 --- a/exp/internal/controllers/machinepool_controller_noderef.go +++ b/exp/internal/controllers/machinepool_controller_noderef.go @@ -23,6 +23,7 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -97,7 +98,7 @@ func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, cluster * mp.Status.UnavailableReplicas = mp.Status.Replicas - mp.Status.AvailableReplicas mp.Status.NodeRefs = nodeRefsResult.references - log.Info("Set MachinePools's NodeRefs", "noderefs", mp.Status.NodeRefs) + log.Info("Set MachinePool's NodeRefs", "nodeRefs", mp.Status.NodeRefs) r.recorder.Event(mp, corev1.EventTypeNormal, "SuccessfulSetNodeRefs", fmt.Sprintf("%+v", mp.Status.NodeRefs)) // Reconcile node annotations and taints. 
@@ -107,7 +108,7 @@ func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, cluster * } if mp.Status.Replicas != mp.Status.ReadyReplicas || len(nodeRefsResult.references) != int(mp.Status.ReadyReplicas) { - log.Info("NodeRefs != ReadyReplicas", "NodeRefs", len(nodeRefsResult.references), "ReadyReplicas", mp.Status.ReadyReplicas) + log.Info("NodeRefs != ReadyReplicas", "nodeRefs", len(nodeRefsResult.references), "readyReplicas", mp.Status.ReadyReplicas) conditions.MarkFalse(mp, expv1.ReplicasReadyCondition, expv1.WaitingForReplicasReadyReason, clusterv1.ConditionSeverityInfo, "") return ctrl.Result{RequeueAfter: 30 * time.Second}, nil } @@ -126,7 +127,7 @@ func (r *MachinePoolReconciler) deleteRetiredNodes(ctx context.Context, c client for _, nodeRef := range nodeRefs { node := &corev1.Node{} if err := c.Get(ctx, client.ObjectKey{Name: nodeRef.Name}, node); err != nil { - log.V(2).Info("Failed to get Node, skipping", "err", err, "nodeRef.Name", nodeRef.Name) + log.V(2).Error(err, "Failed to get Node, skipping", "Node", klog.KRef("", nodeRef.Name)) continue } @@ -209,7 +210,7 @@ func (r *MachinePoolReconciler) patchNodes(ctx context.Context, c client.Client, for _, nodeRef := range references { node := &corev1.Node{} if err := c.Get(ctx, client.ObjectKey{Name: nodeRef.Name}, node); err != nil { - log.V(2).Info("Failed to get Node, skipping setting annotations", "err", err, "nodeRef.Name", nodeRef.Name) + log.V(2).Error(err, "Failed to get Node, skipping setting annotations", "Node", klog.KRef("", nodeRef.Name)) continue } patchHelper, err := patch.NewHelper(node, c) @@ -228,7 +229,7 @@ func (r *MachinePoolReconciler) patchNodes(ctx context.Context, c client.Client, // Patch the node if needed. if hasAnnotationChanges || hasTaintChanges { if err := patchHelper.Patch(ctx, node); err != nil { - log.V(2).Info("Failed patch Node to set annotations and drop taints", "err", err, "node name", node.Name) + log.V(2).Error(err, "Failed patch Node to set annotations and drop taints", "Node", klog.KObj(node)) return err } } diff --git a/exp/internal/controllers/machinepool_controller_phases.go b/exp/internal/controllers/machinepool_controller_phases.go index 1c7a9bb32cfb..a96639afec15 100644 --- a/exp/internal/controllers/machinepool_controller_phases.go +++ b/exp/internal/controllers/machinepool_controller_phases.go @@ -400,7 +400,7 @@ func (r *MachinePoolReconciler) createOrUpdateMachines(ctx context.Context, mp * infraMachine := &infraMachines[i] // If infraMachine already has a Machine, update it if needed. 
if existingMachine, ok := infraMachineToMachine[infraMachine.GetName()]; ok { - log.V(2).Info("Patching existing Machine for infraMachine", "infraMachine", klog.KObj(infraMachine), "machine", klog.KObj(&existingMachine)) + log.V(2).Info("Patching existing Machine for infraMachine", infraMachine.GetKind(), klog.KObj(infraMachine), "Machine", klog.KObj(&existingMachine)) desiredMachine := computeDesiredMachine(mp, infraMachine, &existingMachine) if err := ssa.Patch(ctx, r.Client, MachinePoolControllerName, desiredMachine, ssa.WithCachingProxy{Cache: r.ssaCache, Original: &existingMachine}); err != nil { @@ -488,7 +488,7 @@ func (r *MachinePoolReconciler) infraMachineToMachinePoolMapper(ctx context.Cont if labels.IsMachinePoolOwned(o) { machinePool, err := utilexp.GetMachinePoolByLabels(ctx, r.Client, o.GetNamespace(), o.GetLabels()) if err != nil { - log.Error(err, "failed to get MachinePool for InfraMachine", "infraMachine", klog.KObj(o), "labels", o.GetLabels()) + log.Error(err, "Failed to get MachinePool for InfraMachine", o.GetObjectKind().GroupVersionKind().Kind, klog.KObj(o), "labels", o.GetLabels()) return nil } if machinePool != nil { diff --git a/exp/ipam/internal/webhooks/ipaddress.go b/exp/ipam/internal/webhooks/ipaddress.go index efd1f56e1cd7..a500a542b6b8 100644 --- a/exp/ipam/internal/webhooks/ipaddress.go +++ b/exp/ipam/internal/webhooks/ipaddress.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook" @@ -146,7 +147,7 @@ func (webhook *IPAddress) validate(ctx context.Context, ip *ipamv1.IPAddress) er claim := &ipamv1.IPAddressClaim{} err = webhook.Client.Get(ctx, types.NamespacedName{Name: ip.Spec.ClaimRef.Name, Namespace: ip.ObjectMeta.Namespace}, claim) if err != nil && !apierrors.IsNotFound(err) { - log.Error(err, "failed to fetch claim", "name", ip.Spec.ClaimRef.Name) + log.Error(err, "Failed to fetch claim", "IPAddressClaim", klog.KRef(ip.ObjectMeta.Namespace, ip.Spec.ClaimRef.Name)) allErrs = append(allErrs, field.InternalError( specPath.Child("claimRef"), diff --git a/exp/util/util.go b/exp/util/util.go index cef656984454..aa201cc4d846 100644 --- a/exp/util/util.go +++ b/exp/util/util.go @@ -20,11 +20,11 @@ package util import ( "context" - "github.com/go-logr/logr" "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -89,8 +89,8 @@ func GetMachinePoolByLabels(ctx context.Context, c client.Client, namespace stri // MachinePoolToInfrastructureMapFunc returns a handler.MapFunc that watches for // MachinePool events and returns reconciliation requests for an infrastructure provider object. 
-func MachinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind, log logr.Logger) handler.MapFunc { - log = log.WithValues("machine-pool-to-infra-map-func", gvk.String()) +func MachinePoolToInfrastructureMapFunc(ctx context.Context, gvk schema.GroupVersionKind) handler.MapFunc { + log := ctrl.LoggerFrom(ctx) return func(_ context.Context, o client.Object) []reconcile.Request { m, ok := o.(*expv1.MachinePool) if !ok { diff --git a/internal/controllers/cluster/cluster_controller.go b/internal/controllers/cluster/cluster_controller.go index 7654b788aa8e..2c8b1b51ff23 100644 --- a/internal/controllers/cluster/cluster_controller.go +++ b/internal/controllers/cluster/cluster_controller.go @@ -30,8 +30,10 @@ import ( "k8s.io/apimachinery/pkg/runtime" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -249,12 +251,18 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu // Don't handle deleted child continue } - gvk := child.GetObjectKind().GroupVersionKind().String() - log.Info("Deleting child object", "gvk", gvk, "name", child.GetName()) + gvk, err := apiutil.GVKForObject(child, r.Client.Scheme()) + if err != nil { + errs = append(errs, errors.Wrapf(err, "error getting gvk for child object")) + continue + } + + log := log.WithValues(gvk.Kind, klog.KObj(child)) + log.Info("Deleting child object") if err := r.Client.Delete(ctx, child); err != nil { err = errors.Wrapf(err, "error deleting cluster %s/%s: failed to delete %s %s", cluster.Namespace, cluster.Name, gvk, child.GetName()) - log.Error(err, "Error deleting resource", "gvk", gvk, "name", child.GetName()) + log.Error(err, "Error deleting resource") errs = append(errs, err) } } diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go index fb2668c5632a..9b63035b3c47 100644 --- a/internal/controllers/machine/machine_controller.go +++ b/internal/controllers/machine/machine_controller.go @@ -575,7 +575,7 @@ func (r *Reconciler) isDeleteNodeAllowed(ctx context.Context, cluster *clusterv1 if apierrors.IsNotFound(err) { // If control plane object in the reference does not exist, log and skip check for // external managed control plane - log.Error(err, "control plane object specified in cluster spec.controlPlaneRef does not exist", "kind", cluster.Spec.ControlPlaneRef.Kind, "name", cluster.Spec.ControlPlaneRef.Name) + log.Error(err, "Control plane object specified in cluster spec.controlPlaneRef does not exist", cluster.Spec.ControlPlaneRef.Kind, klog.KRef(cluster.Spec.ControlPlaneRef.Namespace, cluster.Spec.ControlPlaneRef.Name)) } else { if err != nil { // If any other error occurs when trying to get the control plane object, diff --git a/internal/controllers/machine/machine_controller_noderef.go b/internal/controllers/machine/machine_controller_noderef.go index 719e7d41138e..dd28099d3d40 100644 --- a/internal/controllers/machine/machine_controller_noderef.go +++ b/internal/controllers/machine/machine_controller_noderef.go @@ -94,7 +94,7 @@ func (r *Reconciler) reconcileNode(ctx context.Context, s *scope) (ctrl.Result, Name: node.Name, UID: node.UID, } - log.Info("Infrastructure provider reporting spec.providerID, 
Kubernetes node is now available", machine.Spec.InfrastructureRef.Kind, klog.KRef(machine.Spec.InfrastructureRef.Namespace, machine.Spec.InfrastructureRef.Name), "providerID", *machine.Spec.ProviderID, "node", klog.KRef("", machine.Status.NodeRef.Name)) + log.Info("Infrastructure provider reporting spec.providerID, Kubernetes node is now available", machine.Spec.InfrastructureRef.Kind, klog.KRef(machine.Spec.InfrastructureRef.Namespace, machine.Spec.InfrastructureRef.Name), "providerID", *machine.Spec.ProviderID, "Node", klog.KRef("", machine.Status.NodeRef.Name)) r.recorder.Event(machine, corev1.EventTypeNormal, "SuccessfulSetNodeRef", machine.Status.NodeRef.Name) } diff --git a/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete.go b/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete.go index 8f7ae1646b3f..061af36f4ab4 100644 --- a/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete.go +++ b/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete.go @@ -127,7 +127,7 @@ func (r *Reconciler) reconcileOldMachineSetsOnDelete(ctx context.Context, oldMSs } machineSetScaleDownAmountDueToMachineDeletion := *oldMS.Spec.Replicas - updatedReplicaCount if machineSetScaleDownAmountDueToMachineDeletion < 0 { - log.V(4).Error(errors.Errorf("unexpected negative scale down amount: %d", machineSetScaleDownAmountDueToMachineDeletion), fmt.Sprintf("Error reconciling MachineSet %s", oldMS.Name)) + log.V(4).Error(errors.Errorf("Unexpected negative scale down amount: %d", machineSetScaleDownAmountDueToMachineDeletion), fmt.Sprintf("Error reconciling MachineSet %s", oldMS.Name)) } scaleDownAmount -= machineSetScaleDownAmountDueToMachineDeletion log.V(4).Info("Adjusting replica count for deleted machines", "oldReplicas", oldMS.Spec.Replicas, "newReplicas", updatedReplicaCount) diff --git a/internal/controllers/machinedeployment/machinedeployment_sync.go b/internal/controllers/machinedeployment/machinedeployment_sync.go index 18f0bbe99274..791c74941266 100644 --- a/internal/controllers/machinedeployment/machinedeployment_sync.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync.go @@ -630,7 +630,7 @@ func (r *Reconciler) cleanupDeployment(ctx context.Context, oldMSs []*clusterv1. continue } - log.V(4).Info("Trying to cleanup machine set for deployment", "machineset", ms.Name) + log.V(4).Info("Trying to cleanup machine set for deployment", "MachineSet", klog.KObj(ms)) if err := r.Client.Delete(ctx, ms); err != nil && !apierrors.IsNotFound(err) { // Return error instead of aggregating and continuing DELETEs on the theory // that we may be overloading the api server. diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go index 08879720d839..06930d03d8f7 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go @@ -61,10 +61,10 @@ const ( // is restricted by remediation circuit shorting logic. 
EventRemediationRestricted string = "RemediationRestricted" - maxUnhealthyKeyLog = "max unhealthy" - unhealthyTargetsKeyLog = "unhealthy targets" - unhealthyRangeKeyLog = "unhealthy range" - totalTargetKeyLog = "total target" + maxUnhealthyKeyLog = "maxUnhealthy" + unhealthyTargetsKeyLog = "unhealthyTargets" + unhealthyRangeKeyLog = "unhealthyRange" + totalTargetKeyLog = "totalTarget" ) // +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;patch @@ -202,7 +202,7 @@ func (r *Reconciler) reconcile(ctx context.Context, logger logr.Logger, cluster var err error remoteClient, err = r.Tracker.GetClient(ctx, util.ObjectKey(cluster)) if err != nil { - logger.Error(err, "error creating remote cluster cache") + logger.Error(err, "Error creating remote cluster cache") return ctrl.Result{}, err } @@ -328,7 +328,7 @@ func (r *Reconciler) reconcile(ctx context.Context, logger logr.Logger, cluster } if minNextCheck := minDuration(nextCheckTimes); minNextCheck > 0 { - logger.V(3).Info("Some targets might go unhealthy. Ensuring a requeue happens", "requeueIn", minNextCheck.Truncate(time.Second).String()) + logger.V(3).Info("Some targets might go unhealthy. Ensuring a requeue happens", "requeueAfter", minNextCheck.Truncate(time.Second).String()) return ctrl.Result{RequeueAfter: minNextCheck}, nil } @@ -362,7 +362,7 @@ func (r *Reconciler) patchHealthyTargets(ctx context.Context, logger logr.Logger } if err := t.patchHelper.Patch(ctx, t.Machine); err != nil { - logger.Error(err, "failed to patch healthy machine status for machine", "machine", t.Machine.GetName()) + logger.Error(err, "failed to patch healthy machine status for machine", "Machine", klog.KObj(t.Machine)) errList = append(errList, errors.Wrapf(err, "failed to patch healthy machine status for machine: %s/%s", t.Machine.Namespace, t.Machine.Name)) } } diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go b/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go index f7677ae4d08d..d7f34e75fb5f 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go @@ -213,7 +213,7 @@ func (r *Reconciler) getTargetsFromMHC(ctx context.Context, logger logr.Logger, logger := logger.WithValues("Machine", klog.KObj(&machines[k])) skip, reason := shouldSkipRemediation(&machines[k]) if skip { - logger.Info("skipping remediation", "reason", reason) + logger.Info("Skipping remediation", "reason", reason) continue } @@ -293,7 +293,7 @@ func (r *Reconciler) healthCheckTargets(targets []healthCheckTarget, logger logr var healthy []healthCheckTarget for _, t := range targets { - logger := logger.WithValues("Target", t.string()) + logger := logger.WithValues("target", t.string()) logger.V(3).Info("Health checking target") needsRemediation, nextCheck := t.needsRemediation(logger, timeoutForMachineToHaveNode) diff --git a/internal/controllers/machineset/machineset_controller.go b/internal/controllers/machineset/machineset_controller.go index d7ca98c93ac9..aadce8caca5e 100644 --- a/internal/controllers/machineset/machineset_controller.go +++ b/internal/controllers/machineset/machineset_controller.go @@ -382,7 +382,7 @@ func (r *Reconciler) syncMachines(ctx context.Context, machineSet *clusterv1.Mac updatedMachine := r.computeDesiredMachine(machineSet, m) err := ssa.Patch(ctx, r.Client, machineSetManagerName, updatedMachine, ssa.WithCachingProxy{Cache: r.ssaCache, Original: m}) if err != nil { - log.Error(err, "failed to update Machine", "Machine", klog.KObj(updatedMachine)) + log.Error(err, "Failed to update Machine", "Machine", klog.KObj(updatedMachine)) return errors.Wrapf(err, "failed to update Machine %q", klog.KObj(updatedMachine)) } machines[i] = updatedMachine @@ -890,7 +890,7 @@ func (r *Reconciler) updateStatus(ctx context.Context, cluster *clusterv1.Cluste node, err := r.getMachineNode(ctx, cluster, machine) if err != nil && machine.GetDeletionTimestamp().IsZero() { - log.Error(err, "Unable to retrieve Node status", "node", klog.KObj(node)) + log.Error(err, "Unable to retrieve Node status", "Node", klog.KObj(node)) continue } diff --git a/internal/controllers/topology/cluster/structuredmerge/serversidepathhelper.go b/internal/controllers/topology/cluster/structuredmerge/serversidepathhelper.go index d4d33c782c52..f5aa4d9d4b31 100644 --- a/internal/controllers/topology/cluster/structuredmerge/serversidepathhelper.go +++ b/internal/controllers/topology/cluster/structuredmerge/serversidepathhelper.go @@ -131,7 +131,7 @@ func (h *serverSidePatchHelper) Patch(ctx context.Context) error { } log := ctrl.LoggerFrom(ctx) - log.V(5).Info("Patching object", "Intent", h.modified) + log.V(5).Info("Patching object", "intent", h.modified) options := []client.PatchOption{ client.FieldOwner(TopologyManagerName), diff --git a/internal/controllers/topology/cluster/structuredmerge/twowayspatchhelper.go b/internal/controllers/topology/cluster/structuredmerge/twowayspatchhelper.go index 8d1a3deaea75..720764ec8ecc 100644 --- a/internal/controllers/topology/cluster/structuredmerge/twowayspatchhelper.go +++ b/internal/controllers/topology/cluster/structuredmerge/twowayspatchhelper.go @@ -225,6 +225,6 @@ func (h *TwoWaysPatchHelper) Patch(ctx context.Context) error { } // Note: deepcopy before patching in order to avoid modifications to the original object.
- log.V(5).Info("Patching object", "Patch", string(h.patch)) + log.V(5).Info("Patching object", "patch", string(h.patch)) return h.client.Patch(ctx, h.original.DeepCopyObject().(client.Object), client.RawPatch(types.MergePatchType, h.patch)) } diff --git a/internal/log/log.go b/internal/log/log.go index 1ba151f0c5ba..ca349e6e4b36 100644 --- a/internal/log/log.go +++ b/internal/log/log.go @@ -122,7 +122,7 @@ func (l *topologyReconcileLogger) WithMachineDeployment(md *clusterv1.MachineDep return &topologyReconcileLogger{ Logger: l.Logger.WithValues( "MachineDeployment", klog.KObj(md), - "MachineDeploymentTopology", topologyName, + "machineDeploymentTopology", topologyName, ), } } @@ -133,7 +133,7 @@ func (l *topologyReconcileLogger) WithMachinePool(mp *expv1.MachinePool) Logger return &topologyReconcileLogger{ Logger: l.Logger.WithValues( "MachinePool", klog.KObj(mp), - "MachinePoolTopology", topologyName, + "machinePoolTopology", topologyName, ), } } diff --git a/main.go b/main.go index b79e367d3cf8..fcd767d5f495 100644 --- a/main.go +++ b/main.go @@ -361,7 +361,7 @@ func main() { tracker := setupReconcilers(ctx, mgr) setupWebhooks(mgr, tracker) - setupLog.Info("starting manager", "version", version.Get().String()) + setupLog.Info("Starting manager", "version", version.Get().String()) if err := mgr.Start(ctx); err != nil { setupLog.Error(err, "problem running manager") os.Exit(1) diff --git a/test/extension/handlers/topologymutation/handler.go b/test/extension/handlers/topologymutation/handler.go index e3bf87636f5e..ec3c1e217302 100644 --- a/test/extension/handlers/topologymutation/handler.go +++ b/test/extension/handlers/topologymutation/handler.go @@ -90,13 +90,13 @@ func (h *ExtensionHandlers) GeneratePatches(ctx context.Context, req *runtimehoo switch obj := obj.(type) { case *infrav1.DockerClusterTemplate: if err := patchDockerClusterTemplate(ctx, obj, variables); err != nil { - log.Error(err, "error patching DockerClusterTemplate") + log.Error(err, "Error patching DockerClusterTemplate") return errors.Wrap(err, "error patching DockerClusterTemplate") } case *controlplanev1.KubeadmControlPlaneTemplate: err := patchKubeadmControlPlaneTemplate(ctx, obj, variables) if err != nil { - log.Error(err, "error patching KubeadmControlPlaneTemplate") + log.Error(err, "Error patching KubeadmControlPlaneTemplate") return errors.Wrapf(err, "error patching KubeadmControlPlaneTemplate") } case *bootstrapv1.KubeadmConfigTemplate: @@ -105,7 +105,7 @@ func (h *ExtensionHandlers) GeneratePatches(ctx context.Context, req *runtimehoo // linked to a specific MachineDeployment class; another option is to check the holderRef value and call // this func or more specialized func conditionally. if err := patchKubeadmConfigTemplate(ctx, obj, variables); err != nil { - log.Error(err, "error patching KubeadmConfigTemplate") + log.Error(err, "Error patching KubeadmConfigTemplate") return errors.Wrap(err, "error patching KubeadmConfigTemplate") } case *infrav1.DockerMachineTemplate: @@ -114,12 +114,12 @@ func (h *ExtensionHandlers) GeneratePatches(ctx context.Context, req *runtimehoo // linked to ControlPlane or for DockerMachineTemplate linked to MachineDeployment classes; another option // is to check the holderRef value and call this func or more specialized func conditionally. 
if err := patchDockerMachineTemplate(ctx, obj, variables); err != nil { - log.Error(err, "error patching DockerMachineTemplate") + log.Error(err, "Error patching DockerMachineTemplate") return errors.Wrap(err, "error patching DockerMachineTemplate") } case *infraexpv1.DockerMachinePoolTemplate: if err := patchDockerMachinePoolTemplate(ctx, obj, variables); err != nil { - log.Error(err, "error patching DockerMachinePoolTemplate") + log.Error(err, "Error patching DockerMachinePoolTemplate") return errors.Wrap(err, "error patching DockerMachinePoolTemplate") } } diff --git a/test/extension/main.go b/test/extension/main.go index 8638724bea0f..2a6742bba8d2 100644 --- a/test/extension/main.go +++ b/test/extension/main.go @@ -173,7 +173,7 @@ func main() { pflag.CommandLine.AddGoFlagSet(flag.CommandLine) // Set log level 2 as default. if err := pflag.CommandLine.Set("v", "2"); err != nil { - setupLog.Error(err, "failed to set default log level") + setupLog.Error(err, "Failed to set default log level") os.Exit(1) } pflag.Parse() @@ -182,7 +182,7 @@ func main() { // so klog will automatically use the right logger. // NOTE: klog is the log of choice of component-base machinery. if err := logsv1.ValidateAndApply(logOptions, nil); err != nil { - setupLog.Error(err, "unable to start extension") + setupLog.Error(err, "Unable to start extension") os.Exit(1) } @@ -199,7 +199,7 @@ func main() { tlsOptionOverrides, err := flags.GetTLSOptionOverrideFuncs(tlsOptions) if err != nil { - setupLog.Error(err, "unable to add TLS settings to the webhook server") + setupLog.Error(err, "Unable to add TLS settings to the webhook server") os.Exit(1) } @@ -219,7 +219,7 @@ func main() { Catalog: catalog, }) if err != nil { - setupLog.Error(err, "error creating runtime extension webhook server") + setupLog.Error(err, "Error creating runtime extension webhook server") os.Exit(1) } @@ -251,7 +251,7 @@ func main() { // Start the manager mgr, err := ctrl.NewManager(restConfig, ctrlOptions) if err != nil { - setupLog.Error(err, "unable to start manager") + setupLog.Error(err, "Unable to start manager") os.Exit(1) } @@ -270,7 +270,7 @@ func main() { setupLog.Info("Starting manager", "version", version.Get().String()) if err := mgr.Start(ctx); err != nil { - setupLog.Error(err, "problem running manager") + setupLog.Error(err, "Problem running manager") os.Exit(1) } } @@ -287,7 +287,7 @@ func setupTopologyMutationHookHandlers(runtimeExtensionWebhookServer *server.Ser Name: "generate-patches", HandlerFunc: topologyMutationExtensionHandlers.GeneratePatches, }); err != nil { - setupLog.Error(err, "error adding handler") + setupLog.Error(err, "Error adding handler") os.Exit(1) } @@ -296,7 +296,7 @@ func setupTopologyMutationHookHandlers(runtimeExtensionWebhookServer *server.Ser Name: "validate-topology", HandlerFunc: topologyMutationExtensionHandlers.ValidateTopology, }); err != nil { - setupLog.Error(err, "error adding handler") + setupLog.Error(err, "Error adding handler") os.Exit(1) } @@ -305,7 +305,7 @@ func setupTopologyMutationHookHandlers(runtimeExtensionWebhookServer *server.Ser Name: "discover-variables", HandlerFunc: topologyMutationExtensionHandlers.DiscoverVariables, }); err != nil { - setupLog.Error(err, "error adding handler") + setupLog.Error(err, "Error adding handler") os.Exit(1) } } @@ -322,7 +322,7 @@ func setupLifecycleHookHandlers(mgr ctrl.Manager, runtimeExtensionWebhookServer Name: "before-cluster-create", HandlerFunc: lifecycleExtensionHandlers.DoBeforeClusterCreate, }); err != nil { - setupLog.Error(err, "error adding handler") + setupLog.Error(err, "Error adding handler") os.Exit(1) } @@ -331,7 +331,7 @@ func setupLifecycleHookHandlers(mgr ctrl.Manager, runtimeExtensionWebhookServer Name: "after-control-plane-initialized", HandlerFunc: lifecycleExtensionHandlers.DoAfterControlPlaneInitialized, }); err != nil { - setupLog.Error(err, "error adding handler") + setupLog.Error(err, "Error adding handler") os.Exit(1) } @@ -340,7 +340,7 @@ func setupLifecycleHookHandlers(mgr ctrl.Manager, runtimeExtensionWebhookServer Name: "before-cluster-upgrade", HandlerFunc: lifecycleExtensionHandlers.DoBeforeClusterUpgrade, }); err != nil { - setupLog.Error(err, "error adding handler") + setupLog.Error(err, "Error adding handler") os.Exit(1) } @@ -349,7 +349,7 @@ func setupLifecycleHookHandlers(mgr ctrl.Manager, runtimeExtensionWebhookServer Name: "after-control-plane-upgrade", HandlerFunc: lifecycleExtensionHandlers.DoAfterControlPlaneUpgrade, }); err != nil { - setupLog.Error(err, "error adding handler") + setupLog.Error(err, "Error adding handler") os.Exit(1) } @@ -358,7 +358,7 @@ func setupLifecycleHookHandlers(mgr ctrl.Manager, runtimeExtensionWebhookServer Name: "after-cluster-upgrade", HandlerFunc: lifecycleExtensionHandlers.DoAfterClusterUpgrade, }); err != nil { - setupLog.Error(err, "error adding handler") + setupLog.Error(err, "Error adding handler") os.Exit(1) } @@ -367,19 +367,19 @@ func setupLifecycleHookHandlers(mgr ctrl.Manager, runtimeExtensionWebhookServer Name: "before-cluster-delete", HandlerFunc: lifecycleExtensionHandlers.DoBeforeClusterDelete, }); err != nil { - setupLog.Error(err, "error adding handler") + setupLog.Error(err, "Error adding handler") os.Exit(1) } } func setupChecks(mgr ctrl.Manager) { if err := mgr.AddReadyzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil { - setupLog.Error(err, "unable to create ready check") + setupLog.Error(err, "Unable to create ready check") os.Exit(1) } if err := mgr.AddHealthzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil { - setupLog.Error(err, "unable to create health check") + setupLog.Error(err, "Unable to create health check") os.Exit(1) } } diff --git a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go index a39046f503a3..6d43ef153783 100644 --- a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go +++ b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go @@ -132,7 +132,7 @@ func (r *DockerMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Re // Always attempt to Patch the DockerMachinePool object and status after each reconciliation. defer func() { if err := patchDockerMachinePool(ctx, patchHelper, dockerMachinePool); err != nil { - log.Error(err, "failed to patch DockerMachinePool") + log.Error(err, "Failed to patch DockerMachinePool") if rerr == nil { rerr = err } @@ -177,8 +177,8 @@ func (r *DockerMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). Watches( &expv1.MachinePool{}, - handler.EnqueueRequestsFromMapFunc(utilexp.MachinePoolToInfrastructureMapFunc( - infraexpv1.GroupVersion.WithKind("DockerMachinePool"), ctrl.LoggerFrom(ctx))), + handler.EnqueueRequestsFromMapFunc(utilexp.MachinePoolToInfrastructureMapFunc(ctx, + infraexpv1.GroupVersion.WithKind("DockerMachinePool"))), ).
Watches( &infrav1.DockerMachine{}, diff --git a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_phases.go b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_phases.go index 7e4726246641..6172599496c2 100644 --- a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_phases.go +++ b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_phases.go @@ -51,7 +51,7 @@ import ( func (r *DockerMachinePoolReconciler) reconcileDockerContainers(ctx context.Context, cluster *clusterv1.Cluster, machinePool *expv1.MachinePool, dockerMachinePool *infraexpv1.DockerMachinePool) error { log := ctrl.LoggerFrom(ctx) - log.V(2).Info("Reconciling Docker containers", "dockerMachinePool", dockerMachinePool.Name, "namespace", dockerMachinePool.Namespace) + log.V(2).Info("Reconciling Docker containers", "DockerMachinePool", klog.KObj(dockerMachinePool)) labelFilters := map[string]string{dockerMachinePoolLabel: dockerMachinePool.Name} @@ -63,7 +63,7 @@ func (r *DockerMachinePoolReconciler) reconcileDockerContainers(ctx context.Cont matchingMachineCount := len(machinesMatchingInfrastructureSpec(ctx, machines, machinePool, dockerMachinePool)) numToCreate := int(*machinePool.Spec.Replicas) - matchingMachineCount for i := 0; i < numToCreate; i++ { - log.V(2).Info("Creating a new Docker container for machinePool", "machinePool", machinePool.Name) + log.V(2).Info("Creating a new Docker container for machinePool", "MachinePool", klog.KObj(machinePool)) name := fmt.Sprintf("worker-%s", util.RandomString(6)) if err := createDockerContainer(ctx, name, cluster, machinePool, dockerMachinePool); err != nil { return errors.Wrap(err, "failed to create a new docker machine") @@ -98,7 +98,7 @@ func createDockerContainer(ctx context.Context, name string, cluster *clusterv1. } } - log.Info("Creating container for machinePool", "name", name, "machinePool", machinePool.Name) + log.Info("Creating container for machinePool", "name", name, "MachinePool", klog.KObj(machinePool)) if err := externalMachine.Create(ctx, dockerMachinePool.Spec.Template.CustomImage, constants.WorkerNodeRoleValue, machinePool.Spec.Template.Spec.Version, labels, dockerMachinePool.Spec.Template.ExtraMounts); err != nil { return errors.Wrapf(err, "failed to create docker machine with name %s", name) } @@ -115,7 +115,7 @@ func createDockerContainer(ctx context.Context, name string, cluster *clusterv1. func (r *DockerMachinePoolReconciler) reconcileDockerMachines(ctx context.Context, cluster *clusterv1.Cluster, machinePool *expv1.MachinePool, dockerMachinePool *infraexpv1.DockerMachinePool) error { log := ctrl.LoggerFrom(ctx) - log.V(2).Info("Reconciling DockerMachines", "dockerMachinePool", dockerMachinePool.Name, "namespace", dockerMachinePool.Namespace) + log.V(2).Info("Reconciling DockerMachines", "DockerMachinePool", klog.KObj(dockerMachinePool)) dockerMachineList, err := getDockerMachines(ctx, r.Client, *cluster, *machinePool, *dockerMachinePool) if err != nil { @@ -144,7 +144,7 @@ func (r *DockerMachinePoolReconciler) reconcileDockerMachines(ctx context.Contex // Providers should iterate through their infrastructure instances and ensure that each instance has a corresponding InfraMachine. 
for _, machine := range externalMachines { if existingMachine, ok := dockerMachineMap[machine.Name()]; ok { - log.V(2).Info("Patching existing DockerMachine", "name", existingMachine.Name) + log.V(2).Info("Patching existing DockerMachine", "DockerMachine", klog.KObj(&existingMachine)) desiredMachine := computeDesiredDockerMachine(machine.Name(), cluster, machinePool, dockerMachinePool, &existingMachine) if err := ssa.Patch(ctx, r.Client, dockerMachinePoolControllerName, desiredMachine, ssa.WithCachingProxy{Cache: r.ssaCache, Original: &existingMachine}); err != nil { return errors.Wrapf(err, "failed to update DockerMachine %q", klog.KObj(desiredMachine)) @@ -168,7 +168,8 @@ func (r *DockerMachinePoolReconciler) reconcileDockerMachines(ctx context.Contex // This allows the InfraMachine (and owner Machine) to be deleted and avoid hanging resources when a user deletes an instance out-of-band. for _, dockerMachine := range dockerMachineMap { if _, ok := externalMachineMap[dockerMachine.Name]; !ok { - log.V(2).Info("Deleting DockerMachine with no underlying infrastructure", "dockerMachine", dockerMachine.Name) + dockerMachine := dockerMachine + log.V(2).Info("Deleting DockerMachine with no underlying infrastructure", "DockerMachine", klog.KObj(&dockerMachine)) if err := r.deleteMachinePoolMachine(ctx, dockerMachine); err != nil { return err } @@ -216,7 +217,8 @@ func (r *DockerMachinePoolReconciler) reconcileDockerMachines(ctx context.Contex // Loop through outdated DockerMachines first and decrement the overProvisionCount until it reaches 0. for _, dockerMachine := range outdatedMachines { if overProvisionCount > 0 { - log.V(2).Info("Deleting DockerMachine because it is outdated", "dockerMachine", dockerMachine.Name, "namespace", dockerMachine.Namespace) + dockerMachine := dockerMachine + log.V(2).Info("Deleting DockerMachine because it is outdated", "DockerMachine", klog.KObj(&dockerMachine)) if err := r.deleteMachinePoolMachine(ctx, dockerMachine); err != nil { return err } @@ -228,7 +230,8 @@ func (r *DockerMachinePoolReconciler) reconcileDockerMachines(ctx context.Contex // Then, loop through the ready DockerMachines first and decrement the overProvisionCount until it reaches 0. 
for _, dockerMachine := range readyMachines { if overProvisionCount > 0 { - log.V(2).Info("Deleting DockerMachine because it is an excess replica", "dockerMachine", dockerMachine.Name, "namespace", dockerMachine.Namespace) + dockerMachine := dockerMachine + log.V(2).Info("Deleting DockerMachine because it is an excess replica", "DockerMachine", klog.KObj(&dockerMachine)) if err := r.deleteMachinePoolMachine(ctx, dockerMachine); err != nil { return err } @@ -297,7 +300,7 @@ func (r *DockerMachinePoolReconciler) deleteMachinePoolMachine(ctx context.Conte return nil } - log.Info("Deleting Machine for DockerMachine", "machine", klog.KObj(machine), "dockerMachine", klog.KObj(&dockerMachine)) + log.Info("Deleting Machine for DockerMachine", "Machine", klog.KObj(machine), "DockerMachine", klog.KObj(&dockerMachine)) if err := r.Client.Delete(ctx, machine); err != nil { return errors.Wrapf(err, "failed to delete Machine %s/%s", machine.Namespace, machine.Name) diff --git a/test/infrastructure/docker/internal/controllers/dockercluster_controller.go b/test/infrastructure/docker/internal/controllers/dockercluster_controller.go index b5ace259e65c..a701040f9227 100644 --- a/test/infrastructure/docker/internal/controllers/dockercluster_controller.go +++ b/test/infrastructure/docker/internal/controllers/dockercluster_controller.go @@ -88,7 +88,7 @@ func (r *DockerClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reques // Always attempt to Patch the DockerCluster object and status after each reconciliation. defer func() { if err := patchDockerCluster(ctx, patchHelper, dockerCluster); err != nil { - log.Error(err, "failed to patch DockerCluster") + log.Error(err, "Failed to patch DockerCluster") if rerr == nil { rerr = err } diff --git a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go index 8b517275acfa..e9b987bc8ea2 100644 --- a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go +++ b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go @@ -146,7 +146,7 @@ func (r *DockerMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques // Always attempt to Patch the DockerMachine object and status after each reconciliation. 
defer func() { if err := patchDockerMachine(ctx, patchHelper, dockerMachine); err != nil { - log.Error(err, "failed to patch DockerMachine") + log.Error(err, "Failed to patch DockerMachine") if rerr == nil { rerr = err } @@ -404,7 +404,7 @@ func (r *DockerMachineReconciler) reconcileNormal(ctx context.Context, cluster * conditions.MarkTrue(dockerMachine, infrav1.BootstrapExecSucceededCondition) if err := setMachineAddress(ctx, dockerMachine, externalMachine); err != nil { - log.Error(err, "failed to set the machine address") + log.Error(err, "Failed to set the machine address") return ctrl.Result{RequeueAfter: 5 * time.Second}, nil } @@ -430,7 +430,7 @@ func (r *DockerMachineReconciler) reconcileNormal(ctx context.Context, cluster * if errors.As(err, &docker.ContainerNotRunningError{}) { return ctrl.Result{}, errors.Wrap(err, "failed to patch the Kubernetes node with the machine providerID") } - log.Error(err, "failed to patch the Kubernetes node with the machine providerID") + log.Error(err, "Failed to patch the Kubernetes node with the machine providerID") return ctrl.Result{RequeueAfter: 5 * time.Second}, nil } // Set ProviderID so the Cluster API Machine Controller can pull it diff --git a/test/infrastructure/docker/internal/docker/machine.go b/test/infrastructure/docker/internal/docker/machine.go index d106e0cd1682..b354ed894d27 100644 --- a/test/infrastructure/docker/internal/docker/machine.go +++ b/test/infrastructure/docker/internal/docker/machine.go @@ -547,7 +547,7 @@ func (m *Machine) Delete(ctx context.Context) error { func logContainerDebugInfo(ctx context.Context, log logr.Logger, name string) { containerRuntime, err := container.RuntimeFrom(ctx) if err != nil { - log.Error(err, "failed to connect to container runtime") + log.Error(err, "Failed to connect to container runtime") return } @@ -561,7 +561,7 @@ func logContainerDebugInfo(ctx context.Context, log logr.Logger, name string) { var buffer bytes.Buffer err = containerRuntime.ContainerDebugInfo(debugCtx, name, &buffer) if err != nil { - log.Error(err, "failed to get logs from the machine container") + log.Error(err, "Failed to get logs from the machine container") return } log.Info("Got logs from the machine container", "output", strings.ReplaceAll(buffer.String(), "\\n", "\n")) diff --git a/test/infrastructure/docker/internal/docker/types/node.go b/test/infrastructure/docker/internal/docker/types/node.go index de220cc0436b..f3ad14777617 100644 --- a/test/infrastructure/docker/internal/docker/types/node.go +++ b/test/infrastructure/docker/internal/docker/types/node.go @@ -109,7 +109,7 @@ func (n *Node) Delete(ctx context.Context) error { var buffer bytes.Buffer err = containerRuntime.ContainerDebugInfo(debugCtx, n.Name, &buffer) if err != nil { - log.Error(err, "failed to get logs from the machine container") + log.Error(err, "Failed to get logs from the machine container") } else { log.Info("Got logs from the machine container", "output", strings.ReplaceAll(buffer.String(), "\\n", "\n")) } diff --git a/test/infrastructure/docker/main.go b/test/infrastructure/docker/main.go index bc186ce171d2..64f679b27ec4 100644 --- a/test/infrastructure/docker/main.go +++ b/test/infrastructure/docker/main.go @@ -175,7 +175,7 @@ func InitFlags(fs *pflag.FlagSet) { func main() { if _, err := os.ReadDir("/tmp/"); err != nil { - setupLog.Error(err, "unable to start manager") + setupLog.Error(err, "Unable to start manager") os.Exit(1) } @@ -184,13 +184,13 @@ func main() { pflag.CommandLine.AddGoFlagSet(flag.CommandLine) // Set log level 2 as default. if err := pflag.CommandLine.Set("v", "2"); err != nil { - setupLog.Error(err, "failed to set default log level") + setupLog.Error(err, "Failed to set default log level") os.Exit(1) } pflag.Parse() if err := logsv1.ValidateAndApply(logOptions, nil); err != nil { - setupLog.Error(err, "unable to start manager") + setupLog.Error(err, "Unable to start manager") os.Exit(1) } @@ -204,7 +204,7 @@ func main() { tlsOptionOverrides, err := flags.GetTLSOptionOverrideFuncs(tlsOptions) if err != nil { - setupLog.Error(err, "unable to add TLS settings to the webhook server") + setupLog.Error(err, "Unable to add TLS settings to the webhook server") os.Exit(1) } @@ -268,7 +268,7 @@ func main() { mgr, err := ctrl.NewManager(restConfig, ctrlOptions) if err != nil { - setupLog.Error(err, "unable to start manager") + setupLog.Error(err, "Unable to start manager") os.Exit(1) } @@ -279,21 +279,21 @@ func main() { setupReconcilers(ctx, mgr) setupWebhooks(mgr) - setupLog.Info("starting manager", "version", version.Get().String()) + setupLog.Info("Starting manager", "version", version.Get().String()) if err := mgr.Start(ctx); err != nil { - setupLog.Error(err, "problem running manager") + setupLog.Error(err, "Problem running manager") os.Exit(1) } } func setupChecks(mgr ctrl.Manager) { if err := mgr.AddReadyzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil { - setupLog.Error(err, "unable to create ready check") + setupLog.Error(err, "Unable to create ready check") os.Exit(1) } if err := mgr.AddHealthzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil { - setupLog.Error(err, "unable to create health check") + setupLog.Error(err, "Unable to create health check") os.Exit(1) } } @@ -306,14 +306,14 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { }, }) if err != nil { - setupLog.Error(err, "unable to create secret caching client") + setupLog.Error(err, "Unable to create secret caching client") os.Exit(1) } // Set our runtime client into the context for later use runtimeClient, err := container.NewDockerClient() if err != nil { - setupLog.Error(err, "unable to establish container runtime connection", "controller", "reconciler") + setupLog.Error(err, "Unable to establish container runtime connection", "controller", "reconciler") os.Exit(1) } @@ -326,7 +326,7 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { }, ) if err != nil { - setupLog.Error(err, "unable to create cluster cache tracker") + setupLog.Error(err, "Unable to create cluster cache tracker") os.Exit(1) } @@ -337,7 +337,7 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { }).SetupWithManager(ctx, mgr, controller.Options{ MaxConcurrentReconciles: clusterCacheTrackerConcurrency, }); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "ClusterCacheReconciler") + setupLog.Error(err, "Unable to create controller", "controller", "ClusterCacheReconciler") os.Exit(1) } @@ -349,7 +349,7 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { }).SetupWithManager(ctx, mgr, controller.Options{ MaxConcurrentReconciles: concurrency, }); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "DockerMachine") + setupLog.Error(err, "Unable to create controller", "controller", "DockerMachine") os.Exit(1) } @@ -358,7 +358,7 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { ContainerRuntime: runtimeClient, WatchFilterValue: watchFilterValue, }).SetupWithManager(ctx, mgr, controller.Options{}); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "DockerCluster") + setupLog.Error(err, "Unable to create controller", "controller", "DockerCluster") os.Exit(1) } @@ -369,7 +369,7 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { Tracker: tracker, WatchFilterValue: watchFilterValue, }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: concurrency}); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "DockerMachinePool") + setupLog.Error(err, "Unable to create controller", "controller", "DockerMachinePool") os.Exit(1) } } @@ -377,23 +377,23 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { func setupWebhooks(mgr ctrl.Manager) { if err := (&infrawebhooks.DockerMachineTemplate{}).SetupWebhookWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create webhook", "webhook", "DockerMachineTemplate") + setupLog.Error(err, "Unable to create webhook", "webhook", "DockerMachineTemplate") os.Exit(1) } if err := (&infrawebhooks.DockerCluster{}).SetupWebhookWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create webhook", "webhook", "DockerCluster") + setupLog.Error(err, "Unable to create webhook", "webhook", "DockerCluster") os.Exit(1) } if err := (&infrawebhooks.DockerClusterTemplate{}).SetupWebhookWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create webhook", "webhook", "DockerClusterTemplate") + setupLog.Error(err, "Unable to create webhook", "webhook", "DockerClusterTemplate") os.Exit(1) } if feature.Gates.Enabled(feature.MachinePool) { if err := (&infraexpwebhooks.DockerMachinePool{}).SetupWebhookWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create webhook", "webhook", "DockerMachinePool") + setupLog.Error(err, "Unable to create webhook", "webhook", "DockerMachinePool") os.Exit(1) } } diff --git a/test/infrastructure/inmemory/main.go b/test/infrastructure/inmemory/main.go index 0626f1e78d54..99ac7b337934 100644 --- a/test/infrastructure/inmemory/main.go +++ b/test/infrastructure/inmemory/main.go @@ -174,13 +174,13 @@ func main() { pflag.CommandLine.AddGoFlagSet(flag.CommandLine) // Set log level 2 as default.
if err := pflag.CommandLine.Set("v", "2"); err != nil { - setupLog.Error(err, "failed to set default log level") + setupLog.Error(err, "Failed to set default log level") os.Exit(1) } pflag.Parse() if err := logsv1.ValidateAndApply(logOptions, nil); err != nil { - setupLog.Error(err, "unable to start manager") + setupLog.Error(err, "Unable to start manager") os.Exit(1) } @@ -194,7 +194,7 @@ func main() { tlsOptionOverrides, err := flags.GetTLSOptionOverrideFuncs(tlsOptions) if err != nil { - setupLog.Error(err, "unable to add TLS settings to the webhook server") + setupLog.Error(err, "Unable to add TLS settings to the webhook server") os.Exit(1) } @@ -247,7 +247,7 @@ func main() { mgr, err := ctrl.NewManager(restConfig, ctrlOptions) if err != nil { - setupLog.Error(err, "unable to start manager") + setupLog.Error(err, "Unable to start manager") os.Exit(1) } @@ -259,21 +259,21 @@ func main() { setupReconcilers(ctx, mgr) setupWebhooks(mgr) - setupLog.Info("starting manager", "version", version.Get().String()) + setupLog.Info("Starting manager", "version", version.Get().String()) if err := mgr.Start(ctx); err != nil { - setupLog.Error(err, "problem running manager") + setupLog.Error(err, "Problem running manager") os.Exit(1) } } func setupChecks(mgr ctrl.Manager) { if err := mgr.AddReadyzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil { - setupLog.Error(err, "unable to create ready check") + setupLog.Error(err, "Unable to create ready check") os.Exit(1) } if err := mgr.AddHealthzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil { - setupLog.Error(err, "unable to create health check") + setupLog.Error(err, "Unable to create health check") os.Exit(1) } } @@ -285,7 +285,7 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { // Start in memory manager inMemoryManager := inmemoryruntime.NewManager(inmemoryScheme) if err := inMemoryManager.Start(ctx); err != nil { - setupLog.Error(err, "unable to start a in memory manager") + setupLog.Error(err, "Unable to start a in memory manager") os.Exit(1) } @@ -293,7 +293,7 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { podIP := os.Getenv("POD_IP") apiServerMux, err := inmemoryserver.NewWorkloadClustersMux(inMemoryManager, podIP) if err != nil { - setupLog.Error(err, "unable to create workload clusters mux") + setupLog.Error(err, "Unable to create workload clusters mux") os.Exit(1) } @@ -304,7 +304,7 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { APIServerMux: apiServerMux, WatchFilterValue: watchFilterValue, }).SetupWithManager(ctx, mgr, concurrency(clusterConcurrency)); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "InMemoryCluster") + setupLog.Error(err, "Unable to create controller", "controller", "InMemoryCluster") os.Exit(1) } @@ -314,29 +314,29 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { APIServerMux: apiServerMux, WatchFilterValue: watchFilterValue, }).SetupWithManager(ctx, mgr, concurrency(machineConcurrency)); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "InMemoryMachine") + setupLog.Error(err, "Unable to create controller", "controller", "InMemoryMachine") os.Exit(1) } } func setupWebhooks(mgr ctrl.Manager) { if err := (&webhooks.InMemoryCluster{}).SetupWebhookWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create webhook", "webhook", "InMemoryCluster") + setupLog.Error(err, "Unable to create webhook", "webhook", "InMemoryCluster") os.Exit(1) } if err := (&webhooks.InMemoryClusterTemplate{}).SetupWebhookWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create webhook", "webhook", "InMemoryClusterTemplate") + setupLog.Error(err, "Unable to create webhook", "webhook", "InMemoryClusterTemplate") os.Exit(1) } if err := (&webhooks.InMemoryMachine{}).SetupWebhookWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create webhook", "webhook", "InMemoryMachine") + setupLog.Error(err, "Unable to create webhook", "webhook", "InMemoryMachine") os.Exit(1) } if err := (&webhooks.InMemoryMachineTemplate{}).SetupWebhookWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create webhook", "webhook", "InMemoryMachineTemplate") + setupLog.Error(err, "Unable to create webhook", "webhook", "InMemoryMachineTemplate") os.Exit(1) } }