From 11e5ac091f86edf7ce1cbe49468f28599cac09b4 Mon Sep 17 00:00:00 2001 From: Ai Ranthem <38308439+AiRanthem@users.noreply.github.com> Date: Tue, 30 Jul 2024 16:57:26 +0800 Subject: [PATCH] support structured logging (#1669) Signed-off-by: AiRanthem Co-authored-by: AiRanthem --- cmd/daemon/main.go | 6 +- .../container_meta_controller.go | 30 ++++---- .../containermeta/container_meta_restarter.go | 8 +- .../crr_daemon_controller.go | 28 +++---- .../containerrecreate/crr_daemon_util.go | 2 +- pkg/daemon/criruntime/factory.go | 29 +++---- pkg/daemon/criruntime/imageruntime/cri.go | 22 +++--- pkg/daemon/criruntime/imageruntime/docker.go | 15 ++-- .../imageruntime/fake_plugin/main.go | 2 +- pkg/daemon/criruntime/imageruntime/helpers.go | 6 +- .../criruntime/imageruntime/helpers_test.go | 2 +- pkg/daemon/criruntime/imageruntime/pouch.go | 14 ++-- pkg/daemon/daemon.go | 2 +- .../imagepuller/imagepuller_controller.go | 16 ++-- pkg/daemon/imagepuller/imagepuller_worker.go | 47 ++++++------ pkg/daemon/imagepuller/utils.go | 6 +- .../kuberuntime/kuberuntime_container.go | 18 ++--- pkg/daemon/kuberuntime/labels.go | 18 ++--- pkg/daemon/podprobe/pod_probe_controller.go | 24 +++--- .../podprobe/pod_probe_controller_test.go | 2 +- pkg/daemon/podprobe/worker.go | 16 ++-- pkg/daemon/util/healthz.go | 4 +- pkg/daemon/util/secret_manager.go | 2 +- pkg/util/client/no_deepcopy_lister.go | 2 +- pkg/util/controllerfinder/pods_finder.go | 2 +- pkg/util/discovery/discovery.go | 6 +- pkg/util/imagejob/imagejob_reader.go | 6 +- .../imagejob/utilfunction/imagejob_util.go | 2 +- pkg/util/inplaceupdate/inplace_update.go | 4 +- .../inplaceupdate/inplace_update_defaults.go | 33 ++++---- pkg/util/lifecycle/lifecycle_utils.go | 4 +- pkg/util/pods.go | 2 +- pkg/util/secret/parse.go | 2 +- pkg/util/tools.go | 4 +- pkg/util/workloadspread/workloadspread.go | 76 +++++++++---------- .../validating/builtin_handlers.go | 4 +- .../cloneset_create_update_handler.go | 2 +- .../cloneset_create_update_handler.go | 2 +- .../validating/crd_handler.go | 4 +- .../mutating/daemonset_mutating_handler.go | 2 +- .../daemonset_create_update_handler.go | 2 +- .../ephemeraljob_create_update_handler.go | 2 +- .../imagelistpulljob_create_update_handler.go | 2 +- .../imagelistpulljob_create_update_handler.go | 2 +- .../imagepulljob_create_update_handler.go | 2 +- .../imagepulljob_create_update_handler.go | 2 +- .../ingress/validating/ingress_handler.go | 2 +- .../namespace/validating/namespace_handler.go | 2 +- .../nodeimage_create_update_handler.go | 2 +- .../nodeimage_create_update_handler.go | 2 +- ...ontainer_launch_priority_initialization.go | 4 +- .../mutating/enhancedlivenessprobe_handler.go | 8 +- .../pod/mutating/persistent_pod_state.go | 6 +- .../pod/mutating/pod_unavailable_budget.go | 2 +- pkg/webhook/pod/mutating/sidecarset.go | 47 ++++++------ pkg/webhook/pod/mutating/workloadspread.go | 2 +- .../validating/pod_create_update_handler.go | 2 +- .../pod/validating/pod_unavailable_budget.go | 6 +- pkg/webhook/pod/validating/workloadspread.go | 10 +-- ...ourcedistribution_create_update_handler.go | 2 +- pkg/webhook/server.go | 8 +- .../service/validating/service_handler.go | 2 +- .../sidecarset_create_update_handler.go | 2 +- .../statefulset_create_update_handler.go | 2 +- .../statefulset_create_update_handler.go | 2 +- .../uniteddeployment_create_update_handler.go | 2 +- .../uniteddeployment_create_update_handler.go | 2 +- .../util/configuration/configuration.go | 4 +- .../util/controller/webhook_controller.go | 26 +++---- 
pkg/webhook/util/health/checker.go | 8 +- pkg/webhook/util/util.go | 4 +- pkg/webhook/util/writer/secret.go | 6 +- .../validating/workloadspread_validation.go | 2 +- 73 files changed, 332 insertions(+), 323 deletions(-) diff --git a/cmd/daemon/main.go b/cmd/daemon/main.go index a11ee02e9b..594d16d735 100644 --- a/cmd/daemon/main.go +++ b/cmd/daemon/main.go @@ -79,12 +79,12 @@ func main() { if _, err := os.Stat(*pluginConfigFile); err == nil { err = plugin.RegisterCredentialProviderPlugins(*pluginConfigFile, *pluginBinDir) if err != nil { - klog.Errorf("Failed to register credential provider plugins: %v", err) + klog.ErrorS(err, "Failed to register credential provider plugins") } } else if os.IsNotExist(err) { - klog.Infof("No plugin config file found, skipping: %s", *pluginConfigFile) + klog.InfoS("No plugin config file found, skipping", "configFile", *pluginConfigFile) } else { - klog.Errorf("Failed to check plugin config file: %v", err) + klog.ErrorS(err, "Failed to check plugin config file") } // make sure the new docker key ring is made and set after the credential plugins are registered secret.MakeAndSetKeyring() diff --git a/pkg/daemon/containermeta/container_meta_controller.go b/pkg/daemon/containermeta/container_meta_controller.go index f951ba52d7..7ba912166d 100644 --- a/pkg/daemon/containermeta/container_meta_controller.go +++ b/pkg/daemon/containermeta/container_meta_controller.go @@ -199,7 +199,7 @@ func (c *Controller) Run(stop <-chan struct{}) { defer utilruntime.HandleCrash() defer c.queue.ShutDown() - klog.Infof("Starting containermeta Controller") + klog.Info("Starting containermeta Controller") go c.restarter.Run(stop) for i := 0; i < workers; i++ { go wait.Until(func() { @@ -239,7 +239,7 @@ func (c *Controller) processNextWorkItem() bool { func (c *Controller) sync(key string) (retErr error) { namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - klog.Warningf("Invalid key: %s", key) + klog.InfoS("Invalid key", "key", key) return nil } @@ -248,7 +248,7 @@ func (c *Controller) sync(key string) (retErr error) { if errors.IsNotFound(err) { return nil } - klog.Errorf("Failed to get Pod %s/%s from lister: %v", namespace, name, err) + klog.ErrorS(err, "Failed to get Pod from lister", "namespace", namespace, "name", name) return err } else if pod.DeletionTimestamp != nil || len(pod.Status.ContainerStatuses) == 0 { return nil } @@ -258,24 +258,24 @@ func (c *Controller) sync(key string) (retErr error) { if duration < maxExpectationWaitDuration { return nil } - klog.Warningf("Wait for Pod %s/%s resourceVersion expectation over %v", namespace, name, duration) + klog.InfoS("Timed out waiting for Pod resourceVersion expectation", "namespace", namespace, "name", name, "duration", duration) resourceVersionExpectation.Delete(pod) } criRuntime, kubeRuntime, err := c.getRuntimeForPod(pod) if err != nil { - klog.Errorf("Failed to get runtime for Pod %s/%s: %v", namespace, name, err) + klog.ErrorS(err, "Failed to get runtime for Pod", "namespace", namespace, "name", name) return nil } else if criRuntime == nil { return nil } - klog.V(3).Infof("Start syncing for %s/%s", namespace, name) + klog.V(3).InfoS("Start syncing", "namespace", namespace, "name", name) defer func() { if retErr != nil { - klog.Errorf("Failed to sync for %s/%s: %v", namespace, name, retErr) + klog.ErrorS(retErr, "Failed to sync", "namespace", namespace, "name", name) } else { - klog.V(3).Infof("Finished syncing for %s/%s", namespace, name) + klog.V(3).InfoS("Finished syncing", "namespace", namespace, 
"name", name) } }() @@ -286,7 +286,7 @@ func (c *Controller) sync(key string) (retErr error) { oldMetaSet, err := appspub.GetRuntimeContainerMetaSet(pod) if err != nil { - klog.Warningf("Failed to get old runtime meta from Pod %s/%s: %v", namespace, name, err) + klog.ErrorS(err, "Failed to get old runtime meta from Pod", "namespace", namespace, "name", name) } newMetaSet := c.manageContainerMetaSet(pod, kubePodStatus, oldMetaSet, criRuntime) @@ -302,7 +302,7 @@ func (c *Controller) reportContainerMetaSet(pod *v1.Pod, oldMetaSet, newMetaSet ObjectMeta: metav1.ObjectMeta{Namespace: pod.Namespace, Name: pod.Name}, } containerMetaSetStr := util.DumpJSON(newMetaSet) - klog.Infof("Reporting container meta changed in Pod %s/%s: %v", pod.Namespace, pod.Name, containerMetaSetStr) + klog.InfoS("Reporting container meta changed in Pod", "namespace", pod.Namespace, "name", pod.Name, "containerMetaSetStr", containerMetaSetStr) mergePatch, _ := json.Marshal(map[string]interface{}{ "metadata": map[string]interface{}{ "annotations": map[string]string{ @@ -354,11 +354,11 @@ func (c *Controller) manageContainerMetaSet(pod *v1.Pod, kubePodStatus *kubeletc envGetter := wrapEnvGetter(criRuntime, status.ID.ID, fmt.Sprintf("container %s (%s) in Pod %s/%s", containerSpec.Name, status.ID.String(), pod.Namespace, pod.Name)) containerMeta.Hashes.ExtractedEnvFromMetadataHash, err = envHasher.GetCurrentHash(containerSpec, envGetter) if err != nil { - klog.Errorf("Failed to hash container %s (%s) with env for Pod %s/%s: %v", containerSpec.Name, status.ID.String(), pod.Namespace, pod.Name, err) + klog.ErrorS(err, "Failed to hash container with env for Pod", "containerName", containerSpec.Name, "containerID", status.ID.String(), "namespace", pod.Namespace, "podName", pod.Name) enqueueAfter(c.queue, pod, time.Second*3) } else { - klog.V(4).Infof("Extracted env from metadata for container %s (%s) in Pod %s/%s, hash: %v", - containerSpec.Name, status.ID.String(), pod.Namespace, pod.Name, containerMeta.Hashes.ExtractedEnvFromMetadataHash) + klog.V(4).InfoS("Extracted env from metadata for container", + "containerName", containerSpec.Name, "containerID", status.ID.String(), "namespace", pod.Namespace, "podName", pod.Name, "hash", containerMeta.Hashes.ExtractedEnvFromMetadataHash) } } @@ -368,7 +368,7 @@ func (c *Controller) manageContainerMetaSet(pod *v1.Pod, kubePodStatus *kubeletc // Trigger restarting when expected env hash is not equal to current hash if containerMeta.Hashes.ExtractedEnvFromMetadataHash > 0 && containerMeta.Hashes.ExtractedEnvFromMetadataHash != envHasher.GetExpectHash(containerSpec, pod) { // Maybe checking PlainHash inconsistent here can skip to trigger restart. But it is not a good idea for some special scenarios. 
- klog.V(2).Infof("Triggering container %s (%s) in Pod %s/%s to restart, for it has inconsistent hash of env from metadata", containerSpec.Name, status.ID.String(), pod.Namespace, pod.Name) + klog.V(2).InfoS("Triggering container in Pod to restart, for it has inconsistent hash of env from metadata", "containerName", containerSpec.Name, "containerID", status.ID.String(), "namespace", pod.Namespace, "podName", pod.Name) c.restarter.queue.AddRateLimited(status.ID) } } @@ -401,7 +401,7 @@ func wrapEnvGetter(criRuntime criapi.RuntimeService, containerID, logID string) if getErr != nil { return "", getErr } - klog.V(4).Infof("Got env %s=%s in %s", key, envMap[key], logID) + klog.V(4).InfoS("Got env", "key", key, "value", envMap[key], "logID", logID) return envMap[key], nil } } diff --git a/pkg/daemon/containermeta/container_meta_restarter.go b/pkg/daemon/containermeta/container_meta_restarter.go index 4ba4215e37..e50f5a66a0 100644 --- a/pkg/daemon/containermeta/container_meta_restarter.go +++ b/pkg/daemon/containermeta/container_meta_restarter.go @@ -80,21 +80,21 @@ func (c *restartController) processNextWorkItem() bool { func (c *restartController) sync(containerID kubeletcontainer.ContainerID) error { criRuntime := c.runtimeFactory.GetRuntimeServiceByName(containerID.Type) if criRuntime == nil { - klog.Errorf("Not found runtime service for %s in daemon", containerID.Type) + klog.InfoS("Not found runtime service in daemon", "type", containerID.Type) return nil } containers, err := criRuntime.ListContainers(context.TODO(), &runtimeapi.ContainerFilter{Id: containerID.ID}) if err != nil { - klog.Errorf("Failed to list containers by %s: %v", containerID.String(), err) + klog.ErrorS(err, "Failed to list containers", "id", containerID.String()) return err } if len(containers) == 0 || containers[0].State != runtimeapi.ContainerState_CONTAINER_RUNNING { - klog.V(4).Infof("Skip to kill container %s because of not found or non-running state.", containerID.String()) + klog.V(4).InfoS("Skip to kill container because of not found or non-running state.", "id", containerID.String()) return nil } - klog.V(3).Infof("Preparing to stop container %s", containerID.String()) + klog.V(3).InfoS("Preparing to stop container", "id", containerID.String()) kubeRuntime := kuberuntime.NewGenericRuntime(containerID.Type, criRuntime, c.eventRecorder, &http.Client{}) msg := fmt.Sprintf("Stopping containerID %s by container meta restarter", containerID.String()) err = kubeRuntime.KillContainer(nil, containerID, "", msg, nil) diff --git a/pkg/daemon/containerrecreate/crr_daemon_controller.go b/pkg/daemon/containerrecreate/crr_daemon_controller.go index 27094c59a9..fd19005ed2 100644 --- a/pkg/daemon/containerrecreate/crr_daemon_controller.go +++ b/pkg/daemon/containerrecreate/crr_daemon_controller.go @@ -171,7 +171,7 @@ func (c *Controller) Run(stop <-chan struct{}) { return } - klog.Infof("Starting crr daemon controller") + klog.Info("Starting crr daemon controller") for i := 0; i < workers; i++ { go wait.Until(func() { for c.processNextWorkItem() { @@ -210,7 +210,7 @@ func (c *Controller) processNextWorkItem() bool { func (c *Controller) sync(key string) (retErr error) { namespace, podName, err := cache.SplitMetaNamespaceKey(key) if err != nil { - klog.Warningf("Invalid key: %s", key) + klog.InfoS("Invalid key", "key", key) return nil } @@ -235,12 +235,12 @@ func (c *Controller) sync(key string) (retErr error) { return err } - klog.V(3).Infof("Start syncing for %s/%s", namespace, crr.Name) + klog.V(3).InfoS("Start syncing", 
"namespace", namespace, "name", crr.Name) defer func() { if retErr != nil { - klog.Errorf("Failed to sync for %s/%s: %v", namespace, crr.Name, retErr) + klog.ErrorS(retErr, "Failed to sync", "namespace", namespace, "name", crr.Name) } else { - klog.V(3).Infof("Finished syncing for %s/%s", namespace, crr.Name) + klog.V(3).InfoS("Finished syncing", "namespace", namespace, "name", crr.Name) } }() @@ -252,19 +252,19 @@ func (c *Controller) sync(key string) (retErr error) { if crr.Spec.Strategy.UnreadyGracePeriodSeconds != nil { unreadyTimeStr := crr.Annotations[appsv1alpha1.ContainerRecreateRequestUnreadyAcquiredKey] if unreadyTimeStr == "" { - klog.Infof("CRR %s/%s is waiting for unready acquirement.", crr.Namespace, crr.Name) + klog.InfoS("CRR is waiting for unready acquirement", "namespace", crr.Namespace, "name", crr.Name) return nil } unreadyTime, err := time.Parse(time.RFC3339, unreadyTimeStr) if err != nil { - klog.Errorf("CRR %s/%s failed to parse unready time %s: %v", crr.Namespace, crr.Name, unreadyTimeStr, err) + klog.ErrorS(err, "CRR failed to parse unready time", "namespace", crr.Namespace, "name", crr.Name, "unreadyTimeStr", unreadyTimeStr) return c.completeCRRStatus(crr, fmt.Sprintf("failed to parse unready time %s: %v", unreadyTimeStr, err)) } leftTime := time.Duration(*crr.Spec.Strategy.UnreadyGracePeriodSeconds)*time.Second - time.Since(unreadyTime) if leftTime > 0 { - klog.Infof("CRR %s/%s is waiting for unready grace period %v left time.", crr.Namespace, crr.Name, leftTime) + klog.InfoS("CRR is waiting for unready grace period", "namespace", crr.Namespace, "name", crr.Name, "leftTime", leftTime) c.queue.AddAfter(crr.Namespace+"/"+crr.Spec.PodName, leftTime+100*time.Millisecond) return nil } @@ -287,7 +287,7 @@ func (c *Controller) pickRecreateRequest(crrList []*appsv1alpha1.ContainerRecrea if duration < maxExpectationWaitDuration { break } - klog.Warningf("Wait for CRR %s/%s resourceVersion expectation over %v", crr.Namespace, crr.Name, duration) + klog.InfoS("Wait for CRR resourceVersion expectation", "namespace", crr.Namespace, "name", crr.Name, "duration", duration) resourceVersionExpectation.Delete(crr) } @@ -296,7 +296,7 @@ func (c *Controller) pickRecreateRequest(crrList []*appsv1alpha1.ContainerRecrea picked = crr } else if crr.Status.Phase == "" { if err := c.updateCRRPhase(crr, appsv1alpha1.ContainerRecreateRequestPending); err != nil { - klog.Errorf("Failed to update CRR %s/%s status to Pending: %v", crr.Namespace, crr.Name, err) + klog.ErrorS(err, "Failed to update CRR status to Pending", "namespace", crr.Namespace, "name", crr.Name) return nil, err } } @@ -307,7 +307,7 @@ func (c *Controller) pickRecreateRequest(crrList []*appsv1alpha1.ContainerRecrea func (c *Controller) manage(crr *appsv1alpha1.ContainerRecreateRequest) error { runtimeManager, err := c.newRuntimeManager(c.runtimeFactory, crr) if err != nil { - klog.Errorf("Failed to find runtime service for %s/%s: %v", crr.Namespace, crr.Name, err) + klog.ErrorS(err, "Failed to find runtime service", "namespace", crr.Namespace, "name", crr.Name) return c.completeCRRStatus(crr, fmt.Sprintf("failed to find runtime service: %v", err)) } @@ -317,7 +317,7 @@ func (c *Controller) manage(crr *appsv1alpha1.ContainerRecreateRequest) error { if err != nil { return fmt.Errorf("failed to GetPodStatus %s/%s with uid %s: %v", pod.Namespace, pod.Name, pod.UID, err) } - klog.V(5).Infof("CRR %s/%s for Pod %s GetPodStatus: %v", crr.Namespace, crr.Name, pod.Name, util.DumpJSON(podStatus)) + klog.V(5).InfoS("CRR for Pod 
GetPodStatus", "namespace", crr.Namespace, "name", crr.Name, "podName", pod.Name, "podStatus", util.DumpJSON(podStatus)) newCRRContainerRecreateStates := getCurrentCRRContainersRecreateStates(crr, podStatus) if !reflect.DeepEqual(crr.Status.ContainerRecreateStates, newCRRContainerRecreateStates) { @@ -356,7 +356,7 @@ func (c *Controller) manage(crr *appsv1alpha1.ContainerRecreateRequest) error { msg := fmt.Sprintf("Stopping container %s by ContainerRecreateRequest %s", state.Name, crr.Name) err := runtimeManager.KillContainer(pod, kubeContainerStatus.ID, state.Name, msg, nil) if err != nil { - klog.Errorf("Failed to kill container %s in Pod %s/%s for CRR %s/%s: %v", state.Name, pod.Namespace, pod.Name, crr.Namespace, crr.Name, err) + klog.ErrorS(err, "Failed to kill container in Pod for CRR", "containerName", state.Name, "podNamespace", pod.Namespace, "podName", pod.Name, "crrNamespace", crr.Namespace, "crrName", crr.Name) state.Phase = appsv1alpha1.ContainerRecreateRequestFailed state.Message = fmt.Sprintf("kill container error: %v", err) if crr.Spec.Strategy.FailurePolicy == appsv1alpha1.ContainerRecreateRequestFailurePolicyIgnore { @@ -385,7 +385,7 @@ func (c *Controller) manage(crr *appsv1alpha1.ContainerRecreateRequest) error { } func (c *Controller) patchCRRContainerRecreateStates(crr *appsv1alpha1.ContainerRecreateRequest, newCRRContainerRecreateStates []appsv1alpha1.ContainerRecreateRequestContainerRecreateState) error { - klog.V(3).Infof("CRR %s/%s patch containerRecreateStates: %v", crr.Namespace, crr.Name, util.DumpJSON(newCRRContainerRecreateStates)) + klog.V(3).InfoS("CRR patch containerRecreateStates", "namespace", crr.Namespace, "name", crr.Name, "states", util.DumpJSON(newCRRContainerRecreateStates)) crr = crr.DeepCopy() body := fmt.Sprintf(`{"status":{"containerRecreateStates":%s}}`, util.DumpJSON(newCRRContainerRecreateStates)) oldRev := crr.ResourceVersion diff --git a/pkg/daemon/containerrecreate/crr_daemon_util.go b/pkg/daemon/containerrecreate/crr_daemon_util.go index 9e9c90a161..0a0869f9e9 100644 --- a/pkg/daemon/containerrecreate/crr_daemon_util.go +++ b/pkg/daemon/containerrecreate/crr_daemon_util.go @@ -161,7 +161,7 @@ func getCRRSyncContainerStatuses(crr *appsv1alpha1.ContainerRecreateRequest) map } var syncContainerStatuses []appsv1alpha1.ContainerRecreateRequestSyncContainerStatus if err := json.Unmarshal([]byte(str), &syncContainerStatuses); err != nil { - klog.Errorf("Failed to unmarshal CRR %s/%s syncContainerStatuses %s: %v", crr.Namespace, crr.Name, str, err) + klog.ErrorS(err, "Failed to unmarshal CRR syncContainerStatuses", "namespace", crr.Namespace, "name", crr.Name, "rawString", str) return nil } diff --git a/pkg/daemon/criruntime/factory.go b/pkg/daemon/criruntime/factory.go index 37f986c1bf..746de9c90f 100644 --- a/pkg/daemon/criruntime/factory.go +++ b/pkg/daemon/criruntime/factory.go @@ -95,39 +95,39 @@ func NewFactory(varRunPath string, accountManager daemonutil.ImagePullAccountMan case ContainerRuntimeContainerd, ContainerRuntimeCommonCRI, ContainerRuntimePouch: addr, _, err := kubeletutil.GetAddressAndDialer(cfg.runtimeRemoteURI) if err != nil { - klog.Warningf("Failed to get address for %v (%s, %s): %v", cfg.runtimeType, cfg.runtimeURI, cfg.runtimeRemoteURI, err) + klog.ErrorS(err, "Failed to get address", "runtimeType", cfg.runtimeType, "runtimeURI", cfg.runtimeURI, "runtimeRemoteURI", cfg.runtimeRemoteURI) continue } imageService, err = runtimeimage.NewCRIImageService(addr, accountManager) if err != nil { - klog.Warningf("Failed to new image 
service for %v (%s, %s): %v", cfg.runtimeType, cfg.runtimeURI, cfg.runtimeRemoteURI, err) + klog.ErrorS(err, "Failed to create image service", "runtimeType", cfg.runtimeType, "runtimeURI", cfg.runtimeURI, "runtimeRemoteURI", cfg.runtimeRemoteURI) continue } case ContainerRuntimeDocker: imageService, err = runtimeimage.NewDockerImageService(cfg.runtimeURI, accountManager) if err != nil { - klog.Warningf("Failed to new image service for %v (%s, %s): %v", cfg.runtimeType, cfg.runtimeURI, cfg.runtimeRemoteURI, err) + klog.ErrorS(err, "Failed to create image service", "runtimeType", cfg.runtimeType, "runtimeURI", cfg.runtimeURI, "runtimeRemoteURI", cfg.runtimeRemoteURI) continue } } if _, err = imageService.ListImages(context.TODO()); err != nil { - klog.Warningf("Failed to list images for %v (%s, %s): %v", cfg.runtimeType, cfg.runtimeURI, cfg.runtimeRemoteURI, err) + klog.ErrorS(err, "Failed to list images", "runtimeType", cfg.runtimeType, "runtimeURI", cfg.runtimeURI, "runtimeRemoteURI", cfg.runtimeRemoteURI) continue } runtimeService, err = criremote.NewRemoteRuntimeService(cfg.runtimeRemoteURI, time.Second*5, oteltrace.NewNoopTracerProvider()) if err != nil { - klog.Warningf("Failed to new runtime service for %v (%s, %s): %v", cfg.runtimeType, cfg.runtimeURI, cfg.runtimeRemoteURI, err) + klog.ErrorS(err, "Failed to create runtime service", "runtimeType", cfg.runtimeType, "runtimeURI", cfg.runtimeURI, "runtimeRemoteURI", cfg.runtimeRemoteURI) continue } typedVersion, err = runtimeService.Version(context.TODO(), kubeRuntimeAPIVersion) if err != nil { - klog.Warningf("Failed to get runtime typed version for %v (%s, %s): %v", cfg.runtimeType, cfg.runtimeURI, cfg.runtimeRemoteURI, err) + klog.ErrorS(err, "Failed to get runtime typed version", "runtimeType", cfg.runtimeType, "runtimeURI", cfg.runtimeURI, "runtimeRemoteURI", cfg.runtimeRemoteURI) continue } - klog.V(2).Infof("Add runtime impl %v, URI: (%s, %s)", typedVersion.RuntimeName, cfg.runtimeURI, cfg.runtimeRemoteURI) + klog.V(2).InfoS("Add runtime", "runtimeName", typedVersion.RuntimeName, "runtimeURI", cfg.runtimeURI, "runtimeRemoteURI", cfg.runtimeRemoteURI) f.impls = append(f.impls, &runtimeImpl{ cfg: cfg, runtimeName: typedVersion.RuntimeName, @@ -170,9 +170,9 @@ func detectRuntime(varRunPath string) (cfgs []runtimeConfig) { runtimeType: ContainerRuntimeCommonCRI, runtimeRemoteURI: fmt.Sprintf("unix://%s/%s", varRunPath, *CRISocketFileName), }) - klog.Infof("Find configured CRI socket %s with given flag", filePath) + klog.InfoS("Found configured CRI socket with given flag", "filePath", filePath) } else { - klog.Errorf("Failed to stat the CRI socket %s with given flag: %v", filePath, err) + klog.ErrorS(err, "Failed to stat the CRI socket with given flag", "filePath", filePath) } return } @@ -190,9 +190,10 @@ func detectRuntime(varRunPath string) (cfgs []runtimeConfig) { runtimeRemoteURI: fmt.Sprintf("unix://%s/pouchcri.sock", varRunPath), }) } else if err1 == nil && err2 != nil { - klog.Errorf("%s/pouchd.sock exists, but not found %s/pouchcri.sock", varRunPath, varRunPath) + klog.ErrorS(err2, "pouchd.sock exists, but pouchcri.sock not found", "varRunPath", varRunPath) } else if err1 != nil && err2 == nil { - klog.Errorf("%s/pouchdcri.sock exists, but not found %s/pouchd.sock", varRunPath, varRunPath) + // structured logging is not strictly necessary here + klog.ErrorS(err1, "pouchcri.sock exists, but pouchd.sock not found", "varRunPath", varRunPath) } } @@ -207,9 +208,11 @@ 
runtimeRemoteURI: fmt.Sprintf("unix://%s/dockershim.sock", varRunPath), }) } else if err1 == nil && err2 != nil { - klog.Errorf("%s/docker.sock exists, but not found %s/dockershim.sock", varRunPath, varRunPath) + // structured logging is not strictly necessary here + klog.ErrorS(err2, "docker.sock exists, but dockershim.sock not found", "varRunPath", varRunPath) } else if err1 != nil && err2 == nil { - klog.Errorf("%s/dockershim.sock exists, but not found %s/docker.sock", varRunPath, varRunPath) + // structured logging is not strictly necessary here + klog.ErrorS(err1, "dockershim.sock exists, but docker.sock not found", "varRunPath", varRunPath) } } diff --git a/pkg/daemon/criruntime/imageruntime/cri.go b/pkg/daemon/criruntime/imageruntime/cri.go index 957212d299..9de9abc591 100644 --- a/pkg/daemon/criruntime/imageruntime/cri.go +++ b/pkg/daemon/criruntime/imageruntime/cri.go @@ -137,7 +137,7 @@ func (c *commonCRIImageService) pullImageV1(ctx context.Context, imageName, tag var pullErrs []error for _, authInfo := range authInfos { var pullErr error - klog.V(5).Infof("Pull image %v:%v with user %v", imageName, tag, authInfo.Username) + klog.V(5).InfoS("Pull image with user", "imageName", imageName, "tag", tag, "user", authInfo.Username) pullImageReq.Auth = &runtimeapi.AuthConfig{ Username: authInfo.Username, Password: authInfo.Password, @@ -147,7 +147,7 @@ func (c *commonCRIImageService) pullImageV1(ctx context.Context, imageName, tag pipeW.CloseWithError(io.EOF) return newImagePullStatusReader(pipeR), nil } - klog.Warningf("Failed to pull image %v:%v with user %v, err %v", imageName, tag, authInfo.Username, pullErr) + klog.ErrorS(pullErr, "Failed to pull image with user", "imageName", imageName, "tag", tag, "user", authInfo.Username) pullErrs = append(pullErrs, pullErr) } @@ -155,7 +155,7 @@ func (c *commonCRIImageService) pullImageV1(ctx context.Context, imageName, tag err = utilerrors.NewAggregate(pullErrs) } } else { - klog.Errorf("Failed to convert to auth info for registry, err %v", err) + klog.ErrorS(err, "Failed to convert to auth info for registry") } // Try the default secret @@ -164,10 +164,10 @@ func (c *commonCRIImageService) pullImageV1(ctx context.Context, imageName, tag var defaultErr error authInfo, defaultErr = c.accountManager.GetAccountInfo(registry) if defaultErr != nil { - klog.Warningf("Failed to get account for registry %v, err %v", registry, defaultErr) + klog.ErrorS(defaultErr, "Failed to get account for registry", "registry", registry) // When the default account acquisition fails, try to pull anonymously } else if authInfo != nil { - klog.V(5).Infof("Pull image %v:%v with user %v", imageName, tag, authInfo.Username) + klog.V(5).InfoS("Pull image with user", "imageName", imageName, "tag", tag, "user", authInfo.Username) pullImageReq.Auth = &runtimeapi.AuthConfig{ Username: authInfo.Username, Password: authInfo.Password, @@ -177,7 +177,7 @@ func (c *commonCRIImageService) pullImageV1(ctx context.Context, imageName, tag pipeW.CloseWithError(io.EOF) return newImagePullStatusReader(pipeR), nil } - klog.Warningf("Failed to pull image %v:%v, err %v", imageName, tag, err) + klog.ErrorS(err, "Failed to pull image", "imageName", imageName, "tag", tag) return nil, err } } @@ -258,7 +258,7 @@ func (c *commonCRIImageService) pullImageV1alpha2(ctx context.Context, imageName var pullErrs []error for _, authInfo := range authInfos { var pullErr error - klog.V(5).Infof("Pull image %v:%v with user %v", imageName, tag, authInfo.Username) + klog.V(5).InfoS("Pull image with user", "imageName", 
imageName, "tag", tag, "user", authInfo.Username) pullImageReq.Auth = &runtimeapiv1alpha2.AuthConfig{ Username: authInfo.Username, Password: authInfo.Password, @@ -268,7 +268,7 @@ func (c *commonCRIImageService) pullImageV1alpha2(ctx context.Context, imageName pipeW.CloseWithError(io.EOF) return newImagePullStatusReader(pipeR), nil } - klog.Warningf("Failed to pull image %v:%v with user %v, err %v", imageName, tag, authInfo.Username, pullErr) + klog.ErrorS(pullErr, "Failed to pull image with user", "imageName", imageName, "tag", tag, "user", authInfo.Username) pullErrs = append(pullErrs, pullErr) } @@ -284,10 +284,10 @@ func (c *commonCRIImageService) pullImageV1alpha2(ctx context.Context, imageName var defaultErr error authInfo, defaultErr = c.accountManager.GetAccountInfo(registry) if defaultErr != nil { - klog.Warningf("Failed to get account for registry %v, err %v", registry, defaultErr) + klog.ErrorS(defaultErr, "Failed to get account for registry %v, err %v", "registry", registry) // When the default account acquisition fails, try to pull anonymously } else if authInfo != nil { - klog.V(5).Infof("Pull image %v:%v with user %v", imageName, tag, authInfo.Username) + klog.V(5).InfoS("Pull image with user", "imageName", imageName, "tag", tag, "user", authInfo.Username) pullImageReq.Auth = &runtimeapiv1alpha2.AuthConfig{ Username: authInfo.Username, Password: authInfo.Password, @@ -297,7 +297,7 @@ func (c *commonCRIImageService) pullImageV1alpha2(ctx context.Context, imageName pipeW.CloseWithError(io.EOF) return newImagePullStatusReader(pipeR), nil } - klog.Warningf("Failed to pull image %v:%v, err %v", imageName, tag, err) + klog.ErrorS(err, "Failed to pull image", "imageName", imageName, "tag", tag) return nil, err } } diff --git a/pkg/daemon/criruntime/imageruntime/docker.go b/pkg/daemon/criruntime/imageruntime/docker.go index 1156881035..eb78bd5677 100644 --- a/pkg/daemon/criruntime/imageruntime/docker.go +++ b/pkg/daemon/criruntime/imageruntime/docker.go @@ -87,20 +87,20 @@ func (d *dockerImageService) PullImage(ctx context.Context, imageName, tag strin var pullErrs []error for _, authInfo := range authInfos { var pullErr error - klog.V(5).Infof("Pull image %v:%v with user %v", imageName, tag, authInfo.Username) + klog.V(5).InfoS("Pull image with user", "imageName", imageName, "tag", tag, "user", authInfo.Username) ioReader, pullErr = d.client.ImagePull(ctx, fullName, dockertypes.ImagePullOptions{RegistryAuth: authInfo.EncodeToString()}) if pullErr == nil { return newImagePullStatusReader(ioReader), nil } d.handleRuntimeError(pullErr) - klog.Warningf("Failed to pull image %v:%v with user %v, err %v", imageName, tag, authInfo.Username, pullErr) + klog.ErrorS(pullErr, "Failed to pull image with user", "imageName", imageName, "tag", tag, "user", authInfo.Username) pullErrs = append(pullErrs, pullErr) } if len(pullErrs) > 0 { err = utilerrors.NewAggregate(pullErrs) } } else { - klog.Errorf("Failed to convert to auth info for registry, err %v", err) + klog.ErrorS(err, "Failed to convert to auth info for registry") } // Try the default secret @@ -109,16 +109,16 @@ func (d *dockerImageService) PullImage(ctx context.Context, imageName, tag strin var defaultErr error authInfo, defaultErr = d.accountManager.GetAccountInfo(registry) if defaultErr != nil { - klog.Warningf("Failed to get account for registry %v, err %v", registry, defaultErr) + klog.ErrorS(defaultErr, "Failed to get account for registry", "registry", registry) // When the default account acquisition fails, try to pull anonymously 
} else if authInfo != nil { - klog.V(5).Infof("Pull image %v:%v with user %v", imageName, tag, authInfo.Username) + klog.V(5).InfoS("Pull image with user", "imageName", imageName, "tag", tag, "user", authInfo.Username) ioReader, err = d.client.ImagePull(ctx, fullName, dockertypes.ImagePullOptions{RegistryAuth: authInfo.EncodeToString()}) if err == nil { return newImagePullStatusReader(ioReader), nil } d.handleRuntimeError(err) - klog.Warningf("Failed to pull image %v:%v, err %v", imageName, tag, err) + klog.ErrorS(err, "Failed to pull image", "imageName", imageName, "tag", tag) } } @@ -127,10 +127,11 @@ func (d *dockerImageService) PullImage(ctx context.Context, imageName, tag strin } // Anonymous pull - klog.V(5).Infof("Pull image %v:%v anonymous", imageName, tag) + klog.V(5).InfoS("Pull image anonymously", "imageName", imageName, "tag", tag) ioReader, err = d.client.ImagePull(ctx, fullName, dockertypes.ImagePullOptions{}) if err != nil { d.handleRuntimeError(err) + klog.ErrorS(err, "Failed to pull image", "imageName", imageName, "tag", tag) return nil, err } return newImagePullStatusReader(ioReader), nil diff --git a/pkg/daemon/criruntime/imageruntime/fake_plugin/main.go b/pkg/daemon/criruntime/imageruntime/fake_plugin/main.go index e19814b3ce..06a9433201 100644 --- a/pkg/daemon/criruntime/imageruntime/fake_plugin/main.go +++ b/pkg/daemon/criruntime/imageruntime/fake_plugin/main.go @@ -143,7 +143,7 @@ func newCredentialProviderCommand() *cobra.Command { Short: "ACR credential provider for kubelet", Run: func(cmd *cobra.Command, args []string) { if err := runPlugin(context.TODO(), os.Stdin, os.Stdout, os.Args[1:]); err != nil { - klog.Errorf("Error running credential provider plugin: %v", err) + klog.ErrorS(err, "Error running credential provider plugin") os.Exit(1) } }, diff --git a/pkg/daemon/criruntime/imageruntime/helpers.go b/pkg/daemon/criruntime/imageruntime/helpers.go index a3f3560ca3..d2ff4aa0ab 100644 --- a/pkg/daemon/criruntime/imageruntime/helpers.go +++ b/pkg/daemon/criruntime/imageruntime/helpers.go @@ -182,17 +182,17 @@ func (r *imagePullStatusReader) mainloop() { return } if err != nil { - klog.V(5).Infof("runtime read err %v", err) + klog.V(5).ErrorS(err, "runtime read err") r.seedPullStatus(ImagePullStatus{Err: err, Finish: true}) return } if jm.Error != nil { - klog.V(5).Infof("runtime read err %v", jm.Error) + klog.V(5).ErrorS(jm.Error, "runtime read err") r.seedPullStatus(ImagePullStatus{Err: fmt.Errorf("get error in pull response: %+v", jm.Error), Finish: true}) return } - klog.V(5).Infof("runtime read progress %v", util.DumpJSON(jm)) + klog.V(5).InfoS("runtime read progress", "message", util.DumpJSON(jm)) if jm.ID != "" { progress.Layers[jm.ID] = layerProgress{ JSONProgress: jm.Progress, diff --git a/pkg/daemon/criruntime/imageruntime/helpers_test.go b/pkg/daemon/criruntime/imageruntime/helpers_test.go index d04362c71d..11499dd424 100644 --- a/pkg/daemon/criruntime/imageruntime/helpers_test.go +++ b/pkg/daemon/criruntime/imageruntime/helpers_test.go @@ -122,7 +122,7 @@ func TestMatchRegistryAuths(t *testing.T) { // this is to test whether kruise could get expected auths if plugin fails to run err := plugin.RegisterCredentialProviderPlugins(pluginConfigFile, pluginBinDir) if err != nil { - klog.Errorf("Failed to register credential provider plugins: %v", err) + klog.ErrorS(err, "Failed to register credential provider plugins") } secret.MakeAndSetKeyring() for _, cs := range cases { diff --git a/pkg/daemon/criruntime/imageruntime/pouch.go 
b/pkg/daemon/criruntime/imageruntime/pouch.go index f85957cd13..2152a4c45c 100644 --- a/pkg/daemon/criruntime/imageruntime/pouch.go +++ b/pkg/daemon/criruntime/imageruntime/pouch.go @@ -86,20 +86,20 @@ func (d *pouchImageService) PullImage(ctx context.Context, imageName, tag string var pullErrs []error for _, authInfo := range authInfos { var pullErr error - klog.V(5).Infof("Pull image %v:%v with user %v", imageName, tag, authInfo.Username) + klog.V(5).InfoS("Pull image with user", "imageName", imageName, "tag", tag, "user", authInfo.Username) ioReader, pullErr = d.client.ImagePull(ctx, imageName, tag, authInfo.EncodeToString()) if pullErr == nil { return newImagePullStatusReader(ioReader), nil } d.handleRuntimeError(pullErr) - klog.Warningf("Failed to pull image %v:%v with user %v, err %v", imageName, tag, authInfo.Username, pullErr) + klog.ErrorS(pullErr, "Failed to pull image with user", "imageName", imageName, "tag", tag, "user", authInfo.Username) pullErrs = append(pullErrs, pullErr) } if len(pullErrs) > 0 { err = utilerrors.NewAggregate(pullErrs) } } else { - klog.Errorf("Failed to convert to auth info for registry, err %v", err) + klog.ErrorS(err, "Failed to convert to auth info for registry") } // Try the default secret @@ -108,16 +108,16 @@ func (d *pouchImageService) PullImage(ctx context.Context, imageName, tag string var defaultErr error authInfo, defaultErr = d.accountManager.GetAccountInfo(registry) if defaultErr != nil { - klog.Warningf("Failed to get account for registry %v, err %v", registry, defaultErr) + klog.ErrorS(defaultErr, "Failed to get account for registry", "registry", registry) // When the default account acquisition fails, try to pull anonymously } else if authInfo != nil { - klog.V(5).Infof("Pull image %v:%v with user %v", imageName, tag, authInfo.Username) + klog.V(5).InfoS("Pull image with user", "imageName", imageName, "tag", tag, "user", authInfo.Username) ioReader, err = d.client.ImagePull(ctx, imageName, tag, authInfo.EncodeToString()) if err == nil { return newImagePullStatusReader(ioReader), nil } d.handleRuntimeError(err) - klog.Warningf("Failed to pull image %v:%v with user %v, err %v", imageName, tag, authInfo.Username, err) + klog.ErrorS(err, "Failed to pull image with user", "imageName", imageName, "tag", tag, "user", authInfo.Username) } } @@ -126,7 +126,7 @@ func (d *pouchImageService) PullImage(ctx context.Context, imageName, tag string } // Anonymous pull - klog.V(5).Infof("Pull image %v:%v anonymous", imageName, tag) + klog.V(5).InfoS("Pull image anonymously", "imageName", imageName, "tag", tag) ioReader, err = d.client.ImagePull(ctx, imageName, tag, "") if err != nil { d.handleRuntimeError(err) diff --git a/pkg/daemon/daemon.go b/pkg/daemon/daemon.go index e86fca3feb..8943063552 100644 --- a/pkg/daemon/daemon.go +++ b/pkg/daemon/daemon.go @@ -98,7 +98,7 @@ func NewDaemon(cfg *rest.Config, bindAddress string) (Daemon, error) { if err != nil { return nil, err } - klog.Infof("Starting daemon on %v ...", nodeName) + klog.InfoS("Starting daemon", "nodeName", nodeName) listener, err := net.Listen("tcp", bindAddress) if err != nil { diff --git a/pkg/daemon/imagepuller/imagepuller_controller.go b/pkg/daemon/imagepuller/imagepuller_controller.go index d83a4d19dc..398f0c21df 100644 --- a/pkg/daemon/imagepuller/imagepuller_controller.go +++ b/pkg/daemon/imagepuller/imagepuller_controller.go @@ -86,7 +86,7 @@ func NewController(opts daemonoptions.Options, secretManager daemonutil.SecretMa return } if reflect.DeepEqual(oldNodeImage.Spec, 
newNodeImage.Spec) { - klog.V(5).Infof("Find imagePullNode %s spec has not changed, skip enqueueing.", newNodeImage.Name) + klog.V(5).InfoS("NodeImage spec has not changed, skip enqueueing", "nodeImage", newNodeImage.Name) return } logNewImages(oldNodeImage, newNodeImage) @@ -156,7 +156,7 @@ func (c *Controller) Run(stop <-chan struct{}) { return } - klog.Infof("Starting puller controller") + klog.Info("Starting puller controller") // Launch one workers to process resources, for there is only one NodeImage per Node go wait.Until(func() { for c.processNextWorkItem() { @@ -194,7 +194,7 @@ func (c *Controller) processNextWorkItem() bool { func (c *Controller) sync(key string) (retErr error) { _, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - klog.Warningf("Invalid key: %s", key) + klog.InfoS("Invalid key", "key", key) return nil } @@ -202,16 +202,16 @@ func (c *Controller) sync(key string) (retErr error) { if errors.IsNotFound(err) { return nil } else if err != nil { - klog.Errorf("Failed to get NodeImage %s: %v", name, err) + klog.ErrorS(err, "Failed to get NodeImage", "nodeImage", name) return err } - klog.V(3).Infof("Start syncing for %s", name) + klog.V(3).InfoS("Start syncing", "name", name) defer func() { if retErr != nil { - klog.Errorf("Failed to sync for %s: %v", name, retErr) + klog.ErrorS(retErr, "Failed to sync", "name", name) } else { - klog.V(3).Infof("Finished syncing for %s", name) + klog.V(3).InfoS("Finished syncing", "name", name) } }() @@ -229,7 +229,7 @@ func (c *Controller) sync(key string) (retErr error) { imageStatus := c.puller.GetStatus(imageName) if klog.V(9).Enabled() { - klog.V(9).Infof("get image %v status %#v", imageName, imageStatus) + klog.V(9).InfoS("get image status", "imageName", imageName, "imageStatus", imageStatus) } if imageStatus == nil { continue } diff --git a/pkg/daemon/imagepuller/imagepuller_worker.go b/pkg/daemon/imagepuller/imagepuller_worker.go index 9c64e244fb..0f28d810c3 100644 --- a/pkg/daemon/imagepuller/imagepuller_worker.go +++ b/pkg/daemon/imagepuller/imagepuller_worker.go @@ -18,6 +18,7 @@ package imagepuller import ( "context" + "errors" "fmt" "sync" "time" @@ -76,14 +77,14 @@ func newRealPuller(runtime runtimeimage.ImageService, secretManager daemonutil.S // Sync all images to pull func (p *realPuller) Sync(obj *appsv1alpha1.NodeImage, ref *v1.ObjectReference) error { - klog.V(5).Infof("sync puller for spec %v", util.DumpJSON(obj)) + klog.V(5).InfoS("sync puller", "spec", util.DumpJSON(obj)) p.Lock() defer p.Unlock() // stop all workers not in the spec for imageName := range p.workerPools { if _, ok := obj.Spec.Images[imageName]; !ok { - klog.V(3).Infof("stop workerpool for %v", imageName) + klog.V(3).InfoS("stop workerpool", "imageName", imageName) pool := p.workerPools[imageName] delete(p.workerPools, imageName) pool.Stop() @@ -93,7 +94,7 @@ func (p *realPuller) Sync(obj *appsv1alpha1.NodeImage, ref *v1.ObjectReference) for imageName, imageSpec := range obj.Spec.Images { pool, ok := p.workerPools[imageName] if !ok { - klog.V(3).Infof("starting new workerpool for %v", imageName) + klog.V(3).InfoS("starting new workerpool", "imageName", imageName) pool = newRealWorkerPool(imageName, p.runtime, p.secretManager, p.eventRecorder) p.workerPools[imageName] = pool } @@ -157,15 +158,15 @@ func newRealWorkerPool(name string, runtime runtimeimage.ImageService, secretMan func (w *realWorkerPool) Sync(spec *appsv1alpha1.ImageSpec, status *appsv1alpha1.ImageStatus, ref *v1.ObjectReference) error { if !w.active { - 
klog.Infof("workerPool %v has exited", w.name) + klog.InfoS("workerPool has exited", "name", w.name) return nil } - klog.V(5).Infof("sync worker pool for %v", w.name) + klog.V(5).InfoS("sync worker pool", "name", w.name) secrets, err := w.secretManager.GetSecrets(spec.PullSecrets) if err != nil { - klog.Warningf("failed to get secrets %v, err %v", spec.PullSecrets, err) + klog.ErrorS(err, "failed to get secrets", "pullSecrets", spec.PullSecrets) return err } @@ -196,11 +197,11 @@ func (w *realWorkerPool) Sync(spec *appsv1alpha1.ImageSpec, status *appsv1alpha1 for tag, worker := range w.pullWorkers { tagSpec, ok := activeTags[tag] if !ok { - klog.V(4).Infof("stopping worker %v which is not in spec", worker.ImageRef()) + klog.V(4).InfoS("stopping worker which is not in spec", "imageRef", worker.ImageRef()) delete(w.pullWorkers, tag) worker.Stop() } else if tagSpec.Version != worker.tagSpec.Version { - klog.V(4).Infof("stopping worker %v which is old version %v -> %v", worker.ImageRef(), worker.tagSpec.Version, tagSpec.Version) + klog.V(4).InfoS("stopping worker with old version", "imageRef", worker.ImageRef(), "old", worker.tagSpec.Version, "new", tagSpec.Version) delete(w.pullWorkers, tag) worker.Stop() } @@ -305,7 +306,7 @@ func (w *pullWorker) Stop() { w.Lock() defer w.Unlock() if w.active { - klog.Warningf("Worker to pull image %s:%s is stopped", w.name, w.tagSpec.Tag) + klog.InfoS("Worker to pull image is stopped", "name", w.name, "tag", w.tagSpec.Tag) w.active = false close(w.stopCh) } @@ -316,7 +317,7 @@ func (w *pullWorker) IsActive() bool { } func (w *pullWorker) Run() { - klog.V(3).Infof("starting worker %v version %v", w.ImageRef(), w.tagSpec.Version) + klog.V(3).InfoS("starting worker", "image", w.ImageRef(), "version", w.tagSpec.Version) tag := w.tagSpec.Tag startTime := metav1.Now() @@ -336,9 +337,9 @@ func (w *pullWorker) Run() { defer func() { cost := time.Since(startTime.Time) if newStatus.Phase == appsv1alpha1.ImagePhaseFailed { - klog.Warningf("Worker failed to pull image %s:%s, cost %v, err: %v", w.name, tag, cost, newStatus.Message) + klog.ErrorS(errors.New(newStatus.Message), "Worker failed to pull image", "name", w.name, "tag", tag, "cost", cost) } else { - klog.Infof("Successfully pull image %s:%s, cost %vs", w.name, tag, cost) + klog.InfoS("Successfully pull image", "name", w.name, "tag", tag, "cost", cost) } if w.IsActive() { w.statusUpdater.UpdateStatus(newStatus) @@ -387,7 +388,7 @@ func (w *pullWorker) Run() { break } - klog.Warningf("Pulling image %s:%s backoff %d, error %v", w.name, tag, i+1, lastError) + klog.ErrorS(lastError, "Pulling image backoff", "name", w.name, "tag", tag, "backoff", i+1) time.Sleep(step) step = minDuration(2*step, maxBackoff) continue @@ -418,7 +419,7 @@ func (w *pullWorker) Run() { func (w *pullWorker) getImageInfo(ctx context.Context) (*runtimeimage.ImageInfo, error) { imageInfos, err := w.runtime.ListImages(ctx) if err != nil { - klog.V(5).Infof("List images failed, err %v", err) + klog.V(5).ErrorS(err, "List images failed") return nil, err } for _, info := range imageInfos { @@ -434,16 +435,16 @@ func (w *pullWorker) doPullImage(ctx context.Context, newStatus *appsv1alpha1.Im tag := w.tagSpec.Tag startTime := metav1.Now() - klog.Infof("Worker is starting to pull image %s:%s version %v", w.name, tag, w.tagSpec.Version) + klog.InfoS("Worker is starting to pull image", "name", w.name, "tag", tag, "version", w.tagSpec.Version) if info, e := w.getImageInfo(ctx); imagePullPolicy == appsv1alpha1.PullAlways { if e == nil && 
!w.shouldPull(info.ID, w.name, tag, w.secrets) { - klog.Infof("Image %s:%s is already exists", w.name, tag) + klog.InfoS("Image already exists", "name", w.name, "tag", tag) newStatus.Progress = 100 return nil } } else if e == nil { - klog.Infof("Image %s:%s is already exists", w.name, tag) + klog.InfoS("Image already exists", "name", w.name, "tag", tag) newStatus.Progress = 100 return nil } @@ -468,11 +469,11 @@ func (w *pullWorker) doPullImage(ctx context.Context, newStatus *appsv1alpha1.Im select { case <-w.stopCh: go closeStatusReader() - klog.V(2).Infof("Pulling image %v:%v is stopped.", w.name, tag) + klog.V(2).InfoS("Pulling image stopped", "name", w.name, "tag", tag) return fmt.Errorf("pulling image %s:%s is stopped", w.name, tag) case <-ctx.Done(): go closeStatusReader() - klog.V(2).Infof("Pulling image %s:%s is canceled", w.name, tag) + klog.V(2).InfoS("Pulling image canceled", "name", w.name, "tag", tag) return fmt.Errorf("pulling image %s:%s is canceled", w.name, tag) case <-pullChan: if err != nil { @@ -489,13 +490,13 @@ func (w *pullWorker) doPullImage(ctx context.Context, newStatus *appsv1alpha1.Im for { select { case <-w.stopCh: - klog.V(2).Infof("Pulling image %v:%v is stopped.", w.name, tag) + klog.V(2).InfoS("Pulling image stopped", "name", w.name, "tag", tag) return fmt.Errorf("pulling image %s:%s is stopped", w.name, tag) case <-ctx.Done(): - klog.V(2).Infof("Pulling image %s:%s is canceled", w.name, tag) + klog.V(2).InfoS("Pulling image canceled", "name", w.name, "tag", tag) return fmt.Errorf("pulling image %s:%s is canceled", w.name, tag) case <-logTicker.C: - klog.V(2).Infof("Pulling image %s:%s, cost: %v, progress: %v%%, detail: %v", w.name, tag, time.Since(startTime.Time), progress, progressInfo) + klog.V(2).InfoS("Pulling image", "name", w.name, "tag", tag, "cost", time.Since(startTime.Time), "progress", progress, "detail", progressInfo) case progressStatus, ok := <-statusReader.C(): if !ok { return fmt.Errorf("pulling image %s:%s internal error", w.name, tag) @@ -503,7 +504,7 @@ progress = progressStatus.Process progressInfo = progressStatus.DetailInfo newStatus.Progress = int32(progressStatus.Process) - klog.V(5).Infof("Pulling image %s:%s, cost: %v, progress: %v%%, detail: %v", w.name, tag, time.Since(startTime.Time), progress, progressInfo) + klog.V(5).InfoS("Pulling image", "name", w.name, "tag", tag, "cost", time.Since(startTime.Time), "progress", progress, "detail", progressInfo) if progressStatus.Finish { if progressStatus.Err == nil { return nil diff --git a/pkg/daemon/imagepuller/utils.go b/pkg/daemon/imagepuller/utils.go index c3baa382bc..4b448c31dd 100644 --- a/pkg/daemon/imagepuller/utils.go +++ b/pkg/daemon/imagepuller/utils.go @@ -49,7 +49,7 @@ func logNewImages(oldObj, newObj *appsv1alpha1.NodeImage) { for _, tagSpec := range imageSpec.Tags { fullName := fmt.Sprintf("%v:%v", image, tagSpec.Tag) if _, ok := oldImages[fullName]; !ok { - klog.V(2).Infof("Received new image %v", fullName) + klog.V(2).InfoS("Received new image", "fullName", fullName) } } } @@ -113,11 +113,11 @@ func (su *statusUpdater) updateStatus(nodeImage *appsv1alpha1.NodeImage, newStat // IMPORTANT!!! Make sure rate limiter is working! 
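// The Allow() check below is the token-bucket guard the comment above refers to.
// A sketch of how such a limiter is typically built, assuming
// golang.org/x/time/rate (the concrete limiter type is not visible in this hunk):
//
//	import "golang.org/x/time/rate"
//
//	rateLimiter := rate.NewLimiter(rate.Limit(statusUpdateQPS), statusUpdateBurst)
//	if !rateLimiter.Allow() {
//		return true, nil // skip this status update; a later retry will report it
//	}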
if !su.rateLimiter.Allow() { msg := fmt.Sprintf("Updating status is limited qps=%v burst=%v", statusUpdateQPS, statusUpdateBurst) - klog.V(3).Infof(msg) + klog.V(3).Info(msg) return true, nil } - klog.V(5).Infof("Updating status: %v", util.DumpJSON(newStatus)) + klog.V(5).InfoS("Updating status", "status", util.DumpJSON(newStatus)) newNodeImage := nodeImage.DeepCopy() newNodeImage.Status = *newStatus diff --git a/pkg/daemon/kuberuntime/kuberuntime_container.go b/pkg/daemon/kuberuntime/kuberuntime_container.go index dc1a0cb8bd..7e15b1772a 100644 --- a/pkg/daemon/kuberuntime/kuberuntime_container.go +++ b/pkg/daemon/kuberuntime/kuberuntime_container.go @@ -44,7 +44,7 @@ import ( func (m *genericRuntimeManager) recordContainerEvent(pod *v1.Pod, container *v1.Container, containerID, eventType, reason, message string, args ...interface{}) { ref, err := kubeletcontainer.GenerateContainerRef(pod, container) if err != nil { - klog.Errorf("Can't make a ref to pod %q, container %v: %v", format.Pod(pod), container.Name, err) + klog.ErrorS(err, "Can't make a ref to pod container", "pod", format.Pod(pod), "container", container.Name) return } eventMessage := message @@ -168,16 +168,16 @@ func (m *genericRuntimeManager) KillContainer(pod *v1.Pod, containerID kubeletco if gracePeriodOverride != nil { gracePeriod = *gracePeriodOverride - klog.V(3).Infof("Killing container %q, but using %d second grace period override", containerID, gracePeriod) + klog.V(3).InfoS("Killing container, but using grace period override", "containerID", containerID.String(), "gracePeriod", gracePeriod) } - klog.V(2).Infof("Killing container %q with %d second grace period", containerID.String(), gracePeriod) + klog.V(2).InfoS("Killing container with grace period", "containerID", containerID.String(), "gracePeriod", gracePeriod) err := m.runtimeService.StopContainer(context.TODO(), containerID.ID, gracePeriod) if err != nil { - klog.Errorf("Container %q termination failed with gracePeriod %d: %v", containerID.String(), gracePeriod, err) + klog.ErrorS(err, "Container termination failed with grace period", "containerID", containerID.String(), "gracePeriod", gracePeriod) } else { - klog.V(3).Infof("Container %q exited normally", containerID.String()) + klog.V(3).InfoS("Container exited normally", "containerID", containerID.String()) } return err @@ -229,7 +229,7 @@ func (m *genericRuntimeManager) restoreSpecsFromContainerLabels(containerID kube // executePreStopHook runs the pre-stop lifecycle hooks if applicable and returns the duration it takes. 
func (m *genericRuntimeManager) executePreStopHook(pod *v1.Pod, containerID kubeletcontainer.ContainerID, containerSpec *v1.Container, gracePeriod int64) int64 { - klog.V(3).Infof("Running preStop hook for container %q", containerID.String()) + klog.V(3).InfoS("Running preStop hook for container", "containerID", containerID.String()) start := metav1.Now() done := make(chan struct{}) @@ -237,16 +237,16 @@ func (m *genericRuntimeManager) executePreStopHook(pod *v1.Pod, containerID kube defer close(done) defer utilruntime.HandleCrash() if msg, err := m.runner.Run(context.TODO(), containerID, pod, containerSpec, containerSpec.Lifecycle.PreStop); err != nil { - klog.Errorf("preStop hook for container %q failed: %v", containerSpec.Name, err) + klog.ErrorS(err, "preStop hook for container failed", "name", containerSpec.Name) m.recordContainerEvent(pod, containerSpec, containerID.ID, v1.EventTypeWarning, events.FailedPreStopHook, msg) } }() select { case <-time.After(time.Duration(gracePeriod) * time.Second): - klog.V(2).Infof("preStop hook for container %q did not complete in %d seconds", containerID, gracePeriod) + klog.V(2).InfoS("preStop hook for container did not complete in time", "containerID", containerID.String(), "gracePeriod", gracePeriod) case <-done: - klog.V(3).Infof("preStop hook for container %q completed", containerID) + klog.V(3).InfoS("preStop hook for container completed", "containerID", containerID.String()) } return int64(metav1.Now().Sub(start.Time).Seconds()) diff --git a/pkg/daemon/kuberuntime/labels.go b/pkg/daemon/kuberuntime/labels.go index eacc805ef1..732af841e8 100644 --- a/pkg/daemon/kuberuntime/labels.go +++ b/pkg/daemon/kuberuntime/labels.go @@ -77,28 +77,28 @@ func getContainerInfoFromAnnotations(annotations map[string]string) *annotatedCo } if containerInfo.Hash, err = getUint64ValueFromLabel(annotations, containerHashLabel); err != nil { - klog.Errorf("Unable to get %q from annotations %q: %v", containerHashLabel, annotations, err) + klog.ErrorS(err, "Unable to get label from annotations", "label", containerHashLabel, "annotations", annotations) } if containerInfo.RestartCount, err = getIntValueFromLabel(annotations, containerRestartCountLabel); err != nil { - klog.Errorf("Unable to get %q from annotations %q: %v", containerRestartCountLabel, annotations, err) + klog.ErrorS(err, "Unable to get label from annotations", "label", containerRestartCountLabel, "annotations", annotations) } if containerInfo.PodDeletionGracePeriod, err = getInt64PointerFromLabel(annotations, podDeletionGracePeriodLabel); err != nil { - klog.Errorf("Unable to get %q from annotations %q: %v", podDeletionGracePeriodLabel, annotations, err) + klog.ErrorS(err, "Unable to get label from annotations", "label", podDeletionGracePeriodLabel, "annotations", annotations) } if containerInfo.PodTerminationGracePeriod, err = getInt64PointerFromLabel(annotations, podTerminationGracePeriodLabel); err != nil { - klog.Errorf("Unable to get %q from annotations %q: %v", podTerminationGracePeriodLabel, annotations, err) + klog.ErrorS(err, "Unable to get label from annotations", "label", podTerminationGracePeriodLabel, "annotations", annotations) } preStopHandler := &v1.LifecycleHandler{} if found, err := getJSONObjectFromLabel(annotations, containerPreStopHandlerLabel, preStopHandler); err != nil { - klog.Errorf("Unable to get %q from annotations %q: %v", containerPreStopHandlerLabel, annotations, err) + klog.ErrorS(err, "Unable to get label from annotations", "label", containerPreStopHandlerLabel, 
"annotations", annotations) } else if found { containerInfo.PreStopHandler = preStopHandler } containerPorts := []v1.ContainerPort{} if found, err := getJSONObjectFromLabel(annotations, containerPortsLabel, &containerPorts); err != nil { - klog.Errorf("Unable to get %q from annotations %q: %v", containerPortsLabel, annotations, err) + klog.ErrorS(err, "Unable to get label from annotations", "label", containerPortsLabel, "annotations", annotations) } else if found { containerInfo.ContainerPorts = containerPorts } @@ -111,7 +111,7 @@ func getStringValueFromLabel(labels map[string]string, label string) string { return value } // Do not report error, because there should be many old containers without label now. - klog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label) + klog.V(3).InfoS("Container doesn't have a specific label, it may be an old or invalid container", "label", label) // Return empty string "" for these containers, the caller will get value by other ways. return "" } @@ -126,7 +126,7 @@ func getIntValueFromLabel(labels map[string]string, label string) (int, error) { return intValue, nil } // Do not report error, because there should be many old containers without label now. - klog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label) + klog.V(3).InfoS("Container doesn't have a specific label, it may be an old or invalid container", "label", label) // Just set the value to 0 return 0, nil } @@ -141,7 +141,7 @@ func getUint64ValueFromLabel(labels map[string]string, label string) (uint64, er return intValue, nil } // Do not report error, because there should be many old containers without label now. - klog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label) + klog.V(3).InfoS("Container doesn't have a specific label, it may be an old or invalid container", "label", label) // Just set the value to 0 return 0, nil } diff --git a/pkg/daemon/podprobe/pod_probe_controller.go b/pkg/daemon/podprobe/pod_probe_controller.go index aba408a64b..19f8ece40b 100644 --- a/pkg/daemon/podprobe/pod_probe_controller.go +++ b/pkg/daemon/podprobe/pod_probe_controller.go @@ -199,7 +199,7 @@ func (c *Controller) Run(stop <-chan struct{}) { return } - klog.Infof("Starting NodePodProbe controller") + klog.Info("Starting NodePodProbe controller") // Launch a worker to process resources, for there is only one nodePodProbe per Node go wait.Until(func() { for c.processNextWorkItem() { @@ -243,7 +243,7 @@ func (c *Controller) sync() error { if errors.IsNotFound(err) { return nil } else if err != nil { - klog.Errorf("Failed to get nodePodProbe %s: %v", c.nodeName, err) + klog.ErrorS(err, "Failed to get nodePodProbe", "nodeName", c.nodeName) return err } @@ -259,21 +259,21 @@ func (c *Controller) sync() error { validWorkers[key] = struct{}{} if worker, ok := c.workers[key]; ok { if !reflect.DeepEqual(probe.Probe, worker.getProbeSpec()) { - klog.Infof("NodePodProbe pod(%s) container(%s) probe changed from(%s) -> to(%s)", - key.podUID, key.containerName, commonutil.DumpJSON(worker.getProbeSpec()), commonutil.DumpJSON(probe.Probe)) + klog.InfoS("NodePodProbe pod container probe changed", + "podUID", key.podUID, "containerName", key.containerName, "from", commonutil.DumpJSON(worker.getProbeSpec()), "to", commonutil.DumpJSON(probe.Probe)) worker.updateProbeSpec(&probe.Probe) } continue } w := newWorker(c, key, &probe.Probe) c.workers[key] = w - klog.Infof("NodePodProbe run pod(%s) container(%s) 
probe(%s) spec(%s) worker", key.podUID, key.containerName, key.probeName, commonutil.DumpJSON(probe.Probe)) + klog.InfoS("NodePodProbe run pod container probe spec worker", "podUID", key.podUID, "containerName", key.containerName, "probeName", key.probeName, "probeSpec", commonutil.DumpJSON(probe.Probe)) go w.run() } } for key, worker := range c.workers { if _, ok := validWorkers[key]; !ok { - klog.Infof("NodePodProbe stop pod(%s/%s) container(%s) probe(%s) worker", key.podNs, key.podName, key.containerName, key.probeName) + klog.InfoS("NodePodProbe stop pod container probe worker", "namespace", key.podNs, "podName", key.podName, "containerName", key.containerName, "probeName", key.probeName) worker.stop() } } @@ -309,7 +309,7 @@ func (c *Controller) syncUpdateNodePodProbeStatus() error { if errors.IsNotFound(err) { return nil } - klog.Errorf("Get NodePodProbe(%s) failed: %s", c.nodeName, err.Error()) + klog.ErrorS(err, "Get NodePodProbe failed", "nodeName", c.nodeName) return err } validSets := sets.NewString() @@ -360,10 +360,10 @@ func (c *Controller) syncUpdateNodePodProbeStatus() error { nppClone.Status = *newStatus _, err = c.nodePodProbeClient.UpdateStatus(context.TODO(), nppClone, metav1.UpdateOptions{}) if err != nil { - klog.Errorf("NodePodProbe(%s) update status failed: %s", c.nodeName, err.Error()) + klog.ErrorS(err, "NodePodProbe update status failed", "nodeName", c.nodeName) return err } - klog.Infof("NodePodProbe(%s) update status from(%s) -> to(%s) success", c.nodeName, commonutil.DumpJSON(npp.Status), commonutil.DumpJSON(nppClone.Status)) + klog.InfoS("NodePodProbe(%s) update status success", "nodeName", c.nodeName, "from", commonutil.DumpJSON(npp.Status), "to", commonutil.DumpJSON(nppClone.Status)) return nil } @@ -377,19 +377,19 @@ func (c *Controller) removeWorker(key probeKey) { func (c *Controller) fetchLatestPodContainer(podUID, name string) (*runtimeapi.ContainerStatus, error) { // runtimeService, for example docker if c.runtimeFactory == nil { - klog.Warningf("NodePodProbe not found runtimeFactory") + klog.Warning("NodePodProbe not found runtimeFactory") return nil, nil } runtimeService := c.runtimeFactory.GetRuntimeService() if runtimeService == nil { - klog.Warningf("NodePodProbe not found runtimeService") + klog.Warning("NodePodProbe not found runtimeService") return nil, nil } containers, err := runtimeService.ListContainers(context.TODO(), &runtimeapi.ContainerFilter{ LabelSelector: map[string]string{kubelettypes.KubernetesPodUIDLabel: podUID}, }) if err != nil { - klog.Errorf("NodePodProbe pod(%s) list containers failed: %s", podUID, err.Error()) + klog.ErrorS(err, "NodePodProbe pod list containers failed", "podUID", podUID) return nil, err } var container *runtimeapi.Container diff --git a/pkg/daemon/podprobe/pod_probe_controller_test.go b/pkg/daemon/podprobe/pod_probe_controller_test.go index f013d0a5f1..83ccb709e7 100644 --- a/pkg/daemon/podprobe/pod_probe_controller_test.go +++ b/pkg/daemon/podprobe/pod_probe_controller_test.go @@ -410,7 +410,7 @@ func TestUpdateNodePodProbeStatus(t *testing.T) { func checkNodePodProbeStatusEqual(lister listersalpha1.NodePodProbeLister, expect appsv1alpha1.NodePodProbeStatus) bool { npp, err := lister.Get("node-1") if err != nil { - klog.Errorf("Get NodePodProbe failed: %s", err.Error()) + klog.ErrorS(err, "Get NodePodProbe failed") return false } for i := range npp.Status.PodProbeStatuses { diff --git a/pkg/daemon/podprobe/worker.go b/pkg/daemon/podprobe/worker.go index 347dabe30f..002bad2498 100644 --- 
a/pkg/daemon/podprobe/worker.go +++ b/pkg/daemon/podprobe/worker.go @@ -122,7 +122,7 @@ func (w *worker) doProbe() (keepGoing bool) { container, _ := w.probeController.fetchLatestPodContainer(w.key.podUID, w.key.containerName) if container == nil { - klog.V(5).Infof("Pod(%s/%s) container(%s) Not Found", w.key.podNs, w.key.podName, w.key.containerName) + klog.V(5).InfoS("Pod container Not Found", "namespace", w.key.podNs, "podName", w.key.podName, "containerName", w.key.containerName) return true } @@ -130,12 +130,12 @@ func (w *worker) doProbe() (keepGoing bool) { if w.containerID != "" { w.probeController.result.remove(w.containerID) } - klog.V(5).Infof("Pod(%s/%s) container(%s) Id changed(%s -> %s)", w.key.podNs, w.key.podName, w.key.containerName, w.containerID, container.Id) + klog.V(5).InfoS("Pod container Id changed", "namespace", w.key.podNs, "podName", w.key.podName, "containerName", w.key.containerName, "from", w.containerID, "to", container.Id) w.containerID = container.Id w.probeController.result.set(w.containerID, w.key, w.initialValue, "") } if container.State != runtimeapi.ContainerState_CONTAINER_RUNNING { - klog.V(5).Infof("Pod(%s/%s) Non-running container(%s) probed", w.key.podNs, w.key.podName, w.key.containerName) + klog.V(5).InfoS("Pod Non-running container probed", "namespace", w.key.podNs, "podName", w.key.podName, "containerName", w.key.containerName) w.probeController.result.set(w.containerID, w.key, appsv1alpha1.ProbeFailed, fmt.Sprintf("Container(%s) is Non-running", w.key.containerName)) } @@ -146,8 +146,8 @@ func (w *worker) doProbe() (keepGoing bool) { } curDelay := int32(time.Since(time.Unix(0, container.StartedAt)).Seconds()) if curDelay < initialDelay { - klog.V(5).Infof("Pod(%s:%s) container(%s) probe(%s) initialDelay(%d), but curDelay(%d)", - w.key.podNs, w.key.podName, w.key.containerName, w.key.probeName, initialDelay, curDelay) + klog.V(5).InfoS("Pod container probe curDelay is smaller than initialDelay", + "namespace", w.key.podNs, "podName", w.key.podName, "containerName", w.key.containerName, "probeName", w.key.probeName, "initialDelay", initialDelay, "curDelay", curDelay) return true } @@ -155,8 +155,8 @@ func (w *worker) doProbe() (keepGoing bool) { // values from the running container.
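// The probe call below follows the convention this patch applies everywhere:
// a constant message, the error passed as the first argument of klog.ErrorS,
// and the identifying fields as key/value pairs. A minimal sketch of the rule,
// where doProbe, ns and name are hypothetical placeholders:
//
//	if err := doProbe(); err != nil {
//		klog.ErrorS(err, "probe failed", "namespace", ns, "podName", name)
//	}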
result, msg, err := w.probeController.prober.probe(w.spec, w.key, container, w.containerID) if err != nil { - klog.Errorf("Pod(%s/%s) do container(%s) probe(%s) spec(%s) failed: %s", - w.key.podNs, w.key.podName, w.key.containerName, w.key.probeName, util.DumpJSON(w.spec), err.Error()) + klog.ErrorS(err, "Pod container probe failed", + "namespace", w.key.podNs, "podName", w.key.podName, "containerName", w.key.containerName, "probeName", w.key.probeName, "spec", util.DumpJSON(w.spec)) return true } if w.lastResult == result { @@ -190,7 +190,7 @@ func (w *worker) getProbeSpec() *appsv1alpha1.ContainerProbeSpec { func (w *worker) updateProbeSpec(spec *appsv1alpha1.ContainerProbeSpec) { if !reflect.DeepEqual(w.spec.ProbeHandler, spec.ProbeHandler) { if w.containerID != "" { - klog.Infof("Pod(%s) container(%s) probe spec changed", w.key.podUID, w.key.containerName) + klog.InfoS("Pod container probe spec changed", "podUID", w.key.podUID, "containerName", w.key.containerName) w.probeController.result.set(w.containerID, w.key, w.initialValue, "") } } diff --git a/pkg/daemon/util/healthz.go b/pkg/daemon/util/healthz.go index 6622b97cd5..399810a0d7 100644 --- a/pkg/daemon/util/healthz.go +++ b/pkg/daemon/util/healthz.go @@ -57,7 +57,7 @@ func (h *Healthz) Handler(w http.ResponseWriter, r *http.Request) { w.WriteHeader(500) errMsg := fmt.Sprintf("check %v failed, err: %v", name, err) _, _ = w.Write([]byte(errMsg)) - klog.Infof("/healthz %v", errMsg) + klog.InfoS("/healthz", "message", errMsg) return } } @@ -65,7 +65,7 @@ func (h *Healthz) Handler(w http.ResponseWriter, r *http.Request) { w.WriteHeader(200) _, _ = w.Write([]byte("ok")) if h.healthzCount%10 == 0 { - klog.V(6).Infof("/healthz ok %v", h.info) + klog.V(6).InfoS("/healthz ok", "message", h.info) } } diff --git a/pkg/daemon/util/secret_manager.go b/pkg/daemon/util/secret_manager.go index cbe6e0f296..5193157645 100644 --- a/pkg/daemon/util/secret_manager.go +++ b/pkg/daemon/util/secret_manager.go @@ -62,7 +62,7 @@ func (c *cacheBasedSecretManager) GetSecrets(secrets []appsv1alpha1.ReferenceObj } else { s, err := c.client.CoreV1().Secrets(secret.Namespace).Get(context.TODO(), secret.Name, metav1.GetOptions{ResourceVersion: "0"}) if err != nil { - klog.Errorf("failed to get secret %s, err %v", secret, err) + klog.ErrorS(err, "failed to get secret", "secret", secret) } else { // renew cache in 5~10 minutes interval := time.Duration(rand.Int31n(6)+5) * time.Minute diff --git a/pkg/util/client/no_deepcopy_lister.go b/pkg/util/client/no_deepcopy_lister.go index 3272f57364..5733c2b9c2 100644 --- a/pkg/util/client/no_deepcopy_lister.go +++ b/pkg/util/client/no_deepcopy_lister.go @@ -107,7 +107,7 @@ func (r *noDeepCopyLister) List(ctx context.Context, out client.ObjectList, opts runtimeObjs = append(runtimeObjs, obj) } defer func() { - klog.V(6).Infof("Listed %v %v objects %v without DeepCopy, cost %v", gvk.GroupVersion(), gvk.Kind, len(runtimeObjs), time.Since(startTime)) + klog.V(6).InfoS("Objects listed without DeepCopy", "groupVersion", gvk.GroupVersion(), "kind", gvk.Kind, "objects", len(runtimeObjs), "cost", time.Since(startTime)) }() return apimeta.SetList(out, runtimeObjs) } diff --git a/pkg/util/controllerfinder/pods_finder.go b/pkg/util/controllerfinder/pods_finder.go index 4dfd1694ed..16bec7c20d 100644 --- a/pkg/util/controllerfinder/pods_finder.go +++ b/pkg/util/controllerfinder/pods_finder.go @@ -148,7 +148,7 @@ func (r *ControllerFinder) getReplicaSetsForObject(scale *ScaleAndSelector) ([]a rsList := &appsv1.ReplicaSetList{}
selector, err := util.ValidatedLabelSelectorAsSelector(scale.Selector) if err != nil { - klog.Warningf("Object (%s/%s) get labelSelector failed: %s", scale.Metadata.Namespace, scale.Metadata.Name, err.Error()) + klog.ErrorS(err, "Object get labelSelector failed", "namespace", scale.Metadata.Namespace, "name", scale.Metadata.Name) return nil, nil } err = r.List(context.TODO(), rsList, &client.ListOptions{Namespace: scale.Metadata.Namespace, LabelSelector: selector}, utilclient.DisableDeepCopy) diff --git a/pkg/util/discovery/discovery.go b/pkg/util/discovery/discovery.go index 29ed915ca4..5179aa9d54 100644 --- a/pkg/util/discovery/discovery.go +++ b/pkg/util/discovery/discovery.go @@ -71,12 +71,12 @@ func DiscoverGVK(gvk schema.GroupVersionKind) bool { if err != nil { if err == errKindNotFound { - klog.Warningf("Not found kind %s in group version %s, waiting time %s", gvk.Kind, gvk.GroupVersion().String(), time.Since(startTime)) + klog.InfoS("Not found kind in group version", "kind", gvk.Kind, "groupVersion", gvk.GroupVersion().String(), "cost", time.Since(startTime)) return false } // This might be caused by abnormal apiserver or etcd, ignore it - klog.Errorf("Failed to find resources in group version %s: %v, waiting time %s", gvk.GroupVersion().String(), err, time.Since(startTime)) + klog.ErrorS(err, "Failed to find resources in group version", "groupVersion", gvk.GroupVersion().String(), "cost", time.Since(startTime)) } return true @@ -85,7 +85,7 @@ func DiscoverGVK(gvk schema.GroupVersionKind) bool { func DiscoverObject(obj runtime.Object) bool { gvk, err := apiutil.GVKForObject(obj, internalScheme) if err != nil { - klog.Warningf("Not recognized object %T in scheme: %v", obj, err) + klog.ErrorS(err, "Not recognized object in scheme", "object", obj) return false } return DiscoverGVK(gvk) diff --git a/pkg/util/imagejob/imagejob_reader.go b/pkg/util/imagejob/imagejob_reader.go index 4daf38a102..0bea0c538f 100644 --- a/pkg/util/imagejob/imagejob_reader.go +++ b/pkg/util/imagejob/imagejob_reader.go @@ -95,7 +95,7 @@ func GetNodeImagesForJob(reader client.Reader, job *appsv1alpha1.ImagePullJob) ( if owner != nil { newPods, err := sortingcontrol.SortPods(reader, job.Namespace, *owner, pods) if err != nil { - klog.Errorf("ImagePullJob %s/%s failed to sort Pods: %v", job.Namespace, job.Name, err) + klog.ErrorS(err, "ImagePullJob failed to sort Pods", "namespace", job.Namespace, "name", job.Name) } else { pods = newPods } @@ -110,8 +110,8 @@ func GetNodeImagesForJob(reader client.Reader, job *appsv1alpha1.ImagePullJob) ( nodeImage := &appsv1alpha1.NodeImage{} if err := reader.Get(context.TODO(), types.NamespacedName{Name: pod.Spec.NodeName}, nodeImage); err != nil { if errors.IsNotFound(err) { - klog.Warningf("Get NodeImages for ImagePullJob %s/%s, find Pod %s on Node %s but NodeImage not found", - job.Namespace, job.Name, pod.Name, pod.Spec.NodeName) + klog.InfoS("Get NodeImages for ImagePullJob, find Pod on Node but NodeImage not found", + "namespace", job.Namespace, "name", job.Name, "pod", pod.Name, "node", pod.Spec.NodeName) continue } return nil, err diff --git a/pkg/util/imagejob/utilfunction/imagejob_util.go b/pkg/util/imagejob/utilfunction/imagejob_util.go index eb4143ebd9..32ff7d538d 100644 --- a/pkg/util/imagejob/utilfunction/imagejob_util.go +++ b/pkg/util/imagejob/utilfunction/imagejob_util.go @@ -86,7 +86,7 @@ func DeleteJobsForWorkload(c client.Client, ownerObj metav1.Object) error { if owner == nil || owner.UID != ownerObj.GetUID() { continue } - klog.Infof("Deleting ImagePullJob 
%s for workload %s %s/%s", job.Name, owner.Kind, ownerObj.GetNamespace(), ownerObj.GetName()) + klog.InfoS("Deleting ImagePullJob for workload", "jobName", job.Name, "ownerKind", owner.Kind, "ownerNamespace", ownerObj.GetNamespace(), "ownerName", ownerObj.GetName()) if err := c.Delete(context.TODO(), job); err != nil { return err } diff --git a/pkg/util/inplaceupdate/inplace_update.go b/pkg/util/inplaceupdate/inplace_update.go index 48e7a15bc7..bf6653fef8 100644 --- a/pkg/util/inplaceupdate/inplace_update.go +++ b/pkg/util/inplaceupdate/inplace_update.go @@ -129,7 +129,7 @@ func (c *realControl) Refresh(pod *v1.Pod, opts *UpdateOptions) RefreshResult { // check in-place updating has not completed yet if checkErr := opts.CheckContainersUpdateCompleted(pod, &state); checkErr != nil { - klog.V(6).Infof("Check Pod %s/%s in-place update not completed yet: %v", pod.Namespace, pod.Name, checkErr) + klog.V(6).ErrorS(checkErr, "Check Pod in-place update not completed yet", "namespace", pod.Namespace, "name", pod.Name) return RefreshResult{} } @@ -138,7 +138,7 @@ func (c *realControl) Refresh(pod *v1.Pod, opts *UpdateOptions) RefreshResult { // pre-check the previous updated containers if checkErr := doPreCheckBeforeNext(pod, state.PreCheckBeforeNext); checkErr != nil { - klog.V(5).Infof("Pod %s/%s in-place update pre-check not passed: %v", pod.Namespace, pod.Name, checkErr) + klog.V(5).ErrorS(checkErr, "Pod in-place update pre-check not passed", "namespace", pod.Namespace, "name", pod.Name) return RefreshResult{} } diff --git a/pkg/util/inplaceupdate/inplace_update_defaults.go b/pkg/util/inplaceupdate/inplace_update_defaults.go index 8ab9ce2e38..e60b188c54 100644 --- a/pkg/util/inplaceupdate/inplace_update_defaults.go +++ b/pkg/util/inplaceupdate/inplace_update_defaults.go @@ -68,7 +68,7 @@ func SetOptionsDefaults(opts *UpdateOptions) *UpdateOptions { // defaultPatchUpdateSpecToPod returns new pod that merges spec into old pod func defaultPatchUpdateSpecToPod(pod *v1.Pod, spec *UpdateSpec, state *appspub.InPlaceUpdateState) (*v1.Pod, error) { - klog.V(5).Infof("Begin to in-place update pod %s/%s with update spec %v, state %v", pod.Namespace, pod.Name, util.DumpJSON(spec), util.DumpJSON(state)) + klog.V(5).InfoS("Begin to in-place update pod", "namespace", pod.Namespace, "name", pod.Name, "spec", util.DumpJSON(spec), "state", util.DumpJSON(state)) state.NextContainerImages = make(map[string]string) state.NextContainerRefMetadata = make(map[string]metav1.ObjectMeta) @@ -172,7 +172,7 @@ func defaultPatchUpdateSpecToPod(pod *v1.Pod, spec *UpdateSpec, state *appspub.I Containers: containersToUpdate.List(), }) - klog.V(5).Infof("Decide to in-place update pod %s/%s with state %v", pod.Namespace, pod.Name, util.DumpJSON(state)) + klog.V(5).InfoS("Decide to in-place update pod", "namespace", pod.Namespace, "name", pod.Name, "state", util.DumpJSON(state)) inPlaceUpdateStateJSON, _ := json.Marshal(state) pod.Annotations[appspub.InPlaceUpdateStateKey] = string(inPlaceUpdateStateJSON) @@ -203,8 +203,8 @@ func addMetadataSharedContainersToUpdate(pod *v1.Pod, containersToUpdate sets.St } for _, key := range labelsToUpdate.UnsortedList() { if _, exists := objMeta.Labels[key]; exists { - klog.Warningf("Has to in-place update container %s with lower priority in Pod %s/%s, for the label %s it shared has changed", - cName, pod.Namespace, pod.Name, key) + klog.InfoS("Has to in-place update container with lower priority in Pod, for the label it shared has changed", + "containerName", cName, "namespace", pod.Namespace, 
"name", pod.Name, "label", key) containersToUpdate.Insert(cName) newToUpdate.Insert(cName) break @@ -212,8 +212,8 @@ func addMetadataSharedContainersToUpdate(pod *v1.Pod, containersToUpdate sets.St } for _, key := range annotationsToUpdate.UnsortedList() { if _, exists := objMeta.Annotations[key]; exists { - klog.Warningf("Has to in-place update container %s with lower priority in Pod %s/%s, for the annotation %s it shared has changed", - cName, pod.Namespace, pod.Name, key) + klog.InfoS("Has to in-place update container with lower priority in Pod, for the annotation it shared has changed", + "containerName", cName, "namespace", pod.Namespace, "podName", pod.Name, "annotation", key) containersToUpdate.Insert(cName) newToUpdate.Insert(cName) break @@ -385,7 +385,7 @@ func defaultCheckContainersInPlaceUpdateCompleted(pod *v1.Pod, inPlaceUpdateStat if runtimeContainerMetaSet != nil { if checkAllContainersHashConsistent(pod, runtimeContainerMetaSet, plainHash) { - klog.V(5).Infof("Check Pod %s/%s in-place update completed for all container hash consistent", pod.Namespace, pod.Name) + klog.V(5).InfoS("Check Pod in-place update completed for all container hash consistent", "namespace", pod.Namespace, "name", pod.Name) return nil } // If it needs not to update envs from metadata, we don't have to return error here, @@ -443,7 +443,7 @@ func checkAllContainersHashConsistent(pod *v1.Pod, runtimeContainerMetaSet *apps } } if containerStatus == nil { - klog.Warningf("Find no container %s in status for Pod %s/%s", containerSpec.Name, pod.Namespace, pod.Name) + klog.InfoS("Find no container in status for Pod", "containerName", containerSpec.Name, "namespace", pod.Namespace, "podName", pod.Name) return false } @@ -455,28 +455,31 @@ func checkAllContainersHashConsistent(pod *v1.Pod, runtimeContainerMetaSet *apps } } if containerMeta == nil { - klog.Warningf("Find no container %s in runtime-container-meta for Pod %s/%s", containerSpec.Name, pod.Namespace, pod.Name) + klog.InfoS("Find no container in runtime-container-meta for Pod", "containerName", containerSpec.Name, "namespace", pod.Namespace, "podName", pod.Name) return false } if containerMeta.ContainerID != containerStatus.ContainerID { - klog.Warningf("Find container %s in runtime-container-meta for Pod %s/%s has different containerID with status %s != %s", - containerSpec.Name, pod.Namespace, pod.Name, containerMeta.ContainerID, containerStatus.ContainerID) + klog.InfoS("Find container in runtime-container-meta for Pod has different containerID with status", + "containerName", containerSpec.Name, "namespace", pod.Namespace, "podName", pod.Name, + "metaID", containerMeta.ContainerID, "statusID", containerStatus.ContainerID) return false } switch hashType { case plainHash: if expectedHash := kubeletcontainer.HashContainer(containerSpec); containerMeta.Hashes.PlainHash != expectedHash { - klog.Warningf("Find container %s in runtime-container-meta for Pod %s/%s has different plain hash with spec %v != %v", - containerSpec.Name, pod.Namespace, pod.Name, containerMeta.Hashes.PlainHash, expectedHash) + klog.InfoS("Find container in runtime-container-meta for Pod has different plain hash with spec", + "containerName", containerSpec.Name, "namespace", pod.Namespace, "podName", pod.Name, + "metaHash", containerMeta.Hashes.PlainHash, "expectedHash", expectedHash) return false } case extractedEnvFromMetadataHash: hasher := utilcontainermeta.NewEnvFromMetadataHasher() if expectedHash := hasher.GetExpectHash(containerSpec, pod); 
containerMeta.Hashes.ExtractedEnvFromMetadataHash != expectedHash { - klog.Warningf("Find container %s in runtime-container-meta for Pod %s/%s has different extractedEnvFromMetadataHash with spec %v != %v", - containerSpec.Name, pod.Namespace, pod.Name, containerMeta.Hashes.ExtractedEnvFromMetadataHash, expectedHash) + klog.InfoS("Find container in runtime-container-meta for Pod has different extractedEnvFromMetadataHash with spec", + "containerName", containerSpec.Name, "namespace", pod.Namespace, "podName", pod.Name, + "metaHash", containerMeta.Hashes.ExtractedEnvFromMetadataHash, "expectedHash", expectedHash) return false } } diff --git a/pkg/util/lifecycle/lifecycle_utils.go b/pkg/util/lifecycle/lifecycle_utils.go index ea819ace2c..7fecfa7de0 100644 --- a/pkg/util/lifecycle/lifecycle_utils.go +++ b/pkg/util/lifecycle/lifecycle_utils.go @@ -116,8 +116,8 @@ func (c *realControl) executePodNotReadyPolicy(pod *v1.Pod, state appspub.Lifecy } if err != nil { - klog.Errorf("Failed to set pod(%v) Ready/NotReady at %s lifecycle state, error: %v", - client.ObjectKeyFromObject(pod), state, err) + klog.ErrorS(err, "Failed to set pod Ready/NotReady at lifecycle state", + "pod", client.ObjectKeyFromObject(pod), "state", state) } return } diff --git a/pkg/util/pods.go b/pkg/util/pods.go index f2cd1c7664..a17901018c 100644 --- a/pkg/util/pods.go +++ b/pkg/util/pods.go @@ -369,7 +369,7 @@ func ExtractPort(param intstr.IntOrString, container v1.Container) (int, error) case intstr.String: if port, err = findPortByName(container, param.StrVal); err != nil { // Last ditch effort - maybe it was an int stored as string? - klog.Errorf("error : %v", err) + klog.ErrorS(err, "failed to find port by name") if port, err = strconv.Atoi(param.StrVal); err != nil { return port, err } diff --git a/pkg/util/secret/parse.go b/pkg/util/secret/parse.go index d7334f1584..2168e2e129 100644 --- a/pkg/util/secret/parse.go +++ b/pkg/util/secret/parse.go @@ -28,7 +28,7 @@ var ( // make and set new docker keyring func MakeAndSetKeyring() { - klog.Infof("make and set new docker keyring") + klog.Info("make and set new docker keyring") keyring = credentialprovider.NewDockerKeyring() } diff --git a/pkg/util/tools.go b/pkg/util/tools.go index 84b4388d4f..5bce1fe2bf 100644 --- a/pkg/util/tools.go +++ b/pkg/util/tools.go @@ -138,13 +138,13 @@ func IsImageDigest(image string) bool { func IsContainerImageEqual(image1, image2 string) bool { repo1, tag1, digest1, err := ParseImage(image1) if err != nil { - klog.Errorf("parse image %s failed: %s", image1, err.Error()) + klog.ErrorS(err, "parse image failed", "image", image1) return false } repo2, tag2, digest2, err := ParseImage(image2) if err != nil { - klog.Errorf("parse image %s failed: %s", image2, err.Error()) + klog.ErrorS(err, "parse image failed", "image", image2) return false } diff --git a/pkg/util/workloadspread/workloadspread.go b/pkg/util/workloadspread/workloadspread.go index 57fa68063c..77b3c3aace 100644 --- a/pkg/util/workloadspread/workloadspread.go +++ b/pkg/util/workloadspread/workloadspread.go @@ -145,14 +145,14 @@ func VerifyGroupKind(ref interface{}, expectedKind string, expectedGroups []stri case *appsv1alpha1.TargetReference: gv, err = schema.ParseGroupVersion(ref.(*appsv1alpha1.TargetReference).APIVersion) if err != nil { - klog.Errorf("failed to parse GroupVersion for apiVersion (%s): %s", ref.(*appsv1alpha1.TargetReference).APIVersion, err.Error()) + klog.ErrorS(err, "failed to parse GroupVersion for apiVersion", "apiVersion", 
ref.(*appsv1alpha1.TargetReference).APIVersion) return false, err } kind = ref.(*appsv1alpha1.TargetReference).Kind case *metav1.OwnerReference: gv, err = schema.ParseGroupVersion(ref.(*metav1.OwnerReference).APIVersion) if err != nil { - klog.Errorf("failed to parse GroupVersion for apiVersion (%s): %s", ref.(*metav1.OwnerReference).APIVersion, err.Error()) + klog.ErrorS(err, "failed to parse GroupVersion for apiVersion", "apiVersion", ref.(*metav1.OwnerReference).APIVersion) return false, err } kind = ref.(*metav1.OwnerReference).Kind @@ -236,8 +236,8 @@ func (h *Handler) HandlePodCreation(pod *corev1.Pod) (skip bool, err error) { } defer func() { - klog.V(3).Infof("Cost of handling pod creation by WorkloadSpread (%s/%s) is %v", - matchedWS.Namespace, matchedWS.Name, time.Since(start)) + klog.V(3).InfoS("Cost of handling pod creation by WorkloadSpread", + "namespace", matchedWS.Namespace, "name", matchedWS.Name, "cost", time.Since(start)) }() return false, h.mutatingPod(matchedWS, pod, nil, CreateOperation) @@ -253,8 +253,8 @@ func (h *Handler) HandlePodDeletion(pod *corev1.Pod, operation Operation) error } err := json.Unmarshal([]byte(str), &injectWS) if err != nil { - klog.Errorf("parse Pod (%s/%s) annotations[%s]=%s failed: %s", pod.Namespace, pod.Name, - MatchedWorkloadSpreadSubsetAnnotations, str, err.Error()) + klog.ErrorS(err, "parse Pod annotations failed", "namespace", pod.Namespace, "name", pod.Name, + "key", MatchedWorkloadSpreadSubsetAnnotations, "value", str) return nil } @@ -270,16 +270,16 @@ func (h *Handler) HandlePodDeletion(pod *corev1.Pod, operation Operation) error err = h.Client.Get(context.TODO(), client.ObjectKey{Namespace: pod.Namespace, Name: injectWS.Name}, matchedWS) if err != nil { if errors.IsNotFound(err) { - klog.Warningf("Pod(%s/%s) matched WorkloadSpread(%s) Not Found", pod.Namespace, pod.Name, injectWS.Name) + klog.InfoS("Pod matched WorkloadSpread Not Found", "namespace", pod.Namespace, "name", pod.Name, "workloadSpread", injectWS.Name) return nil } - klog.Errorf("get pod(%s/%s) matched workloadSpread(%s) failed: %s", pod.Namespace, pod.Name, injectWS.Name, err.Error()) + klog.ErrorS(err, "get pod matched workloadSpread failed", "namespace", pod.Namespace, "name", pod.Name, "workloadSpread", injectWS.Name) return err } defer func() { - klog.V(3).Infof("Cost of handling pod deletion by WorkloadSpread (%s/%s) is %v", - matchedWS.Namespace, matchedWS.Name, time.Since(start)) + klog.V(3).InfoS("Cost of handling pod deletion by WorkloadSpread", + "namespace", matchedWS.Namespace, "name", matchedWS.Name, "cost", time.Since(start)) }() return h.mutatingPod(matchedWS, pod, injectWS, operation) @@ -294,7 +294,7 @@ func (h *Handler) mutatingPod(matchedWS *appsv1alpha1.WorkloadSpread, podName = pod.GetGenerateName() } - klog.V(3).Infof("Operation[%s] Pod(%s/%s) matched WorkloadSpread(%s/%s)", operation, pod.Namespace, podName, matchedWS.Namespace, matchedWS.Name) + klog.V(3).InfoS("Operation Pod matched WorkloadSpread", "operation", operation, "podNs", pod.Namespace, "podName", podName, "wsNs", matchedWS.Namespace, "wsName", matchedWS.Name) suitableSubsetName, generatedUID, err := h.acquireSuitableSubset(matchedWS, pod, injectWS, operation) if err != nil { @@ -305,16 +305,16 @@ func (h *Handler) mutatingPod(matchedWS *appsv1alpha1.WorkloadSpread, // if create pod, inject affinity、toleration、metadata in pod object if operation == CreateOperation && len(suitableSubsetName) > 0 { if _, injectErr = injectWorkloadSpreadIntoPod(matchedWS, pod, suitableSubsetName, 
generatedUID); injectErr != nil { - klog.Errorf("failed to inject Pod(%s/%s) subset(%s) data for WorkloadSpread(%s/%s)", - pod.Namespace, podName, suitableSubsetName, matchedWS.Namespace, matchedWS.Name) + klog.ErrorS(injectErr, "failed to inject Pod subset data for WorkloadSpread", + "podNs", pod.Namespace, "podName", podName, "suitableSubsetName", suitableSubsetName, "wsNs", matchedWS.Namespace, "wsName", matchedWS.Name) return injectErr } - klog.V(3).Infof("inject Pod(%s/%s) subset(%s) data for WorkloadSpread(%s/%s)", - pod.Namespace, podName, suitableSubsetName, matchedWS.Namespace, matchedWS.Name) + klog.V(3).InfoS("inject Pod subset data for WorkloadSpread", + "podNs", pod.Namespace, "podName", podName, "suitableSubsetName", suitableSubsetName, "wsNs", matchedWS.Namespace, "wsName", matchedWS.Name) } - klog.V(3).Infof("handler operation[%s] Pod(%s/%s) generatedUID(%s) for WorkloadSpread(%s/%s) done", - operation, pod.Namespace, podName, generatedUID, matchedWS.Namespace, matchedWS.Name) + klog.V(3).InfoS("handler operation Pod generatedUID for WorkloadSpread done", + "operation", operation, "podNs", pod.Namespace, "podName", podName, "generatedUID", generatedUID, "wsNs", matchedWS.Namespace, "wsName", matchedWS.Name) return injectErr } @@ -401,17 +401,17 @@ func (h *Handler) acquireSuitableSubset(matchedWS *appsv1alpha1.WorkloadSpread, refresh = true conflictTimes++ } else { - klog.V(3).Infof("update workloadSpread(%s/%s) SubsetStatus(%s) missingReplicas(%d) creatingPods(%d) deletingPods(%d) success", - wsClone.Namespace, wsClone.Name, suitableSubset.Name, - suitableSubset.MissingReplicas, len(suitableSubset.CreatingPods), len(suitableSubset.DeletingPods)) + klog.V(3).InfoS("update WorkloadSpread success", + "namespace", wsClone.Namespace, "name", wsClone.Name, "subsetStatus", suitableSubset.Name, + "missingReplicas", suitableSubset.MissingReplicas, "creatingPods", len(suitableSubset.CreatingPods), "deletingPods", len(suitableSubset.DeletingPods)) if cacheErr := util.GlobalCache.Add(wsClone); cacheErr != nil { - klog.Warningf("Failed to update workloadSpread(%s/%s) cache after update status, err: %v", wsClone.Namespace, wsClone.Name, cacheErr) + klog.ErrorS(cacheErr, "Failed to update workloadSpread cache after update status", "namespace", wsClone.Namespace, "name", wsClone.Name) } } costOfUpdate += time.Since(start) return err }); err != nil { - klog.Errorf("update WorkloadSpread(%s/%s) error %s", matchedWS.Namespace, matchedWS.Name, err.Error()) + klog.ErrorS(err, "update WorkloadSpread error", "namespace", matchedWS.Namespace, "name", matchedWS.Name) return "", "", err } } @@ -420,8 +420,8 @@ func (h *Handler) acquireSuitableSubset(matchedWS *appsv1alpha1.WorkloadSpread, suitableSubsetName = suitableSubset.Name } - klog.V(5).Infof("Cost of assigning suitable subset of WorkloadSpread (%s %s) for pod is: conflict times: %v, cost of Get %v, cost of Update %v", - matchedWS.Namespace, matchedWS.Name, conflictTimes, costOfGet, costOfUpdate) + klog.V(5).InfoS("Cost of assigning suitable subset of WorkloadSpread for pod", + "namespace", matchedWS.Namespace, "name", matchedWS.Name, "conflictTimes", conflictTimes, "costOfGet", costOfGet, "costOfUpdate", costOfUpdate) return suitableSubsetName, generatedUID, nil } @@ -439,13 +439,13 @@ func (h *Handler) tryToGetTheLatestMatchedWS(matchedWS *appsv1alpha1.WorkloadSpr if errors.IsNotFound(err) { return nil, nil } - klog.Errorf("error getting updated WorkloadSpread(%s/%s) from APIServer, err: %v", matchedWS.Namespace, matchedWS.Name, err) + klog.ErrorS(err,
"error getting updated WorkloadSpread from APIServer", "namespace", matchedWS.Namespace, "name", matchedWS.Name) return nil, err } } else { item, _, cacheErr := util.GlobalCache.Get(matchedWS) if cacheErr != nil { - klog.Errorf("Failed to get cached WorkloadSpread(%s/%s) from GlobalCache, err: %v", matchedWS.Namespace, matchedWS.Name, cacheErr) + klog.ErrorS(cacheErr, "Failed to get cached WorkloadSpread from GlobalCache", "namespace", matchedWS.Namespace, "name", matchedWS.Name) } if localCachedWS, ok := item.(*appsv1alpha1.WorkloadSpread); ok { wsClone = localCachedWS.DeepCopy() @@ -506,8 +506,8 @@ func (h *Handler) updateSubsetForPod(ws *appsv1alpha1.WorkloadSpread, suitableSubset = h.getSuitableSubset(subsetStatuses) if suitableSubset == nil { - klog.Warningf("WorkloadSpread (%s/%s) don't have a suitable subset for Pod (%s) when creating", - ws.Namespace, ws.Name, pod.Name) + klog.InfoS("WorkloadSpread don't have a suitable subset for Pod when creating", + "namespace", ws.Namespace, "wsName", ws.Name, "podName", pod.Name) return false, nil, "", nil } // no need to update WorkloadSpread status if MaxReplicas == nil @@ -536,8 +536,8 @@ func (h *Handler) updateSubsetForPod(ws *appsv1alpha1.WorkloadSpread, suitableSubset = getSpecificSubset(subsetStatuses, injectWS.Subset) if suitableSubset == nil { - klog.V(5).Infof("Pod (%s/%s) matched WorkloadSpread (%s) not found Subset(%s) when deleting", - ws.Namespace, pod.Name, ws.Name, injectWS.Subset) + klog.V(5).InfoS("Pod matched WorkloadSpread not found Subset when deleting", + "namespace", ws.Namespace, "podName", pod.Name, "wsName", ws.Name, "subset", injectWS.Subset) return false, nil, "", nil } if suitableSubset.MissingReplicas == -1 { @@ -625,12 +625,12 @@ func injectWorkloadSpreadIntoPod(ws *appsv1alpha1.WorkloadSpread, pod *corev1.Po cloneBytes, _ := json.Marshal(pod) modified, err := strategicpatch.StrategicMergePatch(cloneBytes, subset.Patch.Raw, &corev1.Pod{}) if err != nil { - klog.Errorf("failed to merge patch raw %s", subset.Patch.Raw) + klog.ErrorS(err, "failed to merge patch raw", "raw", subset.Patch.Raw) return false, err } newPod := &corev1.Pod{} if err = json.Unmarshal(modified, newPod); err != nil { - klog.Errorf("failed to unmarshal %s to Pod", modified) + klog.ErrorS(err, "failed to unmarshal to Pod", "pod", modified) return false, err } *pod = *newPod @@ -694,13 +694,13 @@ func (h *Handler) isReferenceEqual(target *appsv1alpha1.TargetReference, owner * targetGv, err := schema.ParseGroupVersion(target.APIVersion) if err != nil { - klog.Errorf("parse TargetReference apiVersion (%s) failed: %s", target.APIVersion, err.Error()) + klog.ErrorS(err, "parse TargetReference apiVersion failed", "apiVersion", target.APIVersion) return false } ownerGv, err := schema.ParseGroupVersion(owner.APIVersion) if err != nil { - klog.Errorf("parse OwnerReference apiVersion (%s) failed: %s", owner.APIVersion, err.Error()) + klog.ErrorS(err, "parse OwnerReference apiVersion failed", "apiVersion", owner.APIVersion) return false } @@ -714,7 +714,7 @@ func (h *Handler) isReferenceEqual(target *appsv1alpha1.TargetReference, owner * ownerObject, err := h.getObjectOf(owner, namespace) if err != nil { - klog.Errorf("Failed to get owner object %v: %v", owner, err) + klog.ErrorS(err, "Failed to get owner object", "owner", owner) return false } @@ -872,7 +872,7 @@ func GetReplicasFromCustomWorkload(reader client.Reader, object *unstructured.Un } whiteList, err := configuration.GetWSWatchCustomWorkloadWhiteList(reader) if err != nil { - klog.Errorf("Failed 
to get workloadSpread custom workload white list from kruise config map") + klog.ErrorS(err, "Failed to get workloadSpread custom workload white list from kruise config map") return 0 } @@ -887,12 +887,12 @@ func GetReplicasFromCustomWorkload(reader client.Reader, object *unstructured.Un if len(path) > 0 { replicas, exists, err = unstructured.NestedInt64(object.Object, path...) if err != nil || !exists { - klog.Errorf("Failed to get replicas from %v, replicas path %s", gvk, wl.ReplicasPath) + klog.ErrorS(err, "Failed to get replicas", "gvk", gvk, "replicasPath", wl.ReplicasPath) } } else { replicas, exists, err = unstructured.NestedInt64(object.Object, "spec", "replicas") if err != nil || !exists { - klog.Errorf("Failed to get replicas from %v, replicas path %s", gvk, wl.ReplicasPath) + klog.ErrorS(err, "Failed to get replicas", "gvk", gvk, "replicasPath", wl.ReplicasPath) } } return int32(replicas) diff --git a/pkg/webhook/builtinworkloads/validating/builtin_handlers.go b/pkg/webhook/builtinworkloads/validating/builtin_handlers.go index 9c599b4e45..f9daa83402 100644 --- a/pkg/webhook/builtinworkloads/validating/builtin_handlers.go +++ b/pkg/webhook/builtinworkloads/validating/builtin_handlers.go @@ -47,7 +47,7 @@ func (h *WorkloadHandler) Handle(ctx context.Context, req admission.Request) adm return admission.ValidationResponse(true, "") } if len(req.OldObject.Raw) == 0 { - klog.Warningf("Skip to validate %s %s/%s for no old object, maybe because of Kubernetes version < 1.16", req.Kind.Kind, req.Namespace, req.Name) + klog.InfoS("Skip to validate for no old object, maybe because of Kubernetes version < 1.16", "kind", req.Kind.Kind, "namespace", req.Namespace, "name", req.Name) return admission.ValidationResponse(true, "") } @@ -76,7 +76,7 @@ func (h *WorkloadHandler) Handle(ctx context.Context, req admission.Request) adm metaObj = obj replicas = obj.Spec.Replicas default: - klog.Warningf("Skip to validate %s %s/%s for unsupported resource", req.Kind.Kind, req.Namespace, req.Name) + klog.InfoS("Skip to validate for unsupported resource", "kind", req.Kind.Kind, "namespace", req.Namespace, "name", req.Name) return admission.ValidationResponse(true, "") } diff --git a/pkg/webhook/cloneset/mutating/cloneset_create_update_handler.go b/pkg/webhook/cloneset/mutating/cloneset_create_update_handler.go index e3c156b34b..1725a599f1 100644 --- a/pkg/webhook/cloneset/mutating/cloneset_create_update_handler.go +++ b/pkg/webhook/cloneset/mutating/cloneset_create_update_handler.go @@ -77,7 +77,7 @@ func (h *CloneSetCreateUpdateHandler) Handle(ctx context.Context, req admission. } resp := admission.PatchResponseFromRaw(req.AdmissionRequest.Object.Raw, marshalled) if len(resp.Patches) > 0 { - klog.V(5).Infof("Admit CloneSet %s/%s patches: %v", obj.Namespace, obj.Name, util.DumpJSON(resp.Patches)) + klog.V(5).InfoS("Admit CloneSet patches", "namespace", obj.Namespace, "name", obj.Name, "patches", util.DumpJSON(resp.Patches)) } return resp } diff --git a/pkg/webhook/cloneset/validating/cloneset_create_update_handler.go b/pkg/webhook/cloneset/validating/cloneset_create_update_handler.go index 84c4859d00..45b486a242 100644 --- a/pkg/webhook/cloneset/validating/cloneset_create_update_handler.go +++ b/pkg/webhook/cloneset/validating/cloneset_create_update_handler.go @@ -69,7 +69,7 @@ func (h *CloneSetCreateUpdateHandler) Handle(ctx context.Context, req admission.
} case admissionv1.Delete: if len(req.OldObject.Raw) == 0 { - klog.Warningf("Skip to validate CloneSet %s/%s deletion for no old object, maybe because of Kubernetes version < 1.16", req.Namespace, req.Name) + klog.InfoS("Skip to validate CloneSet deletion for no old object, maybe because of Kubernetes version < 1.16", "namespace", req.Namespace, "name", req.Name) return admission.ValidationResponse(true, "") } if err := h.Decoder.DecodeRaw(req.AdmissionRequest.OldObject, oldObj); err != nil { diff --git a/pkg/webhook/customresourcedefinition/validating/crd_handler.go b/pkg/webhook/customresourcedefinition/validating/crd_handler.go index bb83c436df..e62fe1229e 100644 --- a/pkg/webhook/customresourcedefinition/validating/crd_handler.go +++ b/pkg/webhook/customresourcedefinition/validating/crd_handler.go @@ -48,7 +48,7 @@ func (h *CRDHandler) Handle(ctx context.Context, req admission.Request) admissio return admission.ValidationResponse(true, "") } if len(req.OldObject.Raw) == 0 { - klog.Warningf("Skip to validate CRD %s deletion for no old object, maybe because of Kubernetes version < 1.16", req.Name) + klog.InfoS("Skip to validate CRD deletion for no old object, maybe because of Kubernetes version < 1.16", "name", req.Name) return admission.ValidationResponse(true, "") } @@ -80,7 +80,7 @@ func (h *CRDHandler) Handle(ctx context.Context, req admission.Request) admissio } } default: - klog.Warningf("Skip to validate CRD %s deletion for unrecognized version %s", req.Name, req.Kind.Version) + klog.InfoS("Skip to validate CRD deletion for unrecognized version", "name", req.Name, "version", req.Kind.Version) return admission.ValidationResponse(true, "") } diff --git a/pkg/webhook/daemonset/mutating/daemonset_mutating_handler.go b/pkg/webhook/daemonset/mutating/daemonset_mutating_handler.go index 74e0005004..bbd87c0b37 100644 --- a/pkg/webhook/daemonset/mutating/daemonset_mutating_handler.go +++ b/pkg/webhook/daemonset/mutating/daemonset_mutating_handler.go @@ -64,7 +64,7 @@ func (h *DaemonSetCreateUpdateHandler) Handle(ctx context.Context, req admission } resp := admission.PatchResponseFromRaw(req.AdmissionRequest.Object.Raw, marshalled) if len(resp.Patches) > 0 { - klog.V(5).Infof("Admit DaemonSet %s/%s patches: %v", obj.Namespace, obj.Name, util.DumpJSON(resp.Patches)) + klog.V(5).InfoS("Admit DaemonSet patches", "namespace", obj.Namespace, "name", obj.Name, "patches", util.DumpJSON(resp.Patches)) } return resp diff --git a/pkg/webhook/daemonset/validating/daemonset_create_update_handler.go b/pkg/webhook/daemonset/validating/daemonset_create_update_handler.go index ebe2e9c268..2a269f5251 100644 --- a/pkg/webhook/daemonset/validating/daemonset_create_update_handler.go +++ b/pkg/webhook/daemonset/validating/daemonset_create_update_handler.go @@ -73,7 +73,7 @@ func (h *DaemonSetCreateUpdateHandler) Handle(ctx context.Context, req admission } allowed, reason, err := validatingDaemonSetFn(ctx, obj) if err != nil { - klog.Warningf("ds %s/%s action %v fail:%s", obj.Namespace, obj.Name, req.AdmissionRequest.Operation, err.Error()) + klog.ErrorS(err, "validate daemonset failed", "namespace", obj.Namespace, "name", obj.Name, "operation", req.AdmissionRequest.Operation) return admission.Errored(http.StatusInternalServerError, err) } return admission.ValidationResponse(allowed, reason) diff --git a/pkg/webhook/ephemeraljob/validating/ephemeraljob_create_update_handler.go b/pkg/webhook/ephemeraljob/validating/ephemeraljob_create_update_handler.go index 4c71b2f39b..eeaed7ab62 100644 ---
a/pkg/webhook/ephemeraljob/validating/ephemeraljob_create_update_handler.go +++ b/pkg/webhook/ephemeraljob/validating/ephemeraljob_create_update_handler.go @@ -52,7 +52,7 @@ func (h *EphemeralJobCreateUpdateHandler) Handle(ctx context.Context, req admiss } if err := validate(obj); err != nil { - klog.Warningf("Error validate EphemeralJob %s: %v", obj.Name, err) + klog.ErrorS(err, "Error validate EphemeralJob", "name", obj.Name) return admission.Errored(http.StatusBadRequest, err) } diff --git a/pkg/webhook/imagelistpulljob/mutating/imagelistpulljob_create_update_handler.go b/pkg/webhook/imagelistpulljob/mutating/imagelistpulljob_create_update_handler.go index 2329b073ac..9b0214e6b1 100644 --- a/pkg/webhook/imagelistpulljob/mutating/imagelistpulljob_create_update_handler.go +++ b/pkg/webhook/imagelistpulljob/mutating/imagelistpulljob_create_update_handler.go @@ -57,7 +57,7 @@ func (h *ImageListPullJobCreateUpdateHandler) Handle(ctx context.Context, req ad } resp := admission.PatchResponseFromRaw(req.AdmissionRequest.Object.Raw, marshalled) if len(resp.Patches) > 0 { - klog.V(5).Infof("Admit ImageListPullJob %s patches: %v", obj.Name, util.DumpJSON(resp.Patches)) + klog.V(5).InfoS("Admit ImageListPullJob patches", "name", obj.Name, "patches", util.DumpJSON(resp.Patches)) } return resp diff --git a/pkg/webhook/imagelistpulljob/validating/imagelistpulljob_create_update_handler.go b/pkg/webhook/imagelistpulljob/validating/imagelistpulljob_create_update_handler.go index 47efcb37ce..bee46db12b 100644 --- a/pkg/webhook/imagelistpulljob/validating/imagelistpulljob_create_update_handler.go +++ b/pkg/webhook/imagelistpulljob/validating/imagelistpulljob_create_update_handler.go @@ -56,7 +56,7 @@ func (h *ImageListPullJobCreateUpdateHandler) Handle(ctx context.Context, req ad } if err := validate(obj); err != nil { - klog.Warningf("Error validate ImageListPullJob %s/%s: %v", obj.Namespace, obj.Name, err) + klog.ErrorS(err, "Error validate ImageListPullJob", "namespace", obj.Namespace, "name", obj.Name) return admission.Errored(http.StatusBadRequest, err) } diff --git a/pkg/webhook/imagepulljob/mutating/imagepulljob_create_update_handler.go b/pkg/webhook/imagepulljob/mutating/imagepulljob_create_update_handler.go index 76f8a76914..bd970f1b90 100644 --- a/pkg/webhook/imagepulljob/mutating/imagepulljob_create_update_handler.go +++ b/pkg/webhook/imagepulljob/mutating/imagepulljob_create_update_handler.go @@ -58,7 +58,7 @@ func (h *ImagePullJobCreateUpdateHandler) Handle(ctx context.Context, req admiss } resp := admission.PatchResponseFromRaw(req.AdmissionRequest.Object.Raw, marshalled) if len(resp.Patches) > 0 { - klog.V(5).Infof("Admit ImagePullJob %s patches: %v", obj.Name, util.DumpJSON(resp.Patches)) + klog.V(5).InfoS("Admit ImagePullJob patches", "name", obj.Name, "patches", util.DumpJSON(resp.Patches)) } return resp diff --git a/pkg/webhook/imagepulljob/validating/imagepulljob_create_update_handler.go b/pkg/webhook/imagepulljob/validating/imagepulljob_create_update_handler.go index 2f4fe82646..28d0e41e5f 100644 --- a/pkg/webhook/imagepulljob/validating/imagepulljob_create_update_handler.go +++ b/pkg/webhook/imagepulljob/validating/imagepulljob_create_update_handler.go @@ -57,7 +57,7 @@ func (h *ImagePullJobCreateUpdateHandler) Handle(ctx context.Context, req admiss } if err := validate(obj); err != nil { - klog.Warningf("Error validate ImagePullJob %s/%s: %v", obj.Namespace, obj.Name, err) + klog.ErrorS(err, "Error validate ImagePullJob", "namespace", obj.Namespace, "name", obj.Name) return 
admission.Errored(http.StatusBadRequest, err) } diff --git a/pkg/webhook/ingress/validating/ingress_handler.go b/pkg/webhook/ingress/validating/ingress_handler.go index b522ac9f5e..1adbab7a8d 100644 --- a/pkg/webhook/ingress/validating/ingress_handler.go +++ b/pkg/webhook/ingress/validating/ingress_handler.go @@ -46,7 +46,7 @@ func (h *IngressHandler) Handle(ctx context.Context, req admission.Request) admi return admission.ValidationResponse(true, "") } if len(req.OldObject.Raw) == 0 { - klog.Warningf("Skip to validate ingress %s deletion for no old object, maybe because of Kubernetes version < 1.16", req.Name) + klog.InfoS("Skip to validate ingress deletion for no old object, maybe because of Kubernetes version < 1.16", "name", req.Name) return admission.ValidationResponse(true, "") } diff --git a/pkg/webhook/namespace/validating/namespace_handler.go b/pkg/webhook/namespace/validating/namespace_handler.go index b6a0618d58..030c8549a6 100644 --- a/pkg/webhook/namespace/validating/namespace_handler.go +++ b/pkg/webhook/namespace/validating/namespace_handler.go @@ -45,7 +45,7 @@ func (h *NamespaceHandler) Handle(ctx context.Context, req admission.Request) ad return admission.ValidationResponse(true, "") } if len(req.OldObject.Raw) == 0 { - klog.Warningf("Skip to validate namespace %s deletion for no old object, maybe because of Kubernetes version < 1.16", req.Name) + klog.InfoS("Skip to validate namespace deletion for no old object, maybe because of Kubernetes version < 1.16", "name", req.Name) return admission.ValidationResponse(true, "") } obj := &v1.Namespace{} diff --git a/pkg/webhook/nodeimage/mutating/nodeimage_create_update_handler.go b/pkg/webhook/nodeimage/mutating/nodeimage_create_update_handler.go index 22d43308a1..03225a9a94 100644 --- a/pkg/webhook/nodeimage/mutating/nodeimage_create_update_handler.go +++ b/pkg/webhook/nodeimage/mutating/nodeimage_create_update_handler.go @@ -69,7 +69,7 @@ func (h *NodeImageCreateUpdateHandler) Handle(ctx context.Context, req admission } resp := admission.PatchResponseFromRaw(req.AdmissionRequest.Object.Raw, marshalled) if len(resp.Patches) > 0 { - klog.V(5).Infof("Admit NodeImage %s patches: %v", obj.Name, util.DumpJSON(resp.Patches)) + klog.V(5).InfoS("Admit NodeImage patches", "name", obj.Name, "patches", util.DumpJSON(resp.Patches)) } return resp diff --git a/pkg/webhook/nodeimage/validating/nodeimage_create_update_handler.go b/pkg/webhook/nodeimage/validating/nodeimage_create_update_handler.go index f742ccfd80..9ad991a886 100644 --- a/pkg/webhook/nodeimage/validating/nodeimage_create_update_handler.go +++ b/pkg/webhook/nodeimage/validating/nodeimage_create_update_handler.go @@ -55,7 +55,7 @@ func (h *NodeImageCreateUpdateHandler) Handle(ctx context.Context, req admission } if err := validate(obj); err != nil { - klog.Warningf("Error validate NodeImage %s: %v", obj.Name, err) + klog.ErrorS(err, "Error validate NodeImage", "name", obj.Name) return admission.Errored(http.StatusBadRequest, err) } diff --git a/pkg/webhook/pod/mutating/container_launch_priority_initialization.go b/pkg/webhook/pod/mutating/container_launch_priority_initialization.go index ee9d5e0ac2..f7d32ae98f 100644 --- a/pkg/webhook/pod/mutating/container_launch_priority_initialization.go +++ b/pkg/webhook/pod/mutating/container_launch_priority_initialization.go @@ -33,7 +33,7 @@ func (h *PodCreateHandler) containerLaunchPriorityInitialization(_ context.Conte priority[i] = 0 - i } h.setPodEnv(priority, pod) - klog.V(3).Infof("Injected ordered container launch priority for Pod 
%s/%s", pod.Namespace, pod.Name) + klog.V(3).InfoS("Injected ordered container launch priority for Pod", "namespace", pod.Namespace, "name", pod.Name) return false, nil } @@ -47,7 +47,7 @@ func (h *PodCreateHandler) containerLaunchPriorityInitialization(_ context.Conte } h.setPodEnv(priority, pod) - klog.V(3).Infof("Injected customized container launch priority for Pod %s/%s", pod.Namespace, pod.Name) + klog.V(3).InfoS("Injected customized container launch priority for Pod", "namespace", pod.Namespace, "name", pod.Name) return false, nil } diff --git a/pkg/webhook/pod/mutating/enhancedlivenessprobe_handler.go b/pkg/webhook/pod/mutating/enhancedlivenessprobe_handler.go index 602592a0c6..c1e6af4c3b 100644 --- a/pkg/webhook/pod/mutating/enhancedlivenessprobe_handler.go +++ b/pkg/webhook/pod/mutating/enhancedlivenessprobe_handler.go @@ -37,13 +37,13 @@ func (h *PodCreateHandler) enhancedLivenessProbeWhenPodCreate(ctx context.Contex context, err := removeAndBackUpPodContainerLivenessProbe(pod) if err != nil { - klog.Errorf("Remove pod (%v/%v) container livenessProbe config and backup error: %v", pod.Namespace, pod.Name, err) + klog.ErrorS(err, "Remove pod container livenessProbe config and backup error", "namespace", pod.Namespace, "name", pod.Name) return false, err } if context == "" { return true, nil } - klog.V(3).Infof("Mutating add pod(%s/%s) annotation[%s]=%s", pod.Namespace, pod.Name, alpha1.AnnotationNativeContainerProbeContext, context) + klog.V(3).InfoS("Mutating add pod annotation", "namespace", pod.Namespace, "name", pod.Name, "key", alpha1.AnnotationNativeContainerProbeContext, "value", context) return false, nil } @@ -69,8 +69,8 @@ func removeAndBackUpPodContainerLivenessProbe(pod *v1.Pod) (string, error) { } containersLivenessProbeRaw, err := json.Marshal(containersLivenessProbe) if err != nil { - klog.Errorf("Failed to json marshal %v for pod: %v/%v, err: %v", - containersLivenessProbe, pod.Namespace, pod.Name, err) + klog.ErrorS(err, "Failed to json marshal liveness probe for pod", + "probe", containersLivenessProbe, "namespace", pod.Namespace, "name", pod.Name) return "", fmt.Errorf("Failed to json marshal %v for pod: %v/%v, err: %v", containersLivenessProbe, pod.Namespace, pod.Name, err) } diff --git a/pkg/webhook/pod/mutating/persistent_pod_state.go b/pkg/webhook/pod/mutating/persistent_pod_state.go index 05aba94d96..a472ab0fa1 100644 --- a/pkg/webhook/pod/mutating/persistent_pod_state.go +++ b/pkg/webhook/pod/mutating/persistent_pod_state.go @@ -73,8 +73,8 @@ func (h *PodCreateHandler) persistentPodStateMutatingPod(ctx context.Context, re return true, nil } - klog.V(3).Infof("inject node affinity(required: %s, preferred: %s) in pod(%s/%s) for PersistentPodState", - util.DumpJSON(nodeSelector), util.DumpJSON(preference), pod.Namespace, pod.Name) + klog.V(3).InfoS("inject node affinity in pod for PersistentPodState", + "required", util.DumpJSON(nodeSelector), "preferred", util.DumpJSON(preference), "namespace", pod.Namespace, "name", pod.Name) // inject persistentPodState annotation in pod if pod.Annotations == nil { @@ -147,7 +147,7 @@ func createNodeAffinity(spec appsv1alpha1.PersistentPodStateSpec, podState appsv func SelectorPersistentPodState(reader client.Reader, ref appsv1alpha1.TargetReference, ns string) *appsv1alpha1.PersistentPodState { ppsList := &appsv1alpha1.PersistentPodStateList{} if err := reader.List(context.TODO(), ppsList, &client.ListOptions{Namespace: ns}, utilclient.DisableDeepCopy); err != nil { - klog.Errorf("List PersistentPodStateList failed: %s", 
err.Error()) + klog.ErrorS(err, "List PersistentPodStateList failed") return nil } for i := range ppsList.Items { diff --git a/pkg/webhook/pod/mutating/pod_unavailable_budget.go b/pkg/webhook/pod/mutating/pod_unavailable_budget.go index 86b5f7197a..920c300916 100644 --- a/pkg/webhook/pod/mutating/pod_unavailable_budget.go +++ b/pkg/webhook/pod/mutating/pod_unavailable_budget.go @@ -43,6 +43,6 @@ func (h *PodCreateHandler) pubMutatingPod(ctx context.Context, req admission.Req pod.Annotations = map[string]string{} } pod.Annotations[pubcontrol.PodRelatedPubAnnotation] = pub.Name - klog.V(3).Infof("mutating add pod(%s/%s) annotation[%s]=%s", pod.Namespace, pod.Name, pubcontrol.PodRelatedPubAnnotation, pub.Name) + klog.V(3).InfoS("mutating add pod annotation", "namespace", pod.Namespace, "name", pod.Name, "key", pubcontrol.PodRelatedPubAnnotation, "value", pub.Name) return false, nil } diff --git a/pkg/webhook/pod/mutating/sidecarset.go b/pkg/webhook/pod/mutating/sidecarset.go index 21528d0778..6f110a35b7 100644 --- a/pkg/webhook/pod/mutating/sidecarset.go +++ b/pkg/webhook/pod/mutating/sidecarset.go @@ -109,13 +109,13 @@ func (h *PodCreateHandler) sidecarsetMutatingPod(ctx context.Context, req admiss // check pod if isUpdated { if !matchedSidecarSets[0].IsPodAvailabilityChanged(pod, oldPod) { - klog.V(3).Infof("pod(%s/%s) availability unchanged for sidecarSet, and ignore", pod.Namespace, pod.Name) + klog.V(3).InfoS("pod availability unchanged for sidecarSet, and ignore", "namespace", pod.Namespace, "name", pod.Name) return true, nil } } - klog.V(3).Infof("[sidecar inject] begin to operation(%s) pod(%s/%s) resources(%s) subResources(%s)", - req.Operation, req.Namespace, req.Name, req.Resource, req.SubResource) + klog.V(4).InfoS("begin to operate resource", "func", "sidecar inject", + "operation", req.Operation, "namespace", req.Namespace, "name", req.Name, "resource", req.Resource, "subResource", req.SubResource) // patch pod metadata, annotations & labels // When the Pod main container is upgraded in place, and the sidecarSet configuration does not change at this time, // at this point, it can also patch pod metadata @@ -127,7 +127,7 @@ func (h *PodCreateHandler) sidecarsetMutatingPod(ctx context.Context, req admiss sidecarSet := control.GetSidecarset() sk, err := sidecarcontrol.PatchPodMetadata(&pod.ObjectMeta, sidecarSet.Spec.PatchPodMetadata) if err != nil { - klog.Errorf("sidecarSet(%s) update pod(%s/%s) metadata failed: %s", sidecarSet.Name, pod.Namespace, pod.Name, err.Error()) + klog.ErrorS(err, "sidecarSet update pod metadata failed", "sidecarSet", sidecarSet.Name, "namespace", pod.Namespace, "podName", pod.Name) return false, err } else if !sk { // skip = false @@ -139,14 +139,15 @@ func (h *PodCreateHandler) sidecarsetMutatingPod(ctx context.Context, req admiss if err != nil { return false, err } else if len(sidecarContainers) == 0 && len(sidecarInitContainers) == 0 { - klog.V(3).Infof("[sidecar inject] pod(%s/%s) don't have injected containers", pod.Namespace, pod.Name) + klog.V(3).InfoS("pod don't have injected containers", "func", "sidecar inject", "namespace", pod.Namespace, "name", pod.Name) return skip, nil } - klog.V(3).Infof("[sidecar inject] begin inject sidecarContainers(%v) sidecarInitContainers(%v) sidecarSecrets(%v), volumes(%s)"+ - "annotations(%v) into pod(%s/%s)", sidecarContainers, sidecarInitContainers, sidecarSecrets, volumesInSidecar, injectedAnnotations, - pod.Namespace, pod.Name) - klog.V(4).Infof("[sidecar inject] before mutating: %v", util.DumpJSON(pod)) + 
klog.V(3).InfoS("begin inject into pod", "func", "sidecar inject", "sidecarContainers", sidecarContainers, + "sidecarInitContainers", sidecarInitContainers, "sidecarSecrets", sidecarSecrets, + "volumesInSidecar", volumesInSidecar, "injectedAnnotations", injectedAnnotations, + "namespace", pod.Namespace, "name", pod.Name) + klog.V(4).InfoS("before mutating", "func", "sidecar inject", "pod", klog.KObj(pod)) // apply sidecar set info into pod // 1. inject init containers, sort by their name, after the original init containers sort.SliceStable(sidecarInitContainers, func(i, j int) bool { @@ -163,7 +164,7 @@ func (h *PodCreateHandler) sidecarsetMutatingPod(ctx context.Context, req admiss for k, v := range injectedAnnotations { pod.Annotations[k] = v } - klog.V(4).Infof("[sidecar inject] after mutating: %v", util.DumpJSON(pod)) + klog.V(4).InfoS("after mutating", "func", "sidecar inject", "pod", klog.KObj(pod)) return false, nil } @@ -178,7 +179,7 @@ func (h *PodCreateHandler) getSuitableRevisionSidecarSet(sidecarSet *appsv1alpha hc := sidecarcontrol.NewHistoryControl(h.Client) revisions, err := history.NewHistory(h.Client).ListControllerRevisions(sidecarcontrol.MockSidecarSetForRevision(sidecarSet), hc.GetRevisionSelector(sidecarSet)) if err != nil { - klog.Errorf("Failed to list history controllerRevisions, err %v, name %v", err, sidecarSet.Name) + klog.ErrorS(err, "Failed to list history controllerRevisions", "name", sidecarSet.Name) return nil, err } @@ -234,13 +235,13 @@ func (h *PodCreateHandler) getSpecificHistorySidecarSet(sidecarSet *appsv1alpha1 hc := sidecarcontrol.NewHistoryControl(h.Client) historySidecarSet, err := hc.GetHistorySidecarSet(sidecarSet, revisionInfo) if err != nil { - klog.Warningf("Failed to restore history revision for SidecarSet %v, ControllerRevision name %v:, error: %v", - sidecarSet.Name, sidecarSet.Spec.InjectionStrategy.Revision, err) + klog.ErrorS(err, "Failed to restore history revision for SidecarSet", + "name", sidecarSet.Name, "revision", sidecarSet.Spec.InjectionStrategy.Revision) return nil, err } if historySidecarSet == nil { historySidecarSet = sidecarSet.DeepCopy() - klog.Warningf("Failed to restore history revision for SidecarSet %v, will use the latest", sidecarSet.Name) + klog.InfoS("Failed to restore history revision for SidecarSet, will use the latest", "name", sidecarSet.Name) } return historySidecarSet, nil } @@ -347,7 +348,7 @@ func buildSidecars(isUpdated bool, pod *corev1.Pod, oldPod *corev1.Pod, matchedS for _, control := range matchedSidecarSets { sidecarSet := control.GetSidecarset() - klog.V(3).Infof("build pod(%s/%s) sidecar containers for sidecarSet(%s)", pod.Namespace, pod.Name, sidecarSet.Name) + klog.V(3).InfoS("build pod sidecar containers for sidecarSet", "namespace", pod.Namespace, "podName", pod.Name, "sidecarSet", sidecarSet.Name) // sidecarSet List sidecarSetNames.Insert(sidecarSet.Name) // pre-process volumes only in sidecar @@ -380,8 +381,8 @@ func buildSidecars(isUpdated bool, pod *corev1.Pod, oldPod *corev1.Pod, matchedS transferEnvs := sidecarcontrol.GetSidecarTransferEnvs(initContainer, pod) // append volumeMounts SubPathExpr environments transferEnvs = util.MergeEnvVar(transferEnvs, injectedEnvs) - klog.Infof("try to inject initContainer sidecar %v@%v/%v, with injected envs: %v, volumeMounts: %v", - initContainer.Name, pod.Namespace, pod.Name, transferEnvs, injectedMounts) + klog.InfoS("try to inject initContainer sidecar", + "containerName", initContainer.Name, "namespace", pod.Namespace, "podName", pod.Name, "envs", 
transferEnvs, "volumeMounts", injectedMounts) // insert volumes that initContainers used for _, mount := range initContainer.VolumeMounts { volumesInSidecars = append(volumesInSidecars, *volumesMap[mount.Name]) @@ -420,8 +421,8 @@ func buildSidecars(isUpdated bool, pod *corev1.Pod, oldPod *corev1.Pod, matchedS transferEnvs := sidecarcontrol.GetSidecarTransferEnvs(sidecarContainer, pod) // append volumeMounts SubPathExpr environments transferEnvs = util.MergeEnvVar(transferEnvs, injectedEnvs) - klog.Infof("try to inject Container sidecar %v@%v/%v, with injected envs: %v, volumeMounts: %v", - sidecarContainer.Name, pod.Namespace, pod.Name, transferEnvs, injectedMounts) + klog.InfoS("try to inject Container sidecar %v@%v/%v, with injected envs: %v, volumeMounts: %v", + "containerName", sidecarContainer.Name, "namespace", pod.Namespace, "podName", pod.Name, "envs", transferEnvs, "volumeMounts", injectedMounts) //when update pod object if isUpdated { // judge whether inject sidecar container into pod @@ -432,12 +433,12 @@ func buildSidecars(isUpdated bool, pod *corev1.Pod, oldPod *corev1.Pod, matchedS continue } - klog.V(3).Infof("upgrade or insert sidecar container %v during upgrade in pod %v/%v", - sidecarContainer.Name, pod.Namespace, pod.Name) + klog.V(3).InfoS("upgrade or insert sidecar container during pod upgrade", + "containerName", sidecarContainer.Name, "namespace", pod.Namespace, "podName", pod.Name) //when created pod object, need inject sidecar container into pod } else { - klog.V(3).Infof("inject new sidecar container %v during creation in pod %v/%v", - sidecarContainer.Name, pod.Namespace, pod.Name) + klog.V(3).InfoS("inject new sidecar container during pod creation", + "containerName", sidecarContainer.Name, "namespace", pod.Namespace, "podName", pod.Name) } isInjecting = true // insert volume that sidecar container used diff --git a/pkg/webhook/pod/mutating/workloadspread.go b/pkg/webhook/pod/mutating/workloadspread.go index 7fc9da0f99..48b574d793 100644 --- a/pkg/webhook/pod/mutating/workloadspread.go +++ b/pkg/webhook/pod/mutating/workloadspread.go @@ -47,7 +47,7 @@ func (h *PodCreateHandler) workloadSpreadMutatingPod(ctx context.Context, req ad // check dry run dryRun = dryrun.IsDryRun(options.DryRun) if dryRun { - klog.V(5).Infof("Operation[%s] Pod (%s/%s) is a dry run, then admit", req.AdmissionRequest.Operation, pod.Namespace, pod.Name) + klog.V(5).InfoS("Operation is a dry run, then admit", "operation", req.AdmissionRequest.Operation, "namespace", pod.Namespace, "podName", pod.Name) return true, nil } return workloadSpreadHandler.HandlePodCreation(pod) diff --git a/pkg/webhook/pod/validating/pod_create_update_handler.go b/pkg/webhook/pod/validating/pod_create_update_handler.go index 64e92f25af..5574952489 100644 --- a/pkg/webhook/pod/validating/pod_create_update_handler.go +++ b/pkg/webhook/pod/validating/pod_create_update_handler.go @@ -44,7 +44,7 @@ type PodCreateHandler struct { func (h *PodCreateHandler) validatingPodFn(ctx context.Context, req admission.Request) (allowed bool, reason string, err error) { allowed = true if req.Operation == admissionv1.Delete && len(req.OldObject.Raw) == 0 { - klog.Warningf("Skip to validate pod %s/%s deletion for no old object, maybe because of Kubernetes version < 1.16", req.Namespace, req.Name) + klog.InfoS("Skip to validate pod deletion for no old object, maybe because of Kubernetes version < 1.16", "namespace", req.Namespace, "name", req.Name) return } diff --git a/pkg/webhook/pod/validating/pod_unavailable_budget.go 
b/pkg/webhook/pod/validating/pod_unavailable_budget.go index 3d48f5f808..a7c1047450 100644 --- a/pkg/webhook/pod/validating/pod_unavailable_budget.go +++ b/pkg/webhook/pod/validating/pod_unavailable_budget.go @@ -59,7 +59,7 @@ func (p *PodCreateHandler) podUnavailableBudgetValidatingPod(ctx context.Context } // the change will not cause pod unavailability, then pass if !pubcontrol.PubControl.IsPodUnavailableChanged(oldPod, newPod) { - klog.V(6).Infof("validate pod(%s/%s) changed can not cause unavailability, then don't need check pub", newPod.Namespace, newPod.Name) + klog.V(6).InfoS("validate pod changed can not cause unavailability, then don't need check pub", "namespace", newPod.Namespace, "name", newPod.Name) return true, "", nil } checkPod = oldPod @@ -75,7 +75,7 @@ func (p *PodCreateHandler) podUnavailableBudgetValidatingPod(ctx context.Context // filter out invalid Delete operation, only validate delete pods resources case admissionv1.Delete: if req.AdmissionRequest.SubResource != "" { - klog.V(6).Infof("pod(%s/%s) AdmissionRequest operation(DELETE) subResource(%s), then admit", req.Namespace, req.Name, req.SubResource) + klog.V(6).InfoS("pod AdmissionRequest operation(DELETE) subResource, then admit", "namespace", req.Namespace, "name", req.Name, "subResource", req.SubResource) return true, "", nil } checkPod = &corev1.Pod{} @@ -95,7 +95,7 @@ func (p *PodCreateHandler) podUnavailableBudgetValidatingPod(ctx context.Context case admissionv1.Create: // ignore create operation other than subresource eviction if req.AdmissionRequest.SubResource != "eviction" { - klog.V(6).Infof("pod(%s/%s) AdmissionRequest operation(CREATE) Resource(%s) subResource(%s), then admit", req.Namespace, req.Name, req.Resource, req.SubResource) + klog.V(6).InfoS("pod AdmissionRequest operation(CREATE) Resource and subResource, then admit", "namespace", req.Namespace, "name", req.Name, "subResource", req.SubResource, "resource", req.Resource) return true, "", nil } eviction := &policy.Eviction{} diff --git a/pkg/webhook/pod/validating/workloadspread.go b/pkg/webhook/pod/validating/workloadspread.go index bb21cc7e9e..64c1709579 100644 --- a/pkg/webhook/pod/validating/workloadspread.go +++ b/pkg/webhook/pod/validating/workloadspread.go @@ -41,12 +41,12 @@ func (p *PodCreateHandler) workloadSpreadValidatingPod(ctx context.Context, req var err error workloadSpreadHandler := wsutil.NewWorkloadSpreadHandler(p.Client) - klog.V(6).Infof("workloadSpread validate Operation[%s] Pod(%s/%s)", req.Operation, req.Namespace, req.Name) + klog.V(6).InfoS("workloadSpread validate Operation", "operation", req.Operation, "namespace", req.Namespace, "name", req.Name) switch req.AdmissionRequest.Operation { case admissionv1.Delete: if req.AdmissionRequest.SubResource != "" { - klog.V(6).Infof("Pod(%s/%s) AdmissionRequest operation(DELETE) subResource(%s), then admit", req.Namespace, req.Name, req.SubResource) + klog.V(6).InfoS("Pod AdmissionRequest operation(DELETE) subResource, then admit", "namespace", req.Namespace, "name", req.Name, "subResource", req.SubResource) return true, "", nil } @@ -63,7 +63,7 @@ func (p *PodCreateHandler) workloadSpreadValidatingPod(ctx context.Context, req } dryRun = dryrun.IsDryRun(deletion.DryRun) if dryRun { - klog.V(5).Infof("Operation[%s] Pod (%s/%s) is a dry run, then admit", req.AdmissionRequest.Operation, pod.Namespace, pod.Name) + klog.V(5).InfoS("Operation is a dry run, then admit", "operation", req.AdmissionRequest.Operation, "namespace", pod.Namespace, "name", pod.Name) return true, "", err 
} @@ -74,7 +74,7 @@ func (p *PodCreateHandler) workloadSpreadValidatingPod(ctx context.Context, req case admissionv1.Create: // ignore create operation other than subresource eviction if req.AdmissionRequest.SubResource != "eviction" { - klog.V(6).Infof("Pod(%s/%s) AdmissionRequest operation(CREATE) Resource(%s) subResource(%s), then admit", req.Namespace, req.Name, req.Resource, req.SubResource) + klog.V(6).InfoS("Pod AdmissionRequest operation(CREATE) Resource and subResource, then admit", "namespace", req.Namespace, "name", req.Name, "resource", req.Resource, "subResource", req.SubResource) return true, "", nil } @@ -87,7 +87,7 @@ func (p *PodCreateHandler) workloadSpreadValidatingPod(ctx context.Context, req if eviction.DeleteOptions != nil { dryRun = dryrun.IsDryRun(eviction.DeleteOptions.DryRun) if dryRun { - klog.V(5).Infof("Operation[Eviction] Pod (%s/%s) is a dry run, then admit", req.AdmissionRequest.Namespace, req.AdmissionRequest.Name) + klog.V(5).InfoS("Operation[Eviction] is a dry run, then admit", "namespace", req.AdmissionRequest.Namespace, "name", req.AdmissionRequest.Name) return true, "", nil } } diff --git a/pkg/webhook/resourcedistribution/validating/resourcedistribution_create_update_handler.go b/pkg/webhook/resourcedistribution/validating/resourcedistribution_create_update_handler.go index 49f2f847c7..2671c347a5 100755 --- a/pkg/webhook/resourcedistribution/validating/resourcedistribution_create_update_handler.go +++ b/pkg/webhook/resourcedistribution/validating/resourcedistribution_create_update_handler.go @@ -156,7 +156,7 @@ func (h *ResourceDistributionCreateUpdateHandler) Handle(ctx context.Context, re return admission.Errored(http.StatusForbidden, fmt.Errorf("feature-gate %s is not enabled", features.ResourceDistributionGate)) } if allErrs := h.validateResourceDistribution(obj, oldObj); len(allErrs) != 0 { - klog.V(3).Infof("all errors of validation: %v", allErrs) + klog.V(3).InfoS("all errors of validation", "errors", fmt.Sprintf("%v", allErrs)) return admission.Errored(http.StatusUnprocessableEntity, allErrs.ToAggregate()) } return admission.ValidationResponse(true, "") diff --git a/pkg/webhook/server.go b/pkg/webhook/server.go index a0ff5634a4..cee9f96333 100644 --- a/pkg/webhook/server.go +++ b/pkg/webhook/server.go @@ -50,7 +50,7 @@ func addHandlers(m HandlerPath2GetterMap) { func addHandlersWithGate(m HandlerPath2GetterMap, fn GateFunc) { for path, handler := range m { if len(path) == 0 { - klog.Warningf("Skip handler with empty path.") + klog.Warning("Skip handler with empty path") continue } if path[0] != '/' { @@ -58,7 +58,7 @@ func addHandlersWithGate(m HandlerPath2GetterMap, fn GateFunc) { } _, found := HandlerMap[path] if found { - klog.V(1).Infof("conflicting webhook builder path %v in handler map", path) + klog.V(1).InfoS("conflicting webhook builder path in handler map", "path", path) } HandlerMap[path] = handler if fn != nil { @@ -88,7 +88,7 @@ func SetupWithManager(mgr manager.Manager) error { filterActiveHandlers() for path, handlerGetter := range HandlerMap { server.Register(path, &webhook.Admission{Handler: handlerGetter(mgr)}) - klog.V(3).Infof("Registered webhook handler %s", path) + klog.V(3).InfoS("Registered webhook handler", "path", path) } // register conversion webhook @@ -143,7 +143,7 @@ func WaitReady() error { } if duration > time.Second*5 { - klog.Warningf("Failed to wait webhook ready over %s: %v", duration, err) + klog.ErrorS(err, "Failed to wait webhook ready", "duration", duration) } time.Sleep(time.Second * 2) } diff --git 
a/pkg/webhook/service/validating/service_handler.go b/pkg/webhook/service/validating/service_handler.go index 0c00899a0c..da987910e2 100644 --- a/pkg/webhook/service/validating/service_handler.go +++ b/pkg/webhook/service/validating/service_handler.go @@ -44,7 +44,7 @@ func (h *ServiceHandler) Handle(ctx context.Context, req admission.Request) admi return admission.ValidationResponse(true, "") } if len(req.OldObject.Raw) == 0 { - klog.Warningf("Skip to validate service %s deletion for no old object, maybe because of Kubernetes version < 1.16", req.Name) + klog.InfoS("Skip to validate service deletion for no old object, maybe because of Kubernetes version < 1.16", "name", req.Name) return admission.ValidationResponse(true, "") } diff --git a/pkg/webhook/sidecarset/mutating/sidecarset_create_update_handler.go b/pkg/webhook/sidecarset/mutating/sidecarset_create_update_handler.go index 45e5c364a0..f690adfd26 100644 --- a/pkg/webhook/sidecarset/mutating/sidecarset_create_update_handler.go +++ b/pkg/webhook/sidecarset/mutating/sidecarset_create_update_handler.go @@ -83,7 +83,7 @@ func (h *SidecarSetCreateHandler) Handle(ctx context.Context, req admission.Requ return admission.Errored(http.StatusInternalServerError, err) } } - klog.V(4).Infof("sidecarset after mutating: %v", util.DumpJSON(obj)) + klog.V(4).InfoS("sidecarset after mutating", "object", util.DumpJSON(obj)) if reflect.DeepEqual(obj, copy) { return admission.Allowed("") } diff --git a/pkg/webhook/statefulset/mutating/statefulset_create_update_handler.go b/pkg/webhook/statefulset/mutating/statefulset_create_update_handler.go index d597ccae0a..1dd4de00e3 100644 --- a/pkg/webhook/statefulset/mutating/statefulset_create_update_handler.go +++ b/pkg/webhook/statefulset/mutating/statefulset_create_update_handler.go @@ -121,7 +121,7 @@ func (h *StatefulSetCreateUpdateHandler) Handle(ctx context.Context, req admissi } resp := admission.PatchResponseFromRaw(req.AdmissionRequest.Object.Raw, marshalled) if len(resp.Patches) > 0 { - klog.V(5).Infof("Admit StatefulSet %s/%s patches: %v", obj.Namespace, obj.Name, util.DumpJSON(resp.Patches)) + klog.V(5).InfoS("Admit StatefulSet patches", "namespace", obj.Namespace, "name", obj.Name, "patches", util.DumpJSON(resp.Patches)) } return resp } diff --git a/pkg/webhook/statefulset/validating/statefulset_create_update_handler.go b/pkg/webhook/statefulset/validating/statefulset_create_update_handler.go index d55c21fd6d..5ddcf89f4a 100644 --- a/pkg/webhook/statefulset/validating/statefulset_create_update_handler.go +++ b/pkg/webhook/statefulset/validating/statefulset_create_update_handler.go @@ -81,7 +81,7 @@ func (h *StatefulSetCreateUpdateHandler) Handle(ctx context.Context, req admissi } case admissionv1.Delete: if len(req.OldObject.Raw) == 0 { - klog.Warningf("Skip to validate StatefulSet %s/%s deletion for no old object, maybe because of Kubernetes version < 1.16", req.Namespace, req.Name) + klog.InfoS("Skip to validate StatefulSet deletion for no old object, maybe because of Kubernetes version < 1.16", "namespace", req.Namespace, "name", req.Name) return admission.ValidationResponse(true, "") } if err := h.decodeOldObject(req, oldObj); err != nil { diff --git a/pkg/webhook/uniteddeployment/mutating/uniteddeployment_create_update_handler.go b/pkg/webhook/uniteddeployment/mutating/uniteddeployment_create_update_handler.go index dd6857ffae..d1ad3e69ef 100644 --- a/pkg/webhook/uniteddeployment/mutating/uniteddeployment_create_update_handler.go +++
b/pkg/webhook/uniteddeployment/mutating/uniteddeployment_create_update_handler.go @@ -83,7 +83,7 @@ func (h *UnitedDeploymentCreateUpdateHandler) Handle(ctx context.Context, req ad } resp := admission.PatchResponseFromRaw(req.AdmissionRequest.Object.Raw, marshalled) if len(resp.Patches) > 0 { - klog.V(5).Infof("Admit UnitedDeployment %s/%s patches: %v", obj.Namespace, obj.Name, util.DumpJSON(resp.Patches)) + klog.V(5).InfoS("Admit UnitedDeployment patches", "namespace", obj.Namespace, "name", obj.Name, "patches", util.DumpJSON(resp.Patches)) } return resp } diff --git a/pkg/webhook/uniteddeployment/validating/uniteddeployment_create_update_handler.go b/pkg/webhook/uniteddeployment/validating/uniteddeployment_create_update_handler.go index 0049c078ea..585e1b17f6 100644 --- a/pkg/webhook/uniteddeployment/validating/uniteddeployment_create_update_handler.go +++ b/pkg/webhook/uniteddeployment/validating/uniteddeployment_create_update_handler.go @@ -72,7 +72,7 @@ func (h *UnitedDeploymentCreateUpdateHandler) Handle(ctx context.Context, req ad } case admissionv1.Delete: if len(req.OldObject.Raw) == 0 { - klog.Warningf("Skip to validate UnitedDeployment %s/%s deletion for no old object, maybe because of Kubernetes version < 1.16", req.Namespace, req.Name) + klog.InfoS("Skip to validate UnitedDeployment deletion for no old object, maybe because of Kubernetes version < 1.16", "namespace", req.Namespace, "name", req.Name) return admission.ValidationResponse(true, "") } if err := h.Decoder.DecodeRaw(req.AdmissionRequest.OldObject, oldObj); err != nil { diff --git a/pkg/webhook/util/configuration/configuration.go b/pkg/webhook/util/configuration/configuration.go index fec45790af..e9d985f771 100644 --- a/pkg/webhook/util/configuration/configuration.go +++ b/pkg/webhook/util/configuration/configuration.go @@ -87,7 +87,7 @@ func Ensure(kubeClient clientset.Interface, handlers map[string]types.HandlerGet return err } if _, ok := handlers[path]; !ok { - klog.Warningf("Ignore webhook for %s in configuration", path) + klog.InfoS("Ignore webhook in configuration", "path", path) continue } if wh.ClientConfig.Service != nil { @@ -112,7 +112,7 @@ func Ensure(kubeClient clientset.Interface, handlers map[string]types.HandlerGet return err } if _, ok := handlers[path]; !ok { - klog.Warningf("Ignore webhook for %s in configuration", path) + klog.InfoS("Ignore webhook in configuration", "path", path) continue } if wh.ClientConfig.Service != nil { diff --git a/pkg/webhook/util/controller/webhook_controller.go b/pkg/webhook/util/controller/webhook_controller.go index c675c00a0d..2c5435e7e7 100644 --- a/pkg/webhook/util/controller/webhook_controller.go +++ b/pkg/webhook/util/controller/webhook_controller.go @@ -99,14 +99,14 @@ func New(cfg *rest.Config, handlers map[string]webhooktypes.HandlerGetter) (*Con AddFunc: func(obj interface{}) { secret := obj.(*v1.Secret) if secret.Name == secretName { - klog.Infof("Secret %s added", secretName) + klog.InfoS("Secret added", "name", secretName) c.queue.Add("") } }, UpdateFunc: func(old, cur interface{}) { secret := cur.(*v1.Secret) if secret.Name == secretName { - klog.Infof("Secret %s updated", secretName) + klog.InfoS("Secret updated", "name", secretName) c.queue.Add("") } }, @@ -116,14 +116,14 @@ func New(cfg *rest.Config, handlers map[string]webhooktypes.HandlerGetter) (*Con AddFunc: func(obj interface{}) { conf := obj.(*admissionregistrationv1.MutatingWebhookConfiguration) if conf.Name == mutatingWebhookConfigurationName { - klog.Infof("MutatingWebhookConfiguration %s 
added", mutatingWebhookConfigurationName) + klog.InfoS("MutatingWebhookConfiguration added", "name", mutatingWebhookConfigurationName) c.queue.Add("") } }, UpdateFunc: func(old, cur interface{}) { conf := cur.(*admissionregistrationv1.MutatingWebhookConfiguration) if conf.Name == mutatingWebhookConfigurationName { - klog.Infof("MutatingWebhookConfiguration %s update", mutatingWebhookConfigurationName) + klog.InfoS("MutatingWebhookConfiguration update", "name", mutatingWebhookConfigurationName) c.queue.Add("") } }, @@ -133,14 +133,14 @@ func New(cfg *rest.Config, handlers map[string]webhooktypes.HandlerGetter) (*Con AddFunc: func(obj interface{}) { conf := obj.(*admissionregistrationv1.ValidatingWebhookConfiguration) if conf.Name == validatingWebhookConfigurationName { - klog.Infof("ValidatingWebhookConfiguration %s added", validatingWebhookConfigurationName) + klog.InfoS("ValidatingWebhookConfiguration added", "name", validatingWebhookConfigurationName) c.queue.Add("") } }, UpdateFunc: func(old, cur interface{}) { conf := cur.(*admissionregistrationv1.ValidatingWebhookConfiguration) if conf.Name == validatingWebhookConfigurationName { - klog.Infof("ValidatingWebhookConfiguration %s updated", validatingWebhookConfigurationName) + klog.InfoS("ValidatingWebhookConfiguration updated", "name", validatingWebhookConfigurationName) c.queue.Add("") } }, @@ -152,14 +152,14 @@ func New(cfg *rest.Config, handlers map[string]webhooktypes.HandlerGetter) (*Con AddFunc: func(obj interface{}) { crd := obj.(*apiextensionsv1.CustomResourceDefinition) if crd.Spec.Group == "apps.kruise.io" { - klog.Infof("CustomResourceDefinition %s added", crd.Name) + klog.InfoS("CustomResourceDefinition added", "name", crd.Name) c.queue.Add("") } }, UpdateFunc: func(old, cur interface{}) { crd := cur.(*apiextensionsv1.CustomResourceDefinition) if crd.Spec.Group == "apps.kruise.io" { - klog.Infof("CustomResourceDefinition %s updated", crd.Name) + klog.InfoS("CustomResourceDefinition updated", "name", crd.Name) c.queue.Add("") } }, @@ -180,8 +180,8 @@ func (c *Controller) Start(ctx context.Context) { defer utilruntime.HandleCrash() defer c.queue.ShutDown() - klog.Infof("Starting webhook-controller") - defer klog.Infof("Shutting down webhook-controller") + klog.Info("Starting webhook-controller") + defer klog.Info("Shutting down webhook-controller") c.informerFactory.Start(ctx.Done()) go func() { @@ -195,7 +195,7 @@ func (c *Controller) Start(ctx context.Context) { for c.processNextWorkItem() { } }, time.Second, ctx.Done()) - klog.Infof("Started webhook-controller") + klog.Info("Started webhook-controller") <-ctx.Done() } @@ -221,9 +221,9 @@ func (c *Controller) processNextWorkItem() bool { } func (c *Controller) sync() error { - klog.Infof("Starting to sync webhook certs and configurations") + klog.Info("Starting to sync webhook certs and configurations") defer func() { - klog.Infof("Finished to sync webhook certs and configurations") + klog.Info("Finished to sync webhook certs and configurations") }() dnsName := webhookutil.GetHost() diff --git a/pkg/webhook/util/health/checker.go b/pkg/webhook/util/health/checker.go index 0e5dc597bf..934f56ee72 100644 --- a/pkg/webhook/util/health/checker.go +++ b/pkg/webhook/util/health/checker.go @@ -72,17 +72,17 @@ func watchCACert(watcher *fsnotify.Watcher) { continue } - klog.Infof("Watched ca-cert %v %v", event.Name, event.Op) + klog.InfoS("Watched ca-cert", "eventName", event.Name, "operation", event.Op) // If the file was removed, re-add the watch. 
if isRemove(event) { if err := watcher.Add(event.Name); err != nil { - klog.Errorf("Failed to re-watch ca-cert %v: %v", event.Name, err) + klog.ErrorS(err, "Failed to re-watch ca-cert", "eventName", event.Name) } } if err := loadHTTPClientWithCACert(); err != nil { - klog.Errorf("Failed to reload ca-cert %v: %v", event.Name, err) + klog.ErrorS(err, "Failed to reload ca-cert", "eventName", event.Name) } case err, ok := <-watcher.Errors: @@ -90,7 +90,7 @@ if !ok { return } - klog.Errorf("Failed to watch ca-cert: %v", err) + klog.ErrorS(err, "Failed to watch ca-cert") } } } diff --git a/pkg/webhook/util/util.go b/pkg/webhook/util/util.go index c1018dd1a6..17f3a1f0dc 100644 --- a/pkg/webhook/util/util.go +++ b/pkg/webhook/util/util.go @@ -83,7 +83,7 @@ func GetRenewBeforeTime() time.Duration { if s := os.Getenv("CERTS_RENEW_BEFORE"); len(s) > 0 { t, err := strconv.Atoi(s[0 : len(s)-1]) if err != nil { - klog.Errorf("failed to parese time %s: %v", s[0:len(s)-1], err) + klog.ErrorS(err, "failed to parse time", "time", s[0:len(s)-1]) return renewBefore } suffix := s[len(s)-1] @@ -94,7 +94,7 @@ } else if suffix == 'y' { renewBefore = time.Duration(t) * 365 * time.Hour } else { - klog.Errorf("unknown date suffix %c", suffix) + klog.InfoS("unknown date suffix", "suffix", string(suffix)) } } if renewBefore <= 0 { diff --git a/pkg/webhook/util/writer/secret.go b/pkg/webhook/util/writer/secret.go index 33fc56b4d8..4b0f616780 100644 --- a/pkg/webhook/util/writer/secret.go +++ b/pkg/webhook/util/writer/secret.go @@ -119,11 +119,11 @@ func (s *secretCertWriter) overwrite(resourceVersion string) (*generator.Artifac secret.ResourceVersion = resourceVersion secret, err = s.Clientset.CoreV1().Secrets(secret.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}) if err != nil { - klog.Infof("Cert writer update secret failed: %v", err) + klog.ErrorS(err, "Cert writer update secret failed") return nil, err } - klog.Infof("Cert writer update secret %s resourceVersion from %s to %s", - secret.Name, resourceVersion, secret.ResourceVersion, + klog.InfoS("Cert writer update secret resourceVersion", + "name", secret.Name, "from", resourceVersion, "to", secret.ResourceVersion, ) return certs, nil } diff --git a/pkg/webhook/workloadspread/validating/workloadspread_validation.go b/pkg/webhook/workloadspread/validating/workloadspread_validation.go index e3ede1d4dd..a7e37d3cf5 100644 --- a/pkg/webhook/workloadspread/validating/workloadspread_validation.go +++ b/pkg/webhook/workloadspread/validating/workloadspread_validation.go @@ -62,7 +62,7 @@ var ( func verifyGroupKind(ref *appsv1alpha1.TargetReference, expectedKind string, expectedGroups []string) (bool, error) { gv, err := schema.ParseGroupVersion(ref.APIVersion) if err != nil { - klog.Errorf("failed to parse GroupVersion for apiVersion (%s): %s", ref.APIVersion, err.Error()) + klog.ErrorS(err, "failed to parse GroupVersion for apiVersion", "apiVersion", ref.APIVersion) return false, err }
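Taken together, the hunks in this patch apply one mechanical recipe: klog.Errorf moves the error out of the message and into the first argument of klog.ErrorS; klog.Infof and most klog.Warningf calls become klog.InfoS with a constant message; verbosity levels carry over through V(n).InfoS; and namespace/name pairs may collapse into a single field via klog.KObj. A self-contained sketch of the whole recipe (the pod value and the error are illustrative, not from the patch):

    package main

    import (
        "errors"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/klog/v2"
    )

    func main() {
        pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web-0"}}
        err := errors.New("connection refused")

        // Errors: ErrorS takes the error first, then a constant message.
        klog.ErrorS(err, "Failed to sync", "namespace", pod.Namespace, "name", pod.Name)

        // Verbosity levels carry over unchanged: V(n).Infof -> V(n).InfoS.
        klog.V(3).InfoS("Start syncing", "namespace", pod.Namespace, "name", pod.Name)

        // KObj renders any metav1.Object as a single "namespace/name" field.
        klog.V(4).InfoS("after mutating", "pod", klog.KObj(pod))

        klog.Flush()
    }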