From 03a6c7f517b60d31e30c910ed4a4469145ce686c Mon Sep 17 00:00:00 2001 From: zwwhdls Date: Tue, 27 Aug 2024 17:24:58 +0800 Subject: [PATCH 1/2] refact log Signed-off-by: zwwhdls --- cmd/app/mount_manager.go | 17 +- cmd/app/pod_manager.go | 11 +- cmd/app/webhook.go | 21 ++- cmd/controller.go | 34 ++-- cmd/dashboard/main.go | 27 +-- cmd/main.go | 14 +- cmd/node.go | 42 ++--- deploy/k8s.yaml | 2 - deploy/k8s_before_v1_18.yaml | 2 - deploy/kubernetes/base/resources.yaml | 2 - go.mod | 3 +- go.sum | 2 - pkg/config/config.go | 27 +-- pkg/config/setting.go | 17 +- pkg/controller/app_controller.go | 55 ++++--- pkg/controller/job_controller.go | 44 ++--- pkg/controller/mount_controller.go | 40 +++-- pkg/controller/mountinfo.go | 6 +- pkg/controller/pod_controller.go | 32 ++-- pkg/controller/pod_driver.go | 169 ++++++++++--------- pkg/controller/reconciler.go | 16 +- pkg/controller/secret_controller.go | 40 +++-- pkg/dashboard/controller.go | 29 ++-- pkg/dashboard/index.go | 6 +- pkg/dashboard/pod.go | 38 +++-- pkg/dashboard/pv.go | 8 +- pkg/driver/controller.go | 42 +++-- pkg/driver/driver.go | 17 +- pkg/driver/identity.go | 10 +- pkg/driver/node.go | 54 +++--- pkg/driver/node_test.go | 49 +++--- pkg/driver/provisioner.go | 47 +++--- pkg/fuse/passfd.go | 44 ++--- pkg/juicefs/juicefs.go | 170 +++++++------------ pkg/juicefs/juicefs_test.go | 173 -------------------- pkg/juicefs/mount/builder/common.go | 7 +- pkg/juicefs/mount/builder/interface.go | 9 +- pkg/juicefs/mount/builder/job.go | 5 +- pkg/juicefs/mount/builder/vci-serverless.go | 3 +- pkg/juicefs/mount/pod_mount.go | 141 +++++++++------- pkg/juicefs/mount/pod_mount_test.go | 16 +- pkg/juicefs/mount/process_mount.go | 53 +++--- pkg/juicefs/mount/process_mount_test.go | 14 ++ pkg/k8sclient/client.go | 110 +++++++------ pkg/k8sclient/kubelet_client.go | 6 +- pkg/util/log.go | 41 +++++ pkg/util/resource/namespace.go | 5 + pkg/util/resource/pod.go | 49 +++--- pkg/util/resource/pvc.go | 13 +- pkg/util/resource/terminal.go 
| 11 +- pkg/util/resource/volume.go | 5 +- pkg/util/util.go | 11 +- pkg/webhook/handler/handler.go | 32 ++-- pkg/webhook/handler/mutate/sidecar.go | 18 +- pkg/webhook/handler/register.go | 10 +- 55 files changed, 929 insertions(+), 940 deletions(-) create mode 100644 pkg/util/log.go diff --git a/cmd/app/mount_manager.go b/cmd/app/mount_manager.go index 96c0f0f25f..7a501da610 100644 --- a/cmd/app/mount_manager.go +++ b/cmd/app/mount_manager.go @@ -27,7 +27,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - "k8s.io/klog" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache" @@ -38,6 +38,7 @@ import ( var ( scheme = runtime.NewScheme() + log = klog.NewKlogr().WithName("manager") ) func init() { @@ -79,14 +80,14 @@ func NewMountManager( }), }) if err != nil { - klog.Errorf("New mount controller error: %v", err) + log.Error(err, "New mount controller error") return nil, err } // gen k8s client k8sClient, err := k8sclient.NewClient() if err != nil { - klog.V(5).Infof("Could not create k8s client %v", err) + log.Error(err, "Could not create k8s client") return nil, err } @@ -99,22 +100,22 @@ func NewMountManager( func (m *MountManager) Start(ctx context.Context) { // init Reconciler(Controller) if err := (mountctrl.NewMountController(m.client)).SetupWithManager(m.mgr); err != nil { - klog.Errorf("Register mount controller error: %v", err) + log.Error(err, "Register mount controller error") return } if err := (mountctrl.NewJobController(m.client)).SetupWithManager(m.mgr); err != nil { - klog.Errorf("Register job controller error: %v", err) + log.Error(err, "Register job controller error") return } if config.CacheClientConf { if err := (mountctrl.NewSecretController(m.client)).SetupWithManager(m.mgr); err != nil { - klog.Errorf("Register secret controller error: %v", err) + log.Error(err, "Register secret controller error") 
return } } - klog.Info("Mount manager started.") + log.Info("Mount manager started.") if err := m.mgr.Start(ctx); err != nil { - klog.Errorf("Mount manager start error: %v", err) + log.Error(err, "Mount manager start error") os.Exit(1) } } diff --git a/cmd/app/pod_manager.go b/cmd/app/pod_manager.go index 1e542a7b76..0ef4bb8738 100644 --- a/cmd/app/pod_manager.go +++ b/cmd/app/pod_manager.go @@ -23,7 +23,6 @@ import ( "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - "k8s.io/klog" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache" @@ -62,14 +61,14 @@ func NewPodManager() (*PodManager, error) { }), }) if err != nil { - klog.Errorf("New pod controller error: %v", err) + log.Error(err, "New pod controller error") return nil, err } // gen k8s client k8sClient, err := k8sclient.NewClient() if err != nil { - klog.V(5).Infof("Could not create k8s client %v", err) + log.Info("Could not create k8s client") return nil, err } @@ -82,12 +81,12 @@ func NewPodManager() (*PodManager, error) { func (m *PodManager) Start(ctx context.Context) error { // init Reconciler(Controller) if err := (mountctrl.NewPodController(m.client)).SetupWithManager(m.mgr); err != nil { - klog.Errorf("Register pod controller error: %v", err) + log.Error(err, "Register pod controller error") return err } - klog.Info("Pod manager started.") + log.Info("Pod manager started.") if err := m.mgr.Start(ctx); err != nil { - klog.Errorf("Pod manager start error: %v", err) + log.Error(err, "Pod manager start error") return err } return nil diff --git a/cmd/app/webhook.go b/cmd/app/webhook.go index cc65e67b02..d61ac2e46b 100644 --- a/cmd/app/webhook.go +++ b/cmd/app/webhook.go @@ -23,7 +23,6 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - "k8s.io/klog" ctrl "sigs.k8s.io/controller-runtime" 
"sigs.k8s.io/controller-runtime/pkg/cache" @@ -44,7 +43,7 @@ func NewWebhookManager(certDir string, webhookPort int, leaderElection bool, _ = clientgoscheme.AddToScheme(scheme) cfg, err := ctrl.GetConfig() if err != nil { - klog.Error(err, "can not get kube config") + log.Error(err, "can not get kube config") return nil, err } @@ -68,18 +67,18 @@ func NewWebhookManager(certDir string, webhookPort int, leaderElection bool, }) if err != nil { - klog.Error(err, "initialize controller manager failed") + log.Error(err, "initialize controller manager failed") return nil, err } // gen k8s client k8sClient, err := k8sclient.NewClient() if err != nil { - klog.V(5).Infof("Could not create k8s client %v", err) + log.Error(err, "Could not create k8s client") return nil, err } if config.CacheClientConf { if err := (mountctrl.NewSecretController(k8sClient)).SetupWithManager(mgr); err != nil { - klog.Errorf("Register secret controller error: %v", err) + log.Error(err, "Register secret controller error") return nil, err } } @@ -91,16 +90,16 @@ func NewWebhookManager(certDir string, webhookPort int, leaderElection bool, func (w *WebhookManager) Start(ctx context.Context) error { if err := w.registerWebhook(); err != nil { - klog.Errorf("Register webhook error: %v", err) + log.Error(err, "Register webhook error") return err } if err := w.registerAppController(); err != nil { - klog.Errorf("Register app controller error: %v", err) + log.Error(err, "Register app controller error") return err } - klog.Info("Webhook manager started.") + log.Info("Webhook manager started.") if err := w.mgr.Start(ctx); err != nil { - klog.Errorf("Webhook manager start error: %v", err) + log.Error(err, "Webhook manager start error") return err } return nil @@ -108,13 +107,13 @@ func (w *WebhookManager) Start(ctx context.Context) error { func (w *WebhookManager) registerWebhook() error { // register admission handlers - klog.Info("Register webhook handler") + log.Info("Register webhook handler") 
handler.Register(w.mgr, w.client) return nil } func (w *WebhookManager) registerAppController() error { // init Reconciler(Controller) - klog.Info("Register app controller") + log.Info("Register app controller") return (mountctrl.NewAppController(w.client)).SetupWithManager(w.mgr) } diff --git a/cmd/controller.go b/cmd/controller.go index 64661e5e56..829d3b4340 100644 --- a/cmd/controller.go +++ b/cmd/controller.go @@ -27,7 +27,6 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/klog" "github.com/juicedata/juicefs-csi-driver/cmd/app" "github.com/juicedata/juicefs-csi-driver/pkg/config" @@ -74,7 +73,8 @@ func parseControllerConfig() { if immutable, err := strconv.ParseBool(jfsImmutable); err == nil { config.Immutable = immutable } else { - klog.Errorf("cannot parse JUICEFS_IMMUTABLE: %v", err) + log.Error(err, "cannot parse JUICEFS_IMMUTABLE") + os.Exit(1) } } @@ -106,8 +106,8 @@ func parseControllerConfig() { // When not in sidecar mode, we should inherit attributes from CSI Node pod. 
k8sclient, err := k8s.NewClient() if err != nil { - klog.V(5).Infof("Can't get k8s client: %v", err) - os.Exit(0) + log.Error(err, "Can't get k8s client") + os.Exit(1) } CSINodeDsName := "juicefs-csi-node" if name := os.Getenv("JUICEFS_CSI_NODE_DS_NAME"); name != "" { @@ -115,8 +115,8 @@ func parseControllerConfig() { } ds, err := k8sclient.GetDaemonSet(context.TODO(), CSINodeDsName, config.Namespace) if err != nil { - klog.V(5).Infof("Can't get DaemonSet %s: %v", CSINodeDsName, err) - os.Exit(0) + log.Error(err, "Can't get DaemonSet", "ds", CSINodeDsName) + os.Exit(1) } config.CSIPod = corev1.Pod{ Spec: ds.Spec.Template.Spec, @@ -127,7 +127,8 @@ func parseControllerConfig() { func controllerRun(ctx context.Context) { parseControllerConfig() if nodeID == "" { - klog.Fatalln("nodeID must be provided") + log.Info("nodeID must be provided") + os.Exit(1) } // http server for pprof @@ -135,7 +136,8 @@ func controllerRun(ctx context.Context) { port := 6060 for { if err := http.ListenAndServe(fmt.Sprintf("localhost:%d", port), nil); err != nil { - klog.Errorf("failed to start pprof server: %v", err) + log.Error(err, "failed to start pprof server") + os.Exit(1) } port++ } @@ -157,7 +159,7 @@ func controllerRun(ctx context.Context) { Handler: mux, } if err := server.ListenAndServe(); err != nil { - klog.Errorf("failed to start metrics server: %v", err) + log.Error(err, "failed to start metrics server") } }() @@ -166,7 +168,7 @@ func controllerRun(ctx context.Context) { go func() { mgr, err := app.NewMountManager(leaderElection, leaderElectionNamespace, leaderElectionLeaseDuration) if err != nil { - klog.Error(err) + log.Error(err, "fail to create mount manager") return } mgr.Start(ctx) @@ -178,24 +180,28 @@ func controllerRun(ctx context.Context) { go func() { mgr, err := app.NewWebhookManager(certDir, webhookPort, leaderElection, leaderElectionNamespace, leaderElectionLeaseDuration) if err != nil { - klog.Fatalln(err) + log.Error(err, "fail to create webhook manager") + 
os.Exit(1) } if err := mgr.Start(ctx); err != nil { - klog.Fatalln(err) + log.Error(err, "fail to start webhook manager") + os.Exit(1) } }() } drv, err := driver.NewDriver(endpoint, nodeID, leaderElection, leaderElectionNamespace, leaderElectionLeaseDuration, registerer) if err != nil { - klog.Fatalln(err) + log.Error(err, "fail to create driver") + os.Exit(1) } go func() { <-ctx.Done() drv.Stop() }() if err := drv.Run(); err != nil { - klog.Fatalln(err) + log.Error(err, "fail to run driver") + os.Exit(1) } } diff --git a/cmd/dashboard/main.go b/cmd/dashboard/main.go index df1024a68f..c015ff2f84 100644 --- a/cmd/dashboard/main.go +++ b/cmd/dashboard/main.go @@ -20,7 +20,6 @@ import ( "context" goflag "flag" "fmt" - "log" "net/http" _ "net/http/pprof" "os" @@ -40,7 +39,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/util/homedir" - "k8s.io/klog" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache" @@ -58,6 +57,7 @@ const ( var ( scheme = runtime.NewScheme() + log = klog.NewKlogr().WithName("main") port uint16 devMode bool @@ -114,11 +114,13 @@ func run() { config = ctrl.GetConfigOrDie() } if err != nil { - log.Fatalf("can't get k8s config: %v", err) + log.Error(err, "can't get k8s config") + os.Exit(1) } mgr, err := newManager(config) if err != nil { - log.Fatalf("can't create manager: %v", err) + log.Error(err, "can't create manager") + os.Exit(1) } ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -159,28 +161,33 @@ func run() { } go func() { - log.Printf("listen on %s\n", addr) + log.Info("listen and serve", "addr", addr) if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed { - log.Fatalf("listen: %s\n", err) + log.Error(err, "listen error") + os.Exit(1) } }() go func() { // pprof server - log.Println(http.ListenAndServe("localhost:8089", nil)) + err = http.ListenAndServe("localhost:8089", nil) + if err != nil { + log.Error(err, 
"pprof server error") + } }() quit := make(chan os.Signal, 1) go func() { if err := podApi.StartManager(ctx, mgr); err != nil { - klog.Errorf("manager start error: %v", err) + log.Error(err, "manager start error") } quit <- syscall.SIGTERM }() signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM) <-quit - log.Println("Shutdown Server ...") + log.Info("Shutdown Server ...") if err := srv.Shutdown(ctx); err != nil { - log.Fatal("Server Shutdown:", err) + log.Error(err, "Server Shutdown") + os.Exit(1) } } diff --git a/cmd/main.go b/cmd/main.go index d8f7ec9d3d..a424d267c2 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -25,7 +25,7 @@ import ( "time" "github.com/spf13/cobra" - "k8s.io/klog" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "github.com/juicedata/juicefs-csi-driver/pkg/config" @@ -53,6 +53,8 @@ var ( leaderElection bool leaderElectionNamespace string leaderElectionLeaseDuration time.Duration + + log = klog.NewKlogr().WithName("main") ) func main() { @@ -63,7 +65,8 @@ func main() { if version { info, err := driver.GetVersionJSON() if err != nil { - klog.Fatalln(err) + log.Error(err, "fail to get version info") + os.Exit(1) } fmt.Println(info) os.Exit(0) @@ -107,18 +110,19 @@ func main() { func run() { if configPath != "" { if err := config.StartConfigReloader(configPath); err != nil { - klog.Fatalf("fail to load config: %v", err) + log.Error(err, "fail to load config") + os.Exit(1) } } ctx := ctrl.SetupSignalHandler() podName := os.Getenv("POD_NAME") if strings.Contains(podName, "csi-controller") { - klog.Info("Run CSI controller") + log.Info("Run CSI controller") controllerRun(ctx) } if strings.Contains(podName, "csi-node") { - klog.Info("Run CSI node") + log.Info("Run CSI node") nodeRun(ctx) } } diff --git a/cmd/node.go b/cmd/node.go index 798391d668..996395ef76 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -26,7 +26,6 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" "k8s.io/client-go/util/retry" - "k8s.io/klog" 
"github.com/juicedata/juicefs-csi-driver/cmd/app" "github.com/juicedata/juicefs-csi-driver/pkg/config" @@ -53,7 +52,7 @@ func parseNodeConfig() { if immutable, err := strconv.ParseBool(jfsImmutable); err == nil { config.Immutable = immutable } else { - klog.Errorf("cannot parse JUICEFS_IMMUTABLE: %v", err) + log.Error(err, "cannot parse JUICEFS_IMMUTABLE") } } config.NodeName = os.Getenv("NODE_NAME") @@ -107,8 +106,8 @@ func parseNodeConfig() { } if config.PodName == "" || config.Namespace == "" { - klog.Fatalln("Pod name & namespace can't be null.") - os.Exit(0) + log.Info("Pod name & namespace can't be null.") + os.Exit(1) } config.ReconcilerInterval = reconcilerInterval if config.ReconcilerInterval < 5 { @@ -117,26 +116,27 @@ func parseNodeConfig() { k8sclient, err := k8s.NewClient() if err != nil { - klog.V(5).Infof("Can't get k8s client: %v", err) - os.Exit(0) + log.Error(err, "Can't get k8s client") + os.Exit(1) } pod, err := k8sclient.GetPod(context.TODO(), config.PodName, config.Namespace) if err != nil { - klog.V(5).Infof("Can't get pod %s: %v", config.PodName, err) - os.Exit(0) + log.Error(err, "Can't get pod", "pod", config.PodName) + os.Exit(1) } config.CSIPod = *pod err = fuse.InitGlobalFds(context.TODO(), "/tmp") if err != nil { - klog.Errorf("Init global fds error: %v", err) - os.Exit(0) + log.Error(err, "Init global fds error") + os.Exit(1) } } func nodeRun(ctx context.Context) { parseNodeConfig() if nodeID == "" { - klog.Fatalln("nodeID must be provided") + log.Info("nodeID must be provided") + os.Exit(1) } // http server for pprof @@ -144,7 +144,7 @@ func nodeRun(ctx context.Context) { port := 6060 for { if err := http.ListenAndServe(fmt.Sprintf("localhost:%d", port), nil); err != nil { - klog.Errorf("failed to start pprof server: %v", err) + log.Error(err, "failed to start pprof server") } port++ } @@ -166,7 +166,7 @@ func nodeRun(ctx context.Context) { Handler: mux, } if err := server.ListenAndServe(); err != nil { - klog.Errorf("failed to start 
metrics server: %v", err) + log.Error(err, "failed to start metrics server") } }() @@ -177,7 +177,7 @@ func nodeRun(ctx context.Context) { if err := retry.OnError(retry.DefaultBackoff, func(err error) bool { return true }, func() error { return controller.StartReconciler() }); err != nil { - klog.V(5).Infof("Could not Start Reconciler of polling kubelet and fallback to watch ApiServer. err: %+v", err) + log.Error(err, "Could not Start Reconciler of polling kubelet and fallback to watch ApiServer.") needStartPodManager = true } } else { @@ -188,20 +188,23 @@ func nodeRun(ctx context.Context) { go func() { mgr, err := app.NewPodManager() if err != nil { - klog.Fatalln(err) + log.Error(err, "fail to create pod manager") + os.Exit(1) } if err := mgr.Start(ctx); err != nil { - klog.Fatalln(err) + log.Error(err, "fail to start pod manager") + os.Exit(1) } }() } - klog.V(5).Infof("Pod Reconciler Started") + log.Info("Pod Reconciler Started") } drv, err := driver.NewDriver(endpoint, nodeID, leaderElection, leaderElectionNamespace, leaderElectionLeaseDuration, registerer) if err != nil { - klog.Fatalln(err) + log.Error(err, "fail to create driver") + os.Exit(1) } go func() { @@ -210,6 +213,7 @@ func nodeRun(ctx context.Context) { }() if err := drv.Run(); err != nil { - klog.Fatalln(err) + log.Error(err, "fail to run driver") + os.Exit(1) } } diff --git a/deploy/k8s.yaml b/deploy/k8s.yaml index b1ee011675..802c9e9e95 100644 --- a/deploy/k8s.yaml +++ b/deploy/k8s.yaml @@ -509,7 +509,6 @@ spec: - --logtostderr - --nodeid=$(NODE_NAME) - --leader-election - - --v=5 - --config=/etc/config/config.yaml env: - name: CSI_ENDPOINT @@ -657,7 +656,6 @@ spec: - --endpoint=$(CSI_ENDPOINT) - --logtostderr - --nodeid=$(NODE_NAME) - - --v=5 - --enable-manager=true - --config=/etc/config/config.yaml env: diff --git a/deploy/k8s_before_v1_18.yaml b/deploy/k8s_before_v1_18.yaml index ce4f21e02c..4cfced3502 100644 --- a/deploy/k8s_before_v1_18.yaml +++ b/deploy/k8s_before_v1_18.yaml @@ -509,7 
+509,6 @@ spec: - --logtostderr - --nodeid=$(NODE_NAME) - --leader-election - - --v=5 - --config=/etc/config/config.yaml env: - name: CSI_ENDPOINT @@ -657,7 +656,6 @@ spec: - --endpoint=$(CSI_ENDPOINT) - --logtostderr - --nodeid=$(NODE_NAME) - - --v=5 - --enable-manager=true - --config=/etc/config/config.yaml env: diff --git a/deploy/kubernetes/base/resources.yaml b/deploy/kubernetes/base/resources.yaml index 177a3bca68..5e5f1b4691 100644 --- a/deploy/kubernetes/base/resources.yaml +++ b/deploy/kubernetes/base/resources.yaml @@ -361,7 +361,6 @@ spec: - --logtostderr - --nodeid=$(NODE_NAME) - --leader-election - - --v=5 - --config=/etc/config/config.yaml env: - name: CSI_ENDPOINT @@ -504,7 +503,6 @@ spec: - --endpoint=$(CSI_ENDPOINT) - --logtostderr - --nodeid=$(NODE_NAME) - - --v=5 - --enable-manager=true - --config=/etc/config/config.yaml env: diff --git a/go.mod b/go.mod index 7c49e6b78d..b4c8ddf648 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( k8s.io/api v0.23.0 k8s.io/apimachinery v0.23.0 k8s.io/client-go v0.23.0 - k8s.io/klog v1.0.0 + k8s.io/klog/v2 v2.70.1 k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b sigs.k8s.io/controller-runtime v0.11.1 sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.3.0 @@ -88,7 +88,6 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.23.0 // indirect k8s.io/component-base v0.23.0 // indirect - k8s.io/klog/v2 v2.70.1 // indirect k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect sigs.k8s.io/yaml v1.3.0 // indirect diff --git a/go.sum b/go.sum index 6dc9c423bb..348b37d203 100644 --- a/go.sum +++ b/go.sum @@ -940,8 +940,6 @@ k8s.io/component-base v0.22.0/go.mod h1:SXj6Z+V6P6GsBhHZVbWCw9hFjUdUYnJerlhhPnYC k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= 
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= diff --git a/pkg/config/config.go b/pkg/config/config.go index 71fb16cd8b..135180ab74 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -30,12 +30,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/klog" + "k8s.io/klog/v2" corev1 "k8s.io/api/core/v1" ) var ( + log = klog.NewKlogr().WithName("config") WebPort = MustGetWebPort() // web port used by metrics ByProcess = false // csi driver runs juicefs in process or not FormatInPod = false // put format/auth in pod (only in k8s) @@ -179,7 +180,7 @@ func MustGetWebPort() int { if err == nil { return port } - klog.Errorf("Fail to parse JUICEFS_CSI_WEB_PORT %s: %v", value, err) + log.Error(err, "Fail to parse JUICEFS_CSI_WEB_PORT", "port", value) } return 8080 } @@ -286,7 +287,7 @@ func (mpp *MountPodPatch) merge(mp MountPodPatch) { } for _, v := range mp.Volumes { if IsInterVolume(v.Name) { - klog.Warningf("applyConfig: volume %s uses an internal volume name, ignore", v.Name) + log.Info("applyConfig: volume uses an internal volume name, ignore", "volume", v.Name) continue } found := false @@ -297,7 +298,7 @@ func (mpp *MountPodPatch) merge(mp MountPodPatch) { } } if found { - klog.Warningf("applyConfig: volume %s already exists, ignore", v.Name) + log.Info("applyConfig: volume already exists, ignore", "volume", v.Name) continue } vok[v.Name] = true @@ -310,7 +311,7 @@ func (mpp *MountPodPatch) merge(mp MountPodPatch) { } for _, vm := range 
mp.VolumeMounts { if !vok[vm.Name] { - klog.Warningf("applyConfig: volumeMount %s not exists in volumes, ignore", vm.Name) + log.Info("applyConfig: volumeMount not exists in volumes, ignore", "volume", vm.Name) continue } mpp.VolumeMounts = append(mpp.VolumeMounts, vm) @@ -322,7 +323,7 @@ func (mpp *MountPodPatch) merge(mp MountPodPatch) { } for _, vm := range mp.VolumeDevices { if !vok[vm.Name] { - klog.Warningf("applyConfig: volumeDevices %s not exists in volumes, ignore", vm.Name) + log.Info("applyConfig: volumeDevices not exists in volumes, ignore", "volume", vm.Name) continue } mpp.VolumeDevices = append(mpp.VolumeDevices, vm) @@ -373,7 +374,7 @@ func (c *Config) GenMountPodPatch(setting JfsSetting) MountPodPatch { strData = strings.ReplaceAll(strData, "${VOLUME_NAME}", setting.Name) strData = strings.ReplaceAll(strData, "${SUB_PATH}", setting.SubPath) _ = json.Unmarshal([]byte(strData), patch) - klog.V(6).Infof("volume %s using patch: %+v", setting.VolumeId, patch) + log.V(1).Info("volume using patch", "volumeId", setting.VolumeId, "patch", patch) return *patch } @@ -410,7 +411,7 @@ func LoadConfig(configPath string) error { } GlobalConfig = cfg - klog.V(6).Infof("config loaded: %+v", GlobalConfig) + log.V(1).Info("config loaded", "global config", *GlobalConfig) return err } @@ -434,7 +435,7 @@ func StartConfigReloader(configPath string) error { select { case event, ok := <-watcher.Events: if !ok { - klog.Errorf("fsnotify watcher closed") + log.Info("fsnotify watcher closed") continue } if event.Op != fsnotify.Write && event.Op != fsnotify.Remove { @@ -447,17 +448,17 @@ func StartConfigReloader(configPath string) error { _ = watcher.Add(configPath) } - klog.Infof("config file %s updated, reload config", configPath) + log.Info("config file updated, reload config", "config file", configPath) err := LoadConfig(configPath) if err != nil { - klog.Errorf("fail to reload config: %v", err) + log.Error(err, "fail to reload config") continue } case err, ok := 
<-watcher.Errors: if !ok { continue } - klog.Errorf("fsnotify error: %v", err) + log.Error(err, "fsnotify error") } } }(fsnotifyWatcher) @@ -469,7 +470,7 @@ func StartConfigReloader(configPath string) error { for range ticker.C { err = LoadConfig(configPath) if err != nil { - klog.Error(err) + log.Error(err, "fail to load config") } } }() diff --git a/pkg/config/setting.go b/pkg/config/setting.go index 6ec9be911a..414d414d8f 100644 --- a/pkg/config/setting.go +++ b/pkg/config/setting.go @@ -30,7 +30,6 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/klog" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" "github.com/juicedata/juicefs-csi-driver/pkg/util" @@ -195,7 +194,7 @@ func ParseSetting(secrets, volCtx map[string]string, options []string, usePod bo if secrets["configs"] != "" { configStr := secrets["configs"] configs := make(map[string]string) - klog.V(6).Infof("Get configs in secret: %v", configStr) + log.V(1).Info("Get configs in secret", "config", configStr) if err := parseYamlOrJson(configStr, &configs); err != nil { return nil, err } @@ -205,7 +204,7 @@ func ParseSetting(secrets, volCtx map[string]string, options []string, usePod bo if secrets["envs"] != "" { envStr := secrets["envs"] env := make(map[string]string) - klog.V(6).Infof("Get envs in secret: %v", envStr) + log.V(1).Info("Get envs in secret", "env", envStr) if err := parseYamlOrJson(envStr, &env); err != nil { return nil, err } @@ -292,7 +291,7 @@ func genCacheDirs(jfsSetting *JfsSetting, volCtx map[string]string) error { Medium: medium, Path: volPath, } - klog.Infof("sizeLimit of emptyDir is %s", sizeLimit) + log.Info("sizeLimit of emptyDir", "size", sizeLimit) if sizeLimit != "" { if jfsSetting.CacheEmptyDir.SizeLimit, err = resource.ParseQuantity(sizeLimit); err != nil { return err @@ -309,7 +308,7 @@ func genCacheDirs(jfsSetting *JfsSetting, volCtx map[string]string) error { return fmt.Errorf("parse cache 
inline volume error: %v", err) } jfsSetting.CacheInlineVolumes = make([]*CacheInlineVolume, 0) - klog.V(6).Infof("get cache inline volume: %v", inlineVolumes) + log.V(1).Info("get cache inline volume", "inline volume", inlineVolumes) for i, inlineVolume := range inlineVolumes { volPath := fmt.Sprintf("/var/jfsCache-inlineVolume-%d", i) @@ -367,7 +366,7 @@ func genAndValidOptions(JfsSetting *JfsSetting, options []string) error { mountOption = fmt.Sprintf("%s=%s", strings.TrimSpace(ops[0]), strings.TrimSpace(ops[1])) } if mountOption == "writeback" { - klog.Warningf("writeback is not suitable in CSI, please do not use it. volumeId: %s", JfsSetting.VolumeId) + log.Info("writeback is not suitable in CSI, please do not use it.", "volumeId", JfsSetting.VolumeId) } if len(ops) == 2 && ops[0] == "buffer-size" { memLimit := JfsSetting.Attr.Resources.Limits[corev1.ResourceMemory] @@ -438,7 +437,7 @@ func GenPodAttrWithCfg(setting *JfsSetting, volCtx map[string]string) error { memoryRequest := volCtx[MountPodMemRequestKey] attr.Resources, err = ParsePodResources(cpuLimit, memoryLimit, cpuRequest, memoryRequest) if err != nil { - klog.Errorf("Parse resource error: %v", err) + log.Error(err, "Parse resource error") return err } if v, ok := volCtx[mountPodLabelKey]; ok && v != "" { @@ -504,12 +503,12 @@ func GenPodAttrWithMountPod(ctx context.Context, client *k8sclient.K8sClient, mo pvName := mountPod.Annotations[UniqueId] pv, err := client.GetPersistentVolume(ctx, pvName) if err != nil { - klog.Errorf("Get pv %s error: %v", pvName, err) + log.Error(err, "Get pv error", "pv", pvName) return nil, err } pvc, err := client.GetPersistentVolumeClaim(ctx, pv.Spec.ClaimRef.Name, pv.Spec.ClaimRef.Namespace) if err != nil { - klog.Errorf("Get pvc %s/%s error: %v", pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name, err) + log.Error(err, "Get pvc error", "namespace", pv.Spec.ClaimRef.Namespace, "name", pv.Spec.ClaimRef.Name) return nil, err } cpuLimit := 
pvc.Annotations[MountPodCpuLimitKey] diff --git a/pkg/controller/app_controller.go b/pkg/controller/app_controller.go index f7ad450f4f..73d40fe308 100644 --- a/pkg/controller/app_controller.go +++ b/pkg/controller/app_controller.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/klog" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" @@ -36,6 +36,10 @@ import ( "github.com/juicedata/juicefs-csi-driver/pkg/util" ) +var ( + appCtrlLog = klog.NewKlogr().WithName("app-controller") +) + type AppController struct { *k8sclient.K8sClient } @@ -45,19 +49,19 @@ func NewAppController(client *k8sclient.K8sClient) *AppController { } func (a *AppController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - klog.V(6).Infof("Receive pod %s %s", request.Name, request.Namespace) + appCtrlLog.V(1).Info("Receive pod %s %s", "name", request.Name, "namespace", request.Namespace) pod, err := a.K8sClient.GetPod(ctx, request.Name, request.Namespace) if err != nil && !k8serrors.IsNotFound(err) { - klog.Errorf("get pod %s error: %v", request.Name, err) + appCtrlLog.Error(err, "get pod error", "name", request.Name) return reconcile.Result{}, err } if pod == nil { - klog.V(6).Infof("pod %s not found.", request.Name) + appCtrlLog.V(1).Info("pod not found.", "name", request.Name) return reconcile.Result{}, nil } if !ShouldInQueue(pod) { - klog.V(6).Infof("pod %s namespace %s should not in queue", request.Name, request.Namespace) + appCtrlLog.V(1).Info("pod should not in queue", "name", request.Name) return reconcile.Result{}, nil } // umount fuse sidecars @@ -83,15 +87,16 @@ func (a *AppController) umountFuseSidecar(pod *corev1.Pod, fuseContainer corev1. 
if fuseContainer.Name == "" { return } + log := appCtrlLog.WithValues("pod", pod.Name, "namespace", pod.Namespace) // get prestop if fuseContainer.Lifecycle == nil || fuseContainer.Lifecycle.PreStop == nil || fuseContainer.Lifecycle.PreStop.Exec == nil { - klog.Infof("[AppController] no prestop in container %s of pod [%s] in [%s]", config.MountContainerName, pod.Name, pod.Namespace) + log.Info("no prestop in container of pod", "cnName", config.MountContainerName) return nil } cmd := fuseContainer.Lifecycle.PreStop.Exec.Command - klog.Infof("[AppController] exec cmd [%s] in container %s of pod [%s] in [%s]", cmd, config.MountContainerName, pod.Name, pod.Namespace) + log.Info("exec cmd in container of pod", "command", cmd, "cnName", config.MountContainerName) stdout, stderr, err := a.K8sClient.ExecuteInContainer(pod.Name, pod.Namespace, fuseContainer.Name, cmd) if err != nil { if strings.Contains(stderr, "not mounted") || @@ -101,10 +106,10 @@ func (a *AppController) umountFuseSidecar(pod *corev1.Pod, fuseContainer corev1. return nil } if strings.Contains(err.Error(), "exit code 137") { - klog.Warningf("[AppController] exec with exit code 137, ignore it. error: %v", err) + log.Error(err, "exec with exit code 137, ignore it.") return nil } - klog.Errorf("[AppController] error: %v; exec stdout: %s; exec stderr: %s", err, stdout, stderr) + log.Error(err, "exec error", "stdout", stdout, "stderr", stderr) } return } @@ -118,43 +123,43 @@ func (a *AppController) SetupWithManager(mgr ctrl.Manager) error { CreateFunc: func(event event.CreateEvent) bool { pod, ok := event.Object.(*corev1.Pod) if !ok { - klog.V(6).Infof("[pod.onCreate] can not turn into pod, Skip. object: %v", event.Object) + appCtrlLog.V(1).Info("[pod.onCreate] can not turn into pod, Skip.", "object", event.Object) return false } if !ShouldInQueue(pod) { - klog.V(6).Infof("[pod.onCreate] skip due to shouldRequeue false. 
pod: [%s] in [%s]", pod.Name, pod.Namespace) + appCtrlLog.V(1).Info("[pod.onCreate] skip due to shouldRequeue false.", "name", pod.Name, "namespace", pod.Namespace) return false } - klog.V(6).Infof("[pod.onCreate] pod [%s] in [%s] requeue", pod.Name, pod.Namespace) + appCtrlLog.V(1).Info("[pod.onCreate] pod in requeue", "name", pod.Name, "namespace", pod.Namespace) return true }, UpdateFunc: func(updateEvent event.UpdateEvent) bool { podNew, ok := updateEvent.ObjectNew.(*corev1.Pod) if !ok { - klog.V(6).Infof("[pod.onUpdate] can not turn into pod, Skip. object: %v", updateEvent.ObjectNew) + appCtrlLog.V(1).Info("[pod.onUpdate] can not turn into pod, Skip.", "object", updateEvent.ObjectNew) return false } podOld, ok := updateEvent.ObjectOld.(*corev1.Pod) if !ok { - klog.V(6).Infof("[pod.onUpdate] can not turn into pod, Skip. object: %v", updateEvent.ObjectOld) + appCtrlLog.V(1).Info("[pod.onUpdate] can not turn into pod, Skip.", "object", updateEvent.ObjectOld) return false } if podNew.GetResourceVersion() == podOld.GetResourceVersion() { - klog.V(6).Infof("[pod.onUpdate] Skip due to resourceVersion not changed, pod: [%s] in [%s]", podNew.Name, podNew.Namespace) + appCtrlLog.V(1).Info("[pod.onUpdate] Skip due to resourceVersion not changed", "name", podNew.Name, "namespace", podNew.Namespace) return false } // ignore if it's not fluid label pod if !ShouldInQueue(podNew) { - klog.V(6).Infof("[pod.onUpdate] skip due to shouldRequeue false. 
pod: [%s] in [%s]", podNew.Name, podNew.Namespace) + appCtrlLog.V(1).Info("[pod.onUpdate] skip due to shouldRequeue false.", "name", podNew.Name, "namespace", podNew.Namespace) return false } - klog.V(6).Infof("[pod.onUpdate] pod [%s] in [%s] requeue", podNew.Name, podNew.Namespace) + appCtrlLog.V(1).Info("[pod.onUpdate] pod requeue", "name", podNew.Name, "namespace", podNew.Namespace) return true }, DeleteFunc: func(deleteEvent event.DeleteEvent) bool { @@ -169,15 +174,17 @@ func ShouldInQueue(pod *corev1.Pod) bool { return false } + log := appCtrlLog.WithValues("pod", pod.Name, "namespace", pod.Namespace) + // ignore if it's not fluid label pod if util.CheckExpectValue(pod.Labels, config.InjectSidecarDisable, config.True) { - klog.V(6).Infof("Sidecar inject disabled in pod [%s] in [%s] labels %v, skip.", pod.Name, pod.Namespace, pod.Labels) + log.V(1).Info("Sidecar inject disabled in pod in labels, skip.", "labels", pod.Labels) return false } // ignore if restartPolicy is Always if pod.Spec.RestartPolicy == corev1.RestartPolicyAlways { - klog.V(6).Infof("pod [%s] in [%s] restart policy always, skip.", pod.Name, pod.Namespace) + log.V(1).Info("pod restart policy always, skip.") return false } @@ -190,29 +197,29 @@ func ShouldInQueue(pod *corev1.Pod) bool { } } if !exist { - klog.V(6).Infof("There are no juicefs sidecar in pod [%s] in [%s].", pod.Name, pod.Namespace) + log.V(1).Info("There are no juicefs sidecar in pod") return false } // ignore if pod status is not running if pod.Status.Phase != corev1.PodRunning || len(pod.Status.ContainerStatuses) < 2 { - klog.V(6).Infof("Pod [%s] in [%s] status is not running or containerStatus less than 2.", pod.Name, pod.Namespace) + log.V(1).Info("Pod status is not running or containerStatus less than 2.") return false } // reconcile if all app containers exit 0 and fuse container not exit for _, containerStatus := range pod.Status.ContainerStatuses { if !strings.Contains(containerStatus.Name, config.MountContainerName) { - 
klog.V(6).Infof("container %s in pod [%s] in [%s] status: %v", containerStatus.Name, pod.Name, pod.Namespace, containerStatus) + log.V(1).Info("container status", "container", containerStatus.Name, "status", containerStatus) if containerStatus.State.Terminated == nil { - klog.V(6).Infof("container %s in pod [%s] in [%s] not exited", containerStatus.Name, pod.Name, pod.Namespace) + log.V(1).Info("container not exited", "container", containerStatus.Name) // container not exist return false } } if strings.Contains(containerStatus.Name, config.MountContainerName) { if containerStatus.State.Running == nil { - klog.V(6).Infof("juicefs fuse client in pod [%s] in [%s] not running", pod.Name, pod.Namespace) + log.V(1).Info("juicefs fuse client in pod not running") return false } } diff --git a/pkg/controller/job_controller.go b/pkg/controller/job_controller.go index 2a0f68a6f6..a2705cbd16 100644 --- a/pkg/controller/job_controller.go +++ b/pkg/controller/job_controller.go @@ -23,7 +23,7 @@ import ( k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" - "k8s.io/klog" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" @@ -37,6 +37,10 @@ import ( "github.com/juicedata/juicefs-csi-driver/pkg/util/resource" ) +var ( + jobCtrlLog = klog.NewKlogr().WithName("job-controller") +) + type JobController struct { *k8sclient.K8sClient } @@ -46,20 +50,20 @@ func NewJobController(client *k8sclient.K8sClient) *JobController { } func (m *JobController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - klog.V(6).Infof("Receive job %s %s", request.Name, request.Namespace) + jobCtrlLog.V(1).Info("Receive job", "name", request.Name, "namespace", request.Namespace) job, err := m.GetJob(ctx, request.Name, request.Namespace) if err != nil && !k8serrors.IsNotFound(err) { - klog.Errorf("get job 
%s error: %v", request.Name, err) + jobCtrlLog.Error(err, "get job error", "name", request.Name) return reconcile.Result{}, err } if job == nil { - klog.V(6).Infof("job %s has been deleted.", request.Name) + jobCtrlLog.V(1).Info("job has been deleted.", "name", request.Name) return reconcile.Result{}, nil } // check job deleted if job.DeletionTimestamp != nil { - klog.V(6).Infof("job %s is deleted", job.Name) + jobCtrlLog.V(1).Info("job is deleted", "name", job.Name) return reconcile.Result{}, nil } @@ -69,9 +73,9 @@ func (m *JobController) Reconcile(ctx context.Context, request reconcile.Request // when job not set nodeName, don't need to check csi node if resource.IsJobShouldBeRecycled(job) { // try to delete job - klog.Infof("job %s completed but not be recycled automatically, delete it", job.Name) + jobCtrlLog.Info("job completed but not be recycled automatically, delete it", "name", job.Name) if err := m.DeleteJob(ctx, job.Name, job.Namespace); err != nil { - klog.Errorf("delete job %s error %v", job.Name, err) + jobCtrlLog.Error(err, "delete job error", "name", job.Name) return reconcile.Result{}, err } } @@ -88,20 +92,20 @@ func (m *JobController) Reconcile(ctx context.Context, request reconcile.Request } csiPods, err := m.ListPod(ctx, config.Namespace, &labelSelector, &fieldSelector) if err != nil { - klog.Errorf("list pod by label %s and field %s error: %v", config.CSINodeLabelValue, nodeName, err) + jobCtrlLog.Error(err, "list pod by label and field error", "label", config.CSINodeLabelValue, "node", nodeName) return reconcile.Result{}, err } if len(csiPods) == 0 { - klog.Infof("csi node in %s not exists, job %s should be recycled.", nodeName, job.Name) + jobCtrlLog.Info("csi node not exists, job should be recycled.", "node", nodeName, "name", job.Name) needRecycled = true } // if csi node not exist, or job should be recycled itself, delete it if needRecycled || resource.IsJobShouldBeRecycled(job) { - klog.Infof("recycle job %s", job.Name) + 
jobCtrlLog.Info("recycle job", "name", job.Name) err = m.DeleteJob(ctx, job.Name, job.Namespace) if err != nil { - klog.Errorf("delete job %s error: %v", job.Name, err) + jobCtrlLog.Error(err, "delete job error", "name", job.Name) return reconcile.Result{Requeue: true}, err } } @@ -118,45 +122,45 @@ func (m *JobController) SetupWithManager(mgr ctrl.Manager) error { return c.Watch(&source.Kind{Type: &batchv1.Job{}}, &handler.EnqueueRequestForObject{}, predicate.Funcs{ CreateFunc: func(event event.CreateEvent) bool { job := event.Object.(*batchv1.Job) - klog.V(6).Infof("watch job %s created", job.GetName()) + jobCtrlLog.V(1).Info("watch job created", "name", job.GetName()) // check job deleted if job.DeletionTimestamp != nil { - klog.V(6).Infof("job %s is deleted", job.Name) + jobCtrlLog.V(1).Info("job is deleted", "name", job.Name) return false } return true }, UpdateFunc: func(updateEvent event.UpdateEvent) bool { jobNew, ok := updateEvent.ObjectNew.(*batchv1.Job) - klog.V(6).Infof("watch job %s updated", jobNew.GetName()) + jobCtrlLog.V(1).Info("watch job updated", "name", jobNew.GetName()) if !ok { - klog.V(6).Infof("job.onUpdateFunc Skip object: %v", updateEvent.ObjectNew) + jobCtrlLog.V(1).Info("job.onUpdateFunc Skip object", "object", updateEvent.ObjectNew) return false } jobOld, ok := updateEvent.ObjectOld.(*batchv1.Job) if !ok { - klog.V(6).Infof("job.onUpdateFunc Skip object: %v", updateEvent.ObjectOld) + jobCtrlLog.V(1).Info("job.onUpdateFunc Skip object", "object", updateEvent.ObjectOld) return false } if jobNew.GetResourceVersion() == jobOld.GetResourceVersion() { - klog.V(6).Info("job.onUpdateFunc Skip due to resourceVersion not changed") + jobCtrlLog.V(1).Info("job.onUpdateFunc Skip due to resourceVersion not changed") return false } // check job deleted if jobNew.DeletionTimestamp != nil { - klog.V(6).Infof("job %s is deleted", jobNew.Name) + jobCtrlLog.V(1).Info("job is deleted", "name", jobNew.Name) return false } return true }, DeleteFunc:
func(deleteEvent event.DeleteEvent) bool { job := deleteEvent.Object.(*batchv1.Job) - klog.V(6).Infof("watch job %s deleted", job.GetName()) + jobCtrlLog.V(1).Info("watch job deleted", "name", job.GetName()) // check job deleted if job.DeletionTimestamp != nil { - klog.V(6).Infof("job %s is deleted", job.Name) + jobCtrlLog.V(1).Info("job is deleted", "name", job.Name) return false } return true diff --git a/pkg/controller/mount_controller.go b/pkg/controller/mount_controller.go index ba20ccd363..37b5178850 100644 --- a/pkg/controller/mount_controller.go +++ b/pkg/controller/mount_controller.go @@ -23,7 +23,7 @@ import ( k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" - "k8s.io/klog" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" @@ -38,6 +38,10 @@ import ( "github.com/juicedata/juicefs-csi-driver/pkg/util/resource" ) +var ( + mountCtrlLog = klog.NewKlogr().WithName("mount-controller") +) + type MountController struct { *k8sclient.K8sClient } @@ -47,20 +51,20 @@ func NewMountController(client *k8sclient.K8sClient) *MountController { } func (m MountController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - klog.V(6).Infof("Receive pod %s %s", request.Name, request.Namespace) + mountCtrlLog.V(1).Info("Receive pod", "name", request.Name, "namespace", request.Namespace) mountPod, err := m.GetPod(ctx, request.Name, request.Namespace) if err != nil && !k8serrors.IsNotFound(err) { - klog.Errorf("get pod %s error: %v", request.Name, err) + mountCtrlLog.Error(err, "get pod error", "name", request.Name) return reconcile.Result{}, err } if mountPod == nil { - klog.V(6).Infof("pod %s has been deleted.", request.Name) + mountCtrlLog.V(1).Info("pod has been deleted.", "name", request.Name) return reconcile.Result{}, nil } // check mount pod deleted if 
mountPod.DeletionTimestamp == nil { - klog.V(6).Infof("pod %s is not deleted", mountPod.Name) + mountCtrlLog.V(1).Info("pod is not deleted", "name", mountPod.Name) return reconcile.Result{}, nil } if !util.ContainsString(mountPod.GetFinalizers(), config.Finalizer) { @@ -78,19 +82,19 @@ func (m MountController) Reconcile(ctx context.Context, request reconcile.Reques } csiPods, err := m.ListPod(ctx, config.Namespace, &labelSelector, &fieldSelector) if err != nil { - klog.Errorf("list pod by label %s and field %s error: %v", config.CSINodeLabelValue, nodeName, err) + mountCtrlLog.Error(err, "list pod by label and field error", "labels", config.CSINodeLabelValue, "node", nodeName) return reconcile.Result{}, err } if len(csiPods) > 0 { - klog.V(6).Infof("csi node in %s exists.", nodeName) + mountCtrlLog.V(1).Info("csi node exists.", "node", nodeName) return reconcile.Result{}, nil } - klog.Infof("csi node in %s did not exist. remove finalizer of pod %s", nodeName, mountPod.Name) + mountCtrlLog.Info("csi node did not exist. 
remove finalizer of pod", "node", nodeName, "name", mountPod.Name) // remove finalizer err = resource.RemoveFinalizer(ctx, m.K8sClient, mountPod, config.Finalizer) if err != nil { - klog.Errorf("remove finalizer of pod %s error: %v", mountPod.Name, err) + mountCtrlLog.Error(err, "remove finalizer of pod error", "name", mountPod.Name) } return reconcile.Result{}, err @@ -105,10 +109,10 @@ func (m *MountController) SetupWithManager(mgr ctrl.Manager) error { return c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForObject{}, predicate.Funcs{ CreateFunc: func(event event.CreateEvent) bool { pod := event.Object.(*corev1.Pod) - klog.V(6).Infof("watch pod %s created", pod.GetName()) + mountCtrlLog.V(1).Info("watch pod created", "name", pod.GetName()) // check mount pod deleted if pod.DeletionTimestamp == nil { - klog.V(6).Infof("pod %s is not deleted", pod.Name) + mountCtrlLog.V(1).Info("pod is not deleted", "name", pod.Name) return false } if !util.ContainsString(pod.GetFinalizers(), config.Finalizer) { @@ -118,25 +122,25 @@ func (m *MountController) SetupWithManager(mgr ctrl.Manager) error { }, UpdateFunc: func(updateEvent event.UpdateEvent) bool { podNew, ok := updateEvent.ObjectNew.(*corev1.Pod) - klog.V(6).Infof("watch pod %s updated", podNew.GetName()) + mountCtrlLog.V(1).Info("watch pod updated", "name", podNew.GetName()) if !ok { - klog.V(6).Infof("pod.onUpdateFunc Skip object: %v", updateEvent.ObjectNew) + mountCtrlLog.V(1).Info("pod.onUpdateFunc Skip object", "object", updateEvent.ObjectNew) return false } podOld, ok := updateEvent.ObjectOld.(*corev1.Pod) if !ok { - klog.V(6).Infof("pod.onUpdateFunc Skip object: %v", updateEvent.ObjectOld) + mountCtrlLog.V(1).Info("pod.onUpdateFunc Skip object", "object", updateEvent.ObjectOld) return false } if podNew.GetResourceVersion() == podOld.GetResourceVersion() { - klog.V(6).Info("pod.onUpdateFunc Skip due to resourceVersion not changed") + mountCtrlLog.V(1).Info("pod.onUpdateFunc Skip due to 
resourceVersion not changed") return false } // check mount pod deleted if podNew.DeletionTimestamp == nil { - klog.V(6).Infof("pod %s is not deleted", podNew.Name) + mountCtrlLog.V(1).Info("pod is not deleted", "name", podNew.Name) return false } if !util.ContainsString(podNew.GetFinalizers(), config.Finalizer) { @@ -146,10 +150,10 @@ func (m *MountController) SetupWithManager(mgr ctrl.Manager) error { }, DeleteFunc: func(deleteEvent event.DeleteEvent) bool { pod := deleteEvent.Object.(*corev1.Pod) - klog.V(6).Infof("watch pod %s deleted", pod.GetName()) + mountCtrlLog.V(1).Info("watch pod deleted", "name", pod.GetName()) // check mount pod deleted if pod.DeletionTimestamp == nil { - klog.V(6).Infof("pod %s is not deleted", pod.Name) + mountCtrlLog.V(1).Info("pod is not deleted", "name", pod.Name) return false } if !util.ContainsString(pod.GetFinalizers(), config.Finalizer) { diff --git a/pkg/controller/mountinfo.go b/pkg/controller/mountinfo.go index 1338ed6c17..c7179f3fd5 100644 --- a/pkg/controller/mountinfo.go +++ b/pkg/controller/mountinfo.go @@ -22,12 +22,14 @@ import ( "strings" corev1 "k8s.io/api/core/v1" - "k8s.io/klog" + "k8s.io/klog/v2" k8sMount "k8s.io/utils/mount" "github.com/juicedata/juicefs-csi-driver/pkg/util" ) +var miLog = klog.NewKlogr().WithName("mountinfo") + type mountInfoTable struct { mis []k8sMount.MountInfo // key is pod UID @@ -63,7 +65,7 @@ func (mit *mountInfoTable) setPodsStatus(podList *corev1.PodList) { if pod.DeletionTimestamp != nil { deleted = true } - klog.V(6).Infof("set pod %s deleted status %v", pod.Name, deleted) + miLog.V(1).Info("set pod deleted status", "name", pod.Name, "deleted status", deleted) mit.deletedPods[string(pod.UID)] = deleted } } diff --git a/pkg/controller/pod_controller.go b/pkg/controller/pod_controller.go index 2216eb0737..64e2332b8f 100644 --- a/pkg/controller/pod_controller.go +++ b/pkg/controller/pod_controller.go @@ -24,7 +24,7 @@ import ( k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" - "k8s.io/klog" + "k8s.io/klog/v2" k8sexec "k8s.io/utils/exec" "k8s.io/utils/mount" ctrl "sigs.k8s.io/controller-runtime" @@ -40,6 +40,10 @@ import ( "github.com/juicedata/juicefs-csi-driver/pkg/util" ) +var ( + podCtrlLog = klog.NewKlogr().WithName("pod-controller") +) + type PodController struct { *k8sclient.K8sClient } @@ -49,27 +53,27 @@ func NewPodController(client *k8sclient.K8sClient) *PodController { } func (m *PodController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - klog.V(6).Infof("Receive pod %s %s", request.Name, request.Namespace) + podCtrlLog.V(1).Info("Receive pod", "name", request.Name, "namespace", request.Namespace) ctx, cancel := context.WithTimeout(ctx, config.ReconcileTimeout) defer cancel() mountPod, err := m.GetPod(ctx, request.Name, request.Namespace) if err != nil && !k8serrors.IsNotFound(err) { - klog.Errorf("get pod %s error: %v", request.Name, err) + podCtrlLog.Error(err, "get pod error", "name", request.Name) return reconcile.Result{}, err } if mountPod == nil { - klog.V(6).Infof("pod %s has been deleted.", request.Name) + podCtrlLog.V(1).Info("pod has been deleted.", "name", request.Name) return reconcile.Result{}, nil } if mountPod.Spec.NodeName != config.NodeName && mountPod.Spec.NodeSelector["kubernetes.io/hostname"] != config.NodeName { - klog.V(6).Infof("pod %s/%s is not on node %s, skipped", mountPod.Namespace, mountPod.Name, config.NodeName) + podCtrlLog.V(1).Info("pod is not on node, skipped", "namespace", mountPod.Namespace, "name", mountPod.Name, "node", config.NodeName) return reconcile.Result{}, nil } // get mount info mit := newMountInfoTable() if err := mit.parse(); err != nil { - klog.Errorf("doReconcile ParseMountInfo: %v", err) + podCtrlLog.Error(err, "doReconcile ParseMountInfo error") return reconcile.Result{}, err } @@ -82,7 +86,7 @@ func (m *PodController) Reconcile(ctx context.Context, request 
reconcile.Request fieldSelector := &fields.Set{"spec.nodeName": config.NodeName} podLists, err := m.K8sClient.ListPod(ctx, "", &labelSelector, fieldSelector) if err != nil { - klog.Errorf("reconcile ListPod: %v", err) + podCtrlLog.Error(err, "reconcile ListPod error") return reconcile.Result{}, err } @@ -97,7 +101,7 @@ func (m *PodController) Reconcile(ctx context.Context, request reconcile.Request result, err := podDriver.Run(ctx, mountPod) if err != nil { - klog.Errorf("Driver check pod %s error: %v", mountPod.Name, err) + podCtrlLog.Error(err, "Driver check pod error", "podName", mountPod.Name) return reconcile.Result{}, err } if mountPod.Annotations[config.DeleteDelayAtKey] != "" { @@ -139,7 +143,7 @@ func (m *PodController) SetupWithManager(mgr ctrl.Manager) error { if pod.Spec.NodeName != config.NodeName && pod.Spec.NodeSelector["kubernetes.io/hostname"] != config.NodeName { return false } - klog.V(6).Infof("watch pod %s created", pod.GetName()) + podCtrlLog.V(1).Info("watch pod created", "podName", pod.GetName()) return true }, UpdateFunc: func(updateEvent event.UpdateEvent) bool { @@ -147,20 +151,20 @@ func (m *PodController) SetupWithManager(mgr ctrl.Manager) error { if podNew.Spec.NodeName != config.NodeName && podNew.Spec.NodeSelector["kubernetes.io/hostname"] != config.NodeName { return false } - klog.V(6).Infof("watch pod %s updated", podNew.GetName()) + podCtrlLog.V(1).Info("watch pod updated", "podName", podNew.GetName()) if !ok { - klog.V(6).Infof("pod.onUpdateFunc Skip object: %v", updateEvent.ObjectNew) + podCtrlLog.V(1).Info("pod.onUpdateFunc Skip object", "objectName", updateEvent.ObjectNew) return false } podOld, ok := updateEvent.ObjectOld.(*corev1.Pod) if !ok { - klog.V(6).Infof("pod.onUpdateFunc Skip object: %v", updateEvent.ObjectOld) + podCtrlLog.V(1).Info("pod.onUpdateFunc Skip object", "objectName", updateEvent.ObjectOld) return false } if podNew.GetResourceVersion() == podOld.GetResourceVersion() { - klog.V(6).Info("pod.onUpdateFunc 
Skip due to resourceVersion not changed") + podCtrlLog.V(1).Info("pod.onUpdateFunc Skip due to resourceVersion not changed") return false } return true @@ -170,7 +174,7 @@ func (m *PodController) SetupWithManager(mgr ctrl.Manager) error { if pod.Spec.NodeName != config.NodeName && pod.Spec.NodeSelector["kubernetes.io/hostname"] != config.NodeName { return false } - klog.V(6).Infof("watch pod %s deleted", pod.GetName()) + podCtrlLog.V(1).Info("watch pod deleted", "podName", pod.GetName()) return true }, }) diff --git a/pkg/controller/pod_driver.go b/pkg/controller/pod_driver.go index 3ba98a26e8..3d463a948c 100644 --- a/pkg/controller/pod_driver.go +++ b/pkg/controller/pod_driver.go @@ -28,7 +28,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/utils/mount" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -46,6 +46,10 @@ const ( defaultTargetMountCounts = 5 ) +var ( + podDriverLog = klog.NewKlogr().WithName("pod-driver") +) + type PodDriver struct { Client *k8sclient.K8sClient handlers map[podStatus]podHandler @@ -90,7 +94,7 @@ func (p *PodDriver) SetMountInfo(mit mountInfoTable) { func (p *PodDriver) Run(ctx context.Context, current *corev1.Pod) (Result, error) { podStatus := getPodStatus(current) - klog.V(6).Infof("[PodDriver] start handle pod %s/%s, status: %s", current.Namespace, current.Name, podStatus) + podDriverLog.V(1).Info("start handle pod", "namespace", current.Namespace, "name", current.Name, "status", podStatus) // check refs in mount pod annotation first, delete ref that target pod is not found err := p.checkAnnotations(ctx, current) if err != nil { @@ -151,7 +155,7 @@ func (p *PodDriver) checkAnnotations(ctx context.Context, pod *corev1.Pod) error _, exists := p.mit.deletedPods[targetUid] if !exists { // target pod is deleted - klog.V(5).Infof("[PodDriver] get app pod %s deleted in annotations of 
mount pod, remove its ref.", "appId", targetUid) delAnnotations = append(delAnnotations, k) continue } @@ -198,17 +202,17 @@ func (p *PodDriver) checkAnnotations(ctx context.Context, pod *corev1.Pod) error }, pod.Name, fmt.Errorf("can not delete pod")) } // if there are no refs or after delay time, delete it - klog.V(5).Infof("There are no refs in pod %s annotation, delete it", pod.Name) + podDriverLog.Info("There are no refs in pod annotation, delete it", "podName", pod.Name) if err := p.Client.DeletePod(ctx, pod); err != nil { - klog.Errorf("Delete pod %s error: %v", pod.Name, err) + podDriverLog.Error(err, "Delete pod", "podName", pod.Name) return err } // for old version // delete related secret secretName := pod.Name + "-secret" - klog.V(6).Infof("delete related secret of pod: %s", secretName) + podDriverLog.V(1).Info("delete related secret of pod", "secretName", secretName) if err := p.Client.DeleteSecret(ctx, secretName, pod.Namespace); !apierrors.IsNotFound(err) && err != nil { - klog.V(6).Infof("Delete secret %s error: %v", secretName, err) + podDriverLog.V(1).Info("Delete secret error", "secretName", secretName, "error", err) } } } @@ -220,6 +224,7 @@ func (p *PodDriver) podErrorHandler(ctx context.Context, pod *corev1.Pod) (Resul if pod == nil { return Result{}, nil } + log := podDriverLog.WithName("podErrorHandler").WithValues("podName", pod.Name) hashVal := pod.Labels[config.PodJuiceHashLabelKey] if hashVal == "" { return Result{}, fmt.Errorf("pod %s/%s has no hash label", pod.Namespace, pod.Name) @@ -230,13 +235,13 @@ func (p *PodDriver) podErrorHandler(ctx context.Context, pod *corev1.Pod) (Resul // check resource err if resource.IsPodResourceError(pod) { - klog.V(5).Infof("[podErrorHandler]waitUtilMount: Pod %s failed because of resource.", pod.Name) + log.Info("Pod failed because of resource.") if resource.IsPodHasResource(*pod) { // if pod is failed because of
resource, delete resource and deploy pod again. _ = resource.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer) - klog.V(5).Infof("Delete it and deploy again with no resource.") + log.Info("Delete it and deploy again with no resource.") if err := p.Client.DeletePod(ctx, pod); err != nil { - klog.Errorf("delete po:%s err:%v", pod.Name, err) + log.Error(err, "delete pod err") return Result{}, nil } isDeleted := false @@ -244,7 +249,7 @@ func (p *PodDriver) podErrorHandler(ctx context.Context, pod *corev1.Pod) (Resul for { _, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace) if err == nil { - klog.V(6).Infof("pod %s %s still exists wait.", pod.Name, pod.Namespace) + log.V(1).Info("pod still exists wait.") time.Sleep(time.Microsecond * 500) continue } @@ -258,10 +263,10 @@ func (p *PodDriver) podErrorHandler(ctx context.Context, pod *corev1.Pod) (Resul if ctx.Err() == context.Canceled || ctx.Err() == context.DeadlineExceeded { break } - klog.Errorf("[podErrorHandler] get mountPod err:%v, podName: %s", err, pod.Name) + log.Error(err, "get mountPod err") } if !isDeleted { - klog.Errorf("[podErrorHandler] Old pod %s %s deleting timeout", pod.Name, config.Namespace) + log.Info("Old pod deleting timeout") return Result{RequeueImmediately: true}, nil } var newPod = &corev1.Pod{ @@ -277,38 +282,39 @@ func (p *PodDriver) podErrorHandler(ctx context.Context, pod *corev1.Pod) (Resul resource.DeleteResourceOfPod(newPod) err := mkrMp(ctx, *newPod) if err != nil { - klog.Errorf("[podDeletedHandler] mkdir mount point of pod %s err:%v", pod.Name, err) + log.Error(err, "mkdir mount point of pod") } _, err = p.Client.CreatePod(ctx, newPod) if err != nil { - klog.Errorf("[podErrorHandler] create pod:%s err:%v", pod.Name, err) + log.Error(err, "create pod") } } else { - klog.V(5).Infof("mountPod PodResourceError, but pod no resource, do nothing.") + log.Info("mountPod PodResourceError, but pod no resource, do nothing.") } } - klog.Errorf("[podErrorHandler]: Pod %s/%s with error: 
%s, %s", pod.Namespace, pod.Name, pod.Status.Reason, pod.Status.Message) + log.Info("Pod is error", "reason", pod.Status.Reason, "message", pod.Status.Message) return Result{}, nil } // podDeletedHandler handles mount pod that will be deleted func (p *PodDriver) podDeletedHandler(ctx context.Context, pod *corev1.Pod) (Result, error) { if pod == nil { - klog.Errorf("get nil pod") + podDriverLog.Info("get nil pod") return Result{}, nil } - klog.V(5).Infof("Pod %s in namespace %s is to be deleted.", pod.Name, pod.Namespace) + log := podDriverLog.WithName("podDeletedHandler").WithValues("podName", pod.Name) + log.Info("Pod is to be deleted.") // pod with no finalizer if !util.ContainsString(pod.GetFinalizers(), config.Finalizer) { - klog.V(6).Infof("Pod %s in namespace %s has no finalizer, skip deleting", pod.Name, pod.Namespace) + log.V(1).Info("Pod has no finalizer, skip deleting") // do nothing return Result{}, nil } // remove finalizer of pod if err := resource.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer); err != nil { - klog.Errorf("[podDeletedHandler] remove pod %s finalizer err:%v", pod.Name, err) + log.Error(err, "remove pod finalizer error") return Result{}, err } @@ -316,19 +322,19 @@ func (p *PodDriver) podDeletedHandler(ctx context.Context, pod *corev1.Pod) (Res // pod with resource error if resource.IsPodResourceError(pod) { - klog.V(6).Infof("The pod is PodResourceError, podDeletedHandler skip delete the pod:%s", pod.Name) + log.V(1).Info("The pod is PodResourceError, podDeletedHandler skip delete the pod") return Result{}, nil } // get mount point sourcePath, _, err := resource.GetMountPathOfPod(*pod) if err != nil { - klog.Error(err) + log.Error(err, "get mount point error") return Result{}, err } // check if it needs to create new one - klog.V(6).Infof("Annotations:%v", pod.Annotations) + log.V(1).Info("Get pod annotations", "annotations", pod.Annotations) if pod.Annotations == nil { return Result{}, nil } @@ -352,11 +358,11 @@ func (p 
*PodDriver) podDeletedHandler(ctx context.Context, pod *corev1.Pod) (Res util.UmountPath(ctx, sourcePath) // clean mount point err = util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error { - klog.V(5).Infof("Clean mount point : %s", sourcePath) + log.Info("Clean mount point", "mountPath", sourcePath) return mount.CleanupMountPoint(sourcePath, p.SafeFormatAndMount.Interface, false) }) if err != nil { - klog.Errorf("[podDeletedHandler] Clean mount point %s error: %v, podName:%s", sourcePath, err, pod.Name) + log.Error(err, "Clean mount point error", "mountPath", sourcePath) } // cleanup cache should always complete, don't set timeout go p.CleanUpCache(context.TODO(), pod) @@ -370,13 +376,13 @@ func (p *PodDriver) podDeletedHandler(ctx context.Context, pod *corev1.Pod) (Res defer lock.Unlock() // create - klog.V(5).Infof("pod targetPath not empty, need create pod:%s", pod.Name) + log.Info("pod targetPath not empty, need create pod") // check pod delete for { po, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace) if err == nil && po.DeletionTimestamp != nil { - klog.V(6).Infof("pod %s %s is being deleted, waiting", pod.Name, pod.Namespace) + log.V(1).Info("pod is being deleted, waiting") time.Sleep(time.Millisecond * 500) continue } @@ -397,9 +403,9 @@ func (p *PodDriver) podDeletedHandler(ctx context.Context, pod *corev1.Pod) (Res Spec: pod.Spec, } controllerutil.AddFinalizer(newPod, config.Finalizer) - klog.Infof("Need to create pod %s %s", pod.Name, pod.Namespace) + log.Info("Need to create pod") if err := p.applyConfigPatch(ctx, newPod); err != nil { - klog.Errorf("apply config patch error, will ignore, err: %v", err) + log.Error(err, "apply config patch error, will ignore") } if !util.SupportFusePass(newPod.Spec.Containers[0].Image) { if oldSupportFusePass { @@ -415,7 +421,7 @@ func (p *PodDriver) podDeletedHandler(ctx context.Context, pod *corev1.Pod) (Res return nil }) if err == nil { - klog.Infof("start to umount: %s", sourcePath) + log.Info("start to 
umount", "mountPath", sourcePath) _ = util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error { util.UmountPath(ctx, sourcePath) return nil @@ -424,15 +430,15 @@ func (p *PodDriver) podDeletedHandler(ctx context.Context, pod *corev1.Pod) (Res } err = mkrMp(ctx, *newPod) if err != nil { - klog.Errorf("[podDeletedHandler] mkdir mount point of pod %s err:%v", pod.Name, err) + log.Error(err, "mkdir mount point of pod") } _, err = p.Client.CreatePod(ctx, newPod) if err != nil { - klog.Errorf("[podDeletedHandler] Create pod:%s err:%v", pod.Name, err) + log.Error(err, "Create pod") } return Result{RequeueImmediately: true}, err } - klog.Errorf("[podDeletedHandler] Get pod: %s err:%v", pod.Name, err) + log.Error(err, "Get pod error") return Result{}, nil } @@ -445,12 +451,12 @@ func (p *PodDriver) podDeletedHandler(ctx context.Context, pod *corev1.Pod) (Res po.Annotations[k] = v } if err := resource.ReplacePodAnnotation(ctx, p.Client, pod, po.Annotations); err != nil { - klog.Errorf("[podDeletedHandler] Update pod %s %s error: %v", po.Name, po.Namespace, err) + log.Error(err, "Update pod error") } return Result{}, err } err = fmt.Errorf("old pod %s %s deleting timeout", pod.Name, config.Namespace) - klog.V(5).Infof(err.Error()) + log.Error(err, "delete pod error") return Result{}, err } @@ -459,6 +465,7 @@ func (p *PodDriver) podPendingHandler(ctx context.Context, pod *corev1.Pod) (Res if pod == nil { return Result{}, nil } + log := podDriverLog.WithName("podPendingHandler").WithValues("podName", pod.Name) hashVal := pod.Labels[config.PodJuiceHashLabelKey] if hashVal == "" { return Result{}, fmt.Errorf("pod %s/%s has no hash label", pod.Namespace, pod.Name) @@ -469,13 +476,13 @@ func (p *PodDriver) podPendingHandler(ctx context.Context, pod *corev1.Pod) (Res // check resource err if resource.IsPodResourceError(pod) { - klog.V(5).Infof("waitUtilMount: Pod %s failed because of resource.", pod.Name) + log.Info("Pod failed because of resource.") if
resource.IsPodHasResource(*pod) { // if pod is failed because of resource, delete resource and deploy pod again. _ = resource.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer) - klog.V(5).Infof("Delete it and deploy again with no resource.") + log.Info("Delete it and deploy again with no resource.") if err := p.Client.DeletePod(ctx, pod); err != nil { - klog.Errorf("delete po:%s err:%v", pod.Name, err) + log.Error(err, "delete pod error") return Result{}, nil } isDeleted := false @@ -483,7 +490,7 @@ func (p *PodDriver) podPendingHandler(ctx context.Context, pod *corev1.Pod) (Res for { _, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace) if err == nil { - klog.V(6).Infof("pod %s %s still exists wait.", pod.Name, pod.Namespace) + log.V(1).Info("pod still exists wait.") time.Sleep(time.Microsecond * 500) continue } @@ -497,10 +504,10 @@ func (p *PodDriver) podPendingHandler(ctx context.Context, pod *corev1.Pod) (Res if ctx.Err() == context.Canceled || ctx.Err() == context.DeadlineExceeded { break } - klog.Errorf("[podPendingHandler] get mountPod err:%v, podName: %s", err, pod.Name) + log.Error(err, "get mountPod err") } if !isDeleted { - klog.Errorf("[podPendingHandler] Old pod %s %s deleting timeout", pod.Name, config.Namespace) + log.Info("Old pod deleting timeout") return Result{}, nil } var newPod = &corev1.Pod{ @@ -516,14 +523,14 @@ func (p *PodDriver) podPendingHandler(ctx context.Context, pod *corev1.Pod) (Res resource.DeleteResourceOfPod(newPod) err := mkrMp(ctx, *newPod) if err != nil { - klog.Errorf("[podDeletedHandler] mkdir mount point of pod %s err:%v", pod.Name, err) + log.Error(err, "mkdir mount point of pod error") } _, err = p.Client.CreatePod(ctx, newPod) if err != nil { - klog.Errorf("[podPendingHandler] create pod:%s err:%v", pod.Name, err) + log.Error(err, "create pod error") } } else { - klog.V(5).Infof("mountPod PodResourceError, but pod no resource, do nothing.") + log.Info("mountPod PodResourceError, but pod no resource, do nothing.") } 
} return Result{}, nil @@ -532,9 +539,9 @@ func (p *PodDriver) podPendingHandler(ctx context.Context, pod *corev1.Pod) (Res // podReadyHandler handles mount pod that is ready func (p *PodDriver) podReadyHandler(ctx context.Context, pod *corev1.Pod) (Result, error) { if pod == nil { - klog.Errorf("[podReadyHandler] get nil pod") return Result{}, nil } + log := podDriverLog.WithName("podReadyHandler").WithValues("podName", pod.Name) if pod.Annotations == nil { return Result{}, nil @@ -542,7 +549,7 @@ func (p *PodDriver) podReadyHandler(ctx context.Context, pod *corev1.Pod) (Resul // get mount point mntPath, _, err := resource.GetMountPathOfPod(*pod) if err != nil { - klog.Error(err) + log.Error(err, "get mount point error") return Result{}, err } @@ -556,9 +563,9 @@ func (p *PodDriver) podReadyHandler(ctx context.Context, pod *corev1.Pod) (Resul err = resource.WaitUtilMountReady(ctx, pod.Name, mntPath, defaultCheckoutTimeout) if err != nil { if supFusePass { - klog.Errorf("[podReadyHandler] waitUtilMountReady pod %s error: %v", pod.Name, err) + log.Error(err, "pod is not ready within 60s") // mount pod hang probably, close fd and delete it - klog.Infof("close fd and delete pod %s", pod.Name) + log.Info("close fd and delete pod") fuse.GlobalFds.CloseFd(podHashVal) // umount it _ = util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error { @@ -567,7 +574,7 @@ func (p *PodDriver) podReadyHandler(ctx context.Context, pod *corev1.Pod) (Resul }) return Result{RequeueImmediately: true}, p.Client.DeletePod(ctx, pod) } - klog.Errorf("[podReadyHandler] waitUtilMountReady pod %s err: %v, don't do recovery", pod.Name, err) + log.Error(err, "pod is err, don't do recovery") return Result{}, err } @@ -575,11 +582,12 @@ func (p *PodDriver) podReadyHandler(ctx context.Context, pod *corev1.Pod) (Resul } func (p *PodDriver) recover(ctx context.Context, pod *corev1.Pod, mntPath string) error { + log := podDriverLog.WithName("recover").WithValues("podName", pod.Name) for k, target := 
range pod.Annotations { if k == util.GetReferenceKey(target) { mi := p.mit.resolveTarget(ctx, target) if mi == nil { - klog.Errorf("[podReadyHandler] pod %s target %s resolve fail", pod.Name, target) + log.Info("pod target resolve fail", "target", target) continue } @@ -598,43 +606,45 @@ func (p *PodDriver) recover(ctx context.Context, pod *corev1.Pod, mntPath string // recoverTarget recovers target path func (p *PodDriver) recoverTarget(ctx context.Context, podName, sourcePath string, ti *targetItem, mi *mountItem) error { + log := podDriverLog.WithName("recover").WithValues("podName", podName) switch ti.status { case targetStatusNotExist: - klog.Errorf("pod %s target %s not exists, item count:%d", podName, ti.target, ti.count) + log.Info("pod target not exists", "target", ti.target, "item count", ti.count) if ti.count > 0 { // target exist in /proc/self/mountinfo file // refer to this case: local target exist, but source which target binded has beed deleted // if target is for pod subpath (volumeMount.subpath), this will cause user pod delete failed, so we help kubelet umount it if mi.podDeleted { + log.Info("umount target", "count", ti.count) p.umountTarget(ti.target, ti.count) } } case targetStatusMounted: // normal, most likely happen - klog.V(6).Infof("pod %s target %s is normal mounted", podName, ti.target) + log.V(1).Info("target is normal mounted", "target", ti.target) case targetStatusNotMount: - klog.V(5).Infof("pod %s target %s is not mounted", podName, ti.target) + log.Info("target is not mounted", "target", ti.target) case targetStatusCorrupt: if ti.inconsistent { // source paths (found in /proc/self/mountinfo) which target binded is inconsistent // some unexpected things happened - klog.Errorf("pod %s target %s, source inconsistent", podName, ti.target) + log.Info("pod source inconsistent", "target", ti.target) break } if mi.podDeleted { - klog.V(6).Infof("pod %s target %s, user pod has been deleted, don't do recovery", podName, ti.target) + 
log.V(1).Info("user pod has been deleted, don't do recovery", "target", ti.target) break } // if not umountTarget, mountinfo file will increase unlimited // if we umount all the target items, `mountPropagation` will lose efficacy - klog.V(5).Infof("umount pod %s target %s before recover and remain mount count %d", podName, ti.target, defaultTargetMountCounts) + log.Info("umount pod target before recover", "target", ti.target, "remain mount count", defaultTargetMountCounts) // avoid umount target all, it will cause pod to write files in disk. err := p.umountTargetUntilRemain(ctx, mi, ti.target, defaultTargetMountCounts) if err != nil { - klog.Error(err) + log.Error(err, "umount target until remain error") return err } if ti.subpath != "" { @@ -644,22 +654,21 @@ func (p *PodDriver) recoverTarget(ctx context.Context, podName, sourcePath strin return err }) if err != nil { - klog.Errorf("pod %s target %s, stat volPath: %s err: %v, don't do recovery", podName, ti.target, sourcePath, err) + log.Error(err, "stat volPath error, don't do recovery", "target", ti.target, "mountPath", sourcePath) break } } - klog.V(5).Infof("pod %s target %s recover volPath:%s", podName, ti.target, sourcePath) + log.Info("recover volPath", "target", ti.target, "mountPath", sourcePath) mountOption := []string{"bind"} if err := p.Mount(sourcePath, ti.target, "none", mountOption); err != nil { ms := fmt.Sprintf("exec cmd: mount -o bind %s %s err:%v", sourcePath, ti.target, err) - klog.Errorf(ms) + log.Error(err, "bind mount error") return fmt.Errorf(ms) } case targetStatusUnexpect: - ms := fmt.Sprintf("pod %s target %s reslove err:%v", podName, ti.target, ti.err) - klog.Errorf(ms) - return fmt.Errorf(ms) + log.Error(ti.err, "resolve target error") + return fmt.Errorf("pod %s target %s reslove err: %v", podName, ti.target, ti.err) } return nil } @@ -669,7 +678,6 @@ func (p *PodDriver) umountTarget(target string, count int) { if count <= 0 { return } - klog.V(5).Infof("umount target %d times", 
count) for i := 0; i < count; i++ { // ignore error _ = p.Unmount(target) @@ -710,6 +718,7 @@ func (p *PodDriver) umountTargetUntilRemain(ctx context.Context, basemi *mountIt // CleanUpCache clean up cache func (p *PodDriver) CleanUpCache(ctx context.Context, pod *corev1.Pod) { + log := podDriverLog.WithName("CleanUpCache").WithValues("podName", pod.Name) if pod.Annotations[config.CleanCache] != "true" { return } @@ -717,7 +726,7 @@ func (p *PodDriver) CleanUpCache(ctx context.Context, pod *corev1.Pod) { uniqueId := pod.Annotations[config.UniqueId] if uuid == "" && uniqueId == "" { // no necessary info, return - klog.Errorf("[CleanUpCache] Can't get uuid and uniqueId from pod %s annotation. skip cache clean.", pod.Name) + log.Info("Can't get uuid and uniqueId from pod annotation. skip cache clean.") return } @@ -734,17 +743,17 @@ func (p *PodDriver) CleanUpCache(ctx context.Context, pod *corev1.Pod) { if apierrors.IsTimeout(err) { break } - klog.V(5).Infof("[CleanUpCache] Get pod %s error %v. Skip clean cache.", pod.Name, err) + log.Info("Get pod error. Skip clean cache.", "error", err) return } time.Sleep(time.Microsecond * 500) } if !isDeleted { - klog.Errorf("[CleanUpCache] Mount pod %s not deleted in 3 min. Skip clean cache.", pod.Name) + log.Info("Mount pod not deleted in 3 min. 
Skip clean cache.") return } - klog.V(5).Infof("[CleanUpCache] Cleanup cache of volume %s in node %s", uniqueId, config.NodeName) + log.Info("Cleanup cache of volume", "uniqueId", uniqueId, "node", config.NodeName) podMnt := podmount.NewPodMount(p.Client, p.SafeFormatAndMount) cacheDirs := []string{} for _, dir := range pod.Spec.Volumes { @@ -754,7 +763,7 @@ func (p *PodDriver) CleanUpCache(ctx context.Context, pod *corev1.Pod) { } image := pod.Spec.Containers[0].Image if err := podMnt.CleanCache(ctx, image, uuid, uniqueId, cacheDirs); err != nil { - klog.V(5).Infof("[CleanUpCache] Cleanup cache of volume %s error %v", uniqueId, err) + log.Error(err, "Cleanup cache of volume error", "uniqueId", uniqueId) } } @@ -797,6 +806,7 @@ func (p *PodDriver) checkMountPodStuck(pod *corev1.Pod) { if pod == nil || getPodStatus(pod) != podDeleted { return } + log := podDriverLog.WithName("abortFuse").WithValues("podName", pod.Name) mountPoint, _, _ := resource.GetMountPathOfPod(*pod) defer func() { if runtime.GOOS == "linux" { @@ -817,14 +827,14 @@ func (p *PodDriver) checkMountPodStuck(pod *corev1.Pod) { for { select { case <-ctx.Done(): - klog.V(5).Infof("pod %s/%s may be stuck in terminating state, create a job to abort fuse connection", pod.Namespace, pod.Name) + log.Info("mount pod may be stuck in terminating state, create a job to abort fuse connection") if runtime.GOOS == "linux" { if devMinor, ok := util.DevMinorTableLoad(mountPoint); ok { if err := p.doAbortFuse(pod, devMinor); err != nil { - klog.Errorf("mountpod: %s, abort fuse connection error: %v", pod.Name, err) + log.Error(err, "abort fuse connection error") } } else { - klog.Errorf("mountpod: %s, can't find devMinor of mountPoint %s", pod.Name, mountPoint) + log.Info("can't find devMinor of mountPoint", "mount point", mountPoint) } } return @@ -839,9 +849,10 @@ func (p *PodDriver) checkMountPodStuck(pod *corev1.Pod) { } func (p *PodDriver) doAbortFuse(mountpod *corev1.Pod, devMinor uint32) error { + log := 
podDriverLog.WithName("abortFuse").WithValues("podName", mountpod.Name) job := builder.NewFuseAbortJob(mountpod, devMinor) if _, err := p.Client.CreateJob(context.Background(), job); err != nil { - klog.Errorf("mountpod: %s, create fuse abort job error: %v", mountpod.Name, err) + log.Error(err, "create fuse abort job error") return err } @@ -850,7 +861,7 @@ func (p *PodDriver) doAbortFuse(mountpod *corev1.Pod, devMinor uint32) error { defer cancel() for { if waitCtx.Err() == context.Canceled || waitCtx.Err() == context.DeadlineExceeded { - klog.Errorf("mountpod: %s, fuse abort job %s/%s timeout", mountpod.Name, job.Namespace, job.Name) + log.Error(waitCtx.Err(), "fuse abort job timeout", "namespace", job.Namespace, "name", job.Name) break } job, err := p.Client.GetJob(waitCtx, job.Name, job.Namespace) @@ -858,12 +869,12 @@ func (p *PodDriver) doAbortFuse(mountpod *corev1.Pod, devMinor uint32) error { break } if err != nil { - klog.Errorf("mountpod: %s, get fuse abort job %s/%s error: %v", mountpod.Name, err, job.Namespace, job.Name) + log.Error(err, "get fuse abort job error", "namespace", job.Namespace, "name", job.Name) time.Sleep(10 * time.Second) continue } if resource.IsJobCompleted(job) { - klog.V(5).Infof("mountpod: %s, fuse abort job %s/%s completed", mountpod.Name, job.Namespace, job.Name) + log.Info("fuse abort job completed", "namespace", job.Namespace, "name", job.Name) break } time.Sleep(10 * time.Second) @@ -888,14 +899,14 @@ func mkrMp(ctx context.Context, pod corev1.Pod) error { if !shouldMkrMp { return nil } - klog.V(6).Infof("Prepare mountpoint for pod %s", pod.Name) + podDriverLog.V(1).Info("Prepare mountpoint for pod", "podName", pod.Name) // mkdir mountpath // get mount point var mntPath string var err error mntPath, _, err = resource.GetMountPathOfPod(pod) if err != nil { - klog.Error(err) + podDriverLog.Error(err, "get mount point error") return err } err = util.DoWithTimeout(ctx, 3*time.Second, func() error { diff --git 
a/pkg/controller/reconciler.go b/pkg/controller/reconciler.go index 9110893c85..f0aa7fd7c0 100644 --- a/pkg/controller/reconciler.go +++ b/pkg/controller/reconciler.go @@ -25,7 +25,7 @@ import ( "golang.org/x/sync/errgroup" "k8s.io/client-go/util/flowcontrol" - "k8s.io/klog" + "k8s.io/klog/v2" k8sexec "k8s.io/utils/exec" "k8s.io/utils/mount" @@ -38,6 +38,10 @@ const ( maxRetryPeriod = 300 * time.Second ) +var ( + reconcilerLog = klog.NewKlogr().WithName("reconciler-controller") +) + type PodReconciler struct { mount.SafeFormatAndMount *k8sclient.K8sClient @@ -63,7 +67,7 @@ func StartReconciler() error { // gen podDriver k8sClient, err := k8sclient.NewClient() if err != nil { - klog.V(5).Infof("Could not create k8s client %v", err) + reconcilerLog.Error(err, "Could not create k8s client") return err } @@ -88,11 +92,11 @@ func doReconcile(ks *k8sclient.K8sClient, kc *k8sclient.KubeletClient) { mit := newMountInfoTable() podList, err := kc.GetNodeRunningPods() if err != nil { - klog.Errorf("doReconcile GetNodeRunningPods: %v", err) + reconcilerLog.Error(err, "doReconcile GetNodeRunningPods error") goto finish } if err := mit.parse(); err != nil { - klog.Errorf("doReconcile ParseMountInfo: %v", err) + reconcilerLog.Error(err, "doReconcile ParseMountInfo error") goto finish } @@ -129,7 +133,7 @@ func doReconcile(ks *k8sclient.K8sClient, kc *k8sclient.KubeletClient) { select { case <-ctx.Done(): - klog.Infof("goroutine of pod %s cancel", pod.Name) + reconcilerLog.Info("goroutine of pod cancel", "name", pod.Name) return nil default: if !backOff.IsInBackOffSinceUpdate(backOffID, backOff.Clock.Now()) { @@ -142,7 +146,7 @@ func doReconcile(ks *k8sclient.K8sClient, kc *k8sclient.KubeletClient) { result, err := podDriver.Run(ctx, pod) lastStatus.syncAt = time.Now() if err != nil { - klog.Errorf("Driver check pod %s error, will retry: %v", pod.Name, err) + reconcilerLog.Error(err, "Driver check pod error, will retry", "name", pod.Name) backOff.Next(backOffID, time.Now()) 
lastStatus.nextSyncAt = time.Now() return err diff --git a/pkg/controller/secret_controller.go b/pkg/controller/secret_controller.go index 7e7674a6e3..9e01481707 100644 --- a/pkg/controller/secret_controller.go +++ b/pkg/controller/secret_controller.go @@ -25,7 +25,7 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/klog" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" @@ -39,6 +39,10 @@ import ( "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" ) +var ( + secretCtrlLog = klog.NewKlogr().WithName("secret-controller") +) + type SecretController struct { *k8sclient.K8sClient } @@ -74,9 +78,9 @@ func checkAndCleanOrphanSecret(ctx context.Context, client *k8sclient.K8sClient, // check if the pod still exists podName := strings.TrimSuffix(secrets.Name, "-secret") if _, err := client.GetPod(ctx, podName, secrets.Namespace); k8serrors.IsNotFound(err) { - klog.V(5).Infof("orphan secret %s found, delete it", secrets.Name) + secretCtrlLog.Info("orphan secret found, delete it", "name", secrets.Name) if err := client.DeleteSecret(ctx, secrets.Name, secrets.Namespace); err != nil { - klog.Errorf("delete secret %s error: %v", secrets.Name, err) + secretCtrlLog.Error(err, "delete secret error", "name", secrets.Name) return err } return nil @@ -86,28 +90,28 @@ func checkAndCleanOrphanSecret(ctx context.Context, client *k8sclient.K8sClient, } func (m *SecretController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - klog.V(6).Infof("Receive secret %s %s", request.Name, request.Namespace) + secretCtrlLog.V(1).Info("Receive secret", "name", request.Name, "namespace", request.Namespace) secrets, err := m.GetSecret(ctx, request.Name, request.Namespace) if err != nil && !k8serrors.IsNotFound(err) { - klog.Errorf("get secret %s error: %v", request.Name, err) + secretCtrlLog.Error(err, "get secret 
error", "name", request.Name) return reconcile.Result{}, err } if secrets == nil { - klog.V(6).Infof("secret %s has been deleted.", request.Name) + secretCtrlLog.V(1).Info("secret has been deleted.", "name", request.Name) return reconcile.Result{}, nil } if err := checkAndCleanOrphanSecret(ctx, m.K8sClient, secrets); err != nil { - klog.Warningf("check and clean orphan secret %s error: %v", request.Name, err) + secretCtrlLog.Error(err, "check and clean orphan secret error", "name", request.Name) return reconcile.Result{}, err } if _, found := secrets.Data["token"]; !found { - klog.V(6).Infof("token not found in secret %s", request.Name) + secretCtrlLog.V(1).Info("token not found in secret", "name", request.Name) return reconcile.Result{}, nil } if _, found := secrets.Data["name"]; !found { - klog.V(6).Infof("name not found in secret %s", request.Name) + secretCtrlLog.V(1).Info("name not found in secret", "name", request.Name) return reconcile.Result{}, nil } jfs := juicefs.NewJfsProvider(nil, nil) @@ -127,14 +131,14 @@ func (m *SecretController) Reconcile(ctx context.Context, request reconcile.Requ jfsSetting.ClientConfPath = tempConfDir output, err := jfs.AuthFs(ctx, secretsMap, jfsSetting, true) if err != nil { - klog.Errorf("auth failed: %s, %v", output, err) + secretCtrlLog.Error(err, "auth failed", "output", output) return reconcile.Result{}, err } conf := jfsSetting.Name + ".conf" confPath := filepath.Join(jfsSetting.ClientConfPath, conf) b, err := os.ReadFile(confPath) if err != nil { - klog.Errorf("read initconfig %s failed: %v", conf, err) + secretCtrlLog.Error(err, "read initconfig failed", "conf", conf) return reconcile.Result{}, err } confs := string(b) @@ -142,7 +146,7 @@ func (m *SecretController) Reconcile(ctx context.Context, request reconcile.Requ secrets.StringData = secretsMap err = m.UpdateSecret(ctx, secrets) if err != nil { - klog.Errorf("inject initconfig into %s failed: %v", request.Name, err) + secretCtrlLog.Error(err, "inject initconfig 
failed", "name", request.Name) return reconcile.Result{}, err } // requeue after to make sure the initconfig is always up-to-date @@ -158,25 +162,25 @@ func (m *SecretController) SetupWithManager(mgr ctrl.Manager) error { return c.Watch(&source.Kind{Type: &corev1.Secret{}}, &handler.EnqueueRequestForObject{}, predicate.Funcs{ CreateFunc: func(event event.CreateEvent) bool { secret := event.Object.(*corev1.Secret) - klog.V(6).Infof("watch secret %s created", secret.GetName()) + secretCtrlLog.V(1).Info("watch secret created", "name", secret.GetName()) return true }, UpdateFunc: func(updateEvent event.UpdateEvent) bool { secretNew, ok := updateEvent.ObjectNew.(*corev1.Secret) if !ok { - klog.V(6).Infof("secret.onUpdateFunc Skip object: %v", updateEvent.ObjectNew) + secretCtrlLog.V(1).Info("secret.onUpdateFunc Skip object", "object", updateEvent.ObjectNew) return false } - klog.V(6).Infof("watch secret %s updated", secretNew.GetName()) + secretCtrlLog.V(1).Info("watch secret updated", "name", secretNew.GetName()) secretOld, ok := updateEvent.ObjectOld.(*corev1.Secret) if !ok { - klog.V(6).Infof("secret.onUpdateFunc Skip object: %v", updateEvent.ObjectOld) + secretCtrlLog.V(1).Info("secret.onUpdateFunc Skip object", "object", updateEvent.ObjectOld) return false } if secretNew.GetResourceVersion() == secretOld.GetResourceVersion() { - klog.V(6).Info("secret.onUpdateFunc Skip due to resourceVersion not changed") + secretCtrlLog.V(1).Info("secret.onUpdateFunc Skip due to resourceVersion not changed") return false } @@ -184,7 +188,7 @@ func (m *SecretController) SetupWithManager(mgr ctrl.Manager) error { }, DeleteFunc: func(deleteEvent event.DeleteEvent) bool { secret := deleteEvent.Object.(*corev1.Secret) - klog.V(6).Infof("watch secret %s deleted", secret.GetName()) + secretCtrlLog.V(1).Info("watch secret deleted", "name", secret.GetName()) return false }, }) diff --git a/pkg/dashboard/controller.go b/pkg/dashboard/controller.go index fcdfdbb817..79635a66db 100644 --- 
a/pkg/dashboard/controller.go +++ b/pkg/dashboard/controller.go @@ -19,12 +19,11 @@ package dashboard import ( "context" - "github.com/juicedata/juicefs-csi-driver/pkg/config" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -32,8 +31,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/juicedata/juicefs-csi-driver/pkg/config" ) +var mgrLog = klog.NewKlogr().WithName("manager") + func (api *API) StartManager(ctx context.Context, mgr manager.Manager) error { podCtr := PodController{api} pvCtr := PVController{api} @@ -65,7 +68,7 @@ type PVCController struct { func (c *PodController) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { pod := &corev1.Pod{} if err := c.cachedReader.Get(ctx, req.NamespacedName, pod); err != nil { - klog.Errorf("get pod %s failed: %v", req.NamespacedName, err) + mgrLog.Error(err, "get pod failed", "namespacedName", req.NamespacedName) return reconcile.Result{}, nil } if !isSysPod(pod) && !isAppPod(pod) && !c.isAppPodShouldList(ctx, pod) { @@ -79,7 +82,7 @@ func (c *PodController) Reconcile(ctx context.Context, req reconcile.Request) (r delete(c.csiNodeIndex, pod.Spec.NodeName) c.csiNodeLock.Unlock() } - klog.V(6).Infof("pod %s deleted", req.NamespacedName) + mgrLog.V(1).Info("pod deleted", "namespacedName", req.NamespacedName) return reconcile.Result{}, nil } indexes := c.appIndexes @@ -103,7 +106,7 @@ func (c *PodController) Reconcile(ctx context.Context, req reconcile.Request) (r return &pod, err }, ) - klog.V(6).Infof("pod %s created", req.NamespacedName) + mgrLog.V(1).Info("pod created", "namespacedName", 
req.NamespacedName) return reconcile.Result{}, nil } @@ -138,7 +141,7 @@ func (c *PodController) SetupWithManager(mgr manager.Manager) error { delete(c.csiNodeIndex, pod.Spec.NodeName) c.csiNodeLock.Unlock() } - klog.V(6).Infof("pod %s%s deleted", pod.GetNamespace(), pod.GetName()) + mgrLog.V(1).Info("pod deleted", "namespace", pod.GetNamespace(), "name", pod.GetName()) return false } return true @@ -156,11 +159,11 @@ func (c *PVController) Reconcile(ctx context.Context, req reconcile.Request) (re c.pvIndexes.removeIndex(req.NamespacedName) return reconcile.Result{}, nil } - klog.Errorf("get pv %s failed: %v", req.NamespacedName, err) + mgrLog.Error(err, "get pv failed", "namespacedName", req.NamespacedName) return reconcile.Result{}, err } if pv.DeletionTimestamp != nil { - klog.V(6).Infof("watch pv %s deleted", req.NamespacedName) + mgrLog.V(1).Info("watch pv deleted", "namespacedName", req.NamespacedName) c.pvIndexes.removeIndex(req.NamespacedName) if pv.Spec.ClaimRef != nil { pvcName := types.NamespacedName{ @@ -193,7 +196,7 @@ func (c *PVController) Reconcile(ctx context.Context, req reconcile.Request) (re c.pairLock.Unlock() var pvc corev1.PersistentVolumeClaim if err := c.cachedReader.Get(ctx, pvcName, &pvc); err != nil { - klog.Errorf("get pvc %s failed: %v", pvcName, err) + mgrLog.Error(err, "get pvc failed", "name", pvcName) return reconcile.Result{}, nil } c.pvcIndexes.addIndex( @@ -206,7 +209,7 @@ func (c *PVController) Reconcile(ctx context.Context, req reconcile.Request) (re }, ) } - klog.V(6).Infof("pv %s created", req.NamespacedName) + mgrLog.V(1).Info("pv created", "namespacedName", req.NamespacedName) return reconcile.Result{}, nil } @@ -236,7 +239,7 @@ func (c *PVController) SetupWithManager(mgr manager.Manager) error { func (c *PVCController) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { pvc := &corev1.PersistentVolumeClaim{} if err := c.cachedReader.Get(ctx, req.NamespacedName, pvc); err != nil { - 
klog.Errorf("get pvc %s failed: %v", req.NamespacedName, err) + mgrLog.Error(err, "get pvc failed", "namespacedName", req.NamespacedName) return reconcile.Result{}, nil } if pvc.DeletionTimestamp != nil { @@ -268,7 +271,7 @@ func (c *PVCController) Reconcile(ctx context.Context, req reconcile.Request) (r } var pv corev1.PersistentVolume if err := c.cachedReader.Get(ctx, pvName, &pv); err != nil { - klog.Errorf("get pv %s failed: %v", pvName, err) + mgrLog.Error(err, "get pv failed", "name", pvName) return reconcile.Result{}, err } c.pairLock.Lock() @@ -317,7 +320,7 @@ func (c *PVCController) SetupWithManager(mgr manager.Manager) error { Namespace: pvc.GetNamespace(), Name: pvc.GetName(), } - klog.V(6).Infof("watch pvc %s deleted", name) + mgrLog.V(1).Info("watch pvc deleted", "name", name) c.pvcIndexes.removeIndex(name) c.pairLock.Lock() delete(c.pairs, name) diff --git a/pkg/dashboard/index.go b/pkg/dashboard/index.go index ac4430d2d2..254af2224f 100644 --- a/pkg/dashboard/index.go +++ b/pkg/dashboard/index.go @@ -25,9 +25,11 @@ import ( storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" + "k8s.io/klog/v2" ) +var indexLog = klog.NewKlogr().WithName("index") + type k8sResource interface { corev1.Pod | corev1.PersistentVolume | corev1.PersistentVolumeClaim | storagev1.StorageClass } @@ -79,7 +81,7 @@ func (i *timeOrderedIndexes[T]) addIndex(resource *T, metaGetter func(*T) metav1 for e := i.list.Back(); e != nil; e = e.Prev() { currentResource, err := resourceGetter(e.Value.(types.NamespacedName)) if err != nil || currentResource == nil { - klog.V(1).Infof("failed to get resource %s: %v", e.Value.(types.NamespacedName), err) + indexLog.V(1).Info("failed to get resource", "namespacedName", e.Value.(types.NamespacedName), "error", err) i.list.Remove(e) continue } diff --git a/pkg/dashboard/pod.go b/pkg/dashboard/pod.go index 4550efd7f0..1f627eefce 100644 --- a/pkg/dashboard/pod.go +++ 
b/pkg/dashboard/pod.go @@ -30,13 +30,15 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/util/resource" ) +var podLog = klog.NewKlogr().WithName("pod") + type PodExtra struct { *corev1.Pod `json:",inline"` Pvs []*corev1.PersistentVolume `json:"pvs"` @@ -109,26 +111,26 @@ func (api *API) listAppPod() gin.HandlerFunc { if pod.Pvs == nil { pod.Pvs, err = api.listPVsOfPod(c, pod.Pod) if err != nil { - klog.Errorf("get pvs of %s error %v", pod.Spec.NodeName, err) + podLog.Error(err, "get pvs error", "node", pod.Spec.NodeName) } } if pod.MountPods == nil { pod.MountPods, err = api.listMountPodOf(c, pod.Pod) if err != nil { - klog.Errorf("get mount pods of %s error %v", pod.Spec.NodeName, err) + podLog.Error(err, "get mount pods error", "node", pod.Spec.NodeName) } } if pod.CsiNode == nil { pod.CsiNode, err = api.getCSINode(c, pod.Spec.NodeName) if err != nil { - klog.Errorf("get csi node %s error %v", pod.Spec.NodeName, err) + podLog.Error(err, "get csi node error", "node", pod.Spec.NodeName) } } if pod.Spec.NodeName != "" { var node corev1.Node err := api.cachedReader.Get(c, types.NamespacedName{Name: pod.Spec.NodeName}, &node) if err != nil { - klog.Errorf("get node %s error %v", pod.Spec.NodeName, err) + podLog.Error(err, "get node error", "node", pod.Spec.NodeName) } else { pod.Node = &node } @@ -149,7 +151,7 @@ func (api *API) filterPVsOfPod(ctx context.Context, pod *PodExtra, filter string var err error pod.Pvs, err = api.listPVsOfPod(ctx, pod.Pod) if err != nil { - klog.Errorf("get pvs of %s error %v", pod.Spec.NodeName, err) + podLog.Error(err, "get pvs error", "node", pod.Spec.NodeName) } for _, pv := range pod.Pvs { if strings.Contains(pv.Name, filter) { @@ -166,7 +168,7 @@ func (api *API) filterMountPodsOfPod(ctx 
context.Context, pod *PodExtra, filter var err error pod.MountPods, err = api.listMountPodOf(ctx, pod.Pod) if err != nil { - klog.Errorf("get mount pods of %s error %v", pod.Spec.NodeName, err) + podLog.Error(err, "get mount pods error", "node", pod.Spec.NodeName) } for _, mountPod := range pod.MountPods { if strings.Contains(mountPod.Name, filter) { @@ -234,13 +236,13 @@ func (api *API) listSysPod() gin.HandlerFunc { var node corev1.Node err := api.cachedReader.Get(c, types.NamespacedName{Name: pods[i].Spec.NodeName}, &node) if err != nil { - klog.Errorf("get node %s error %v", pods[i].Spec.NodeName, err) + podLog.Error(err, "get node error", "node", pods[i].Spec.NodeName) continue } var csiNode *corev1.Pod csiNode, err = api.getCSINode(c, pods[i].Spec.NodeName) if err != nil { - klog.Errorf("get csi node %s error %v", pods[i].Spec.NodeName, err) + podLog.Error(err, "get csi node error", "node", pods[i].Spec.NodeName) } result.Pods = append(result.Pods, &PodExtra{ Pod: pods[i], @@ -574,7 +576,7 @@ func (api *API) listAppPodsOfMountPod() gin.HandlerFunc { for _, v := range pod.Annotations { uid := getUidFunc(v) if uid == "" { - klog.V(6).Infof("annotation %s skipped", v) + podLog.V(1).Info("annotation skipped", "annotations", v) continue } if p, ok := podMap[uid]; ok { @@ -776,7 +778,7 @@ func (api *API) execPod() gin.HandlerFunc { if err := resource.ExecInPod( api.client, api.kubeconfig, terminal, namespace, name, container, []string{"sh", "-c", "bash || sh"}); err != nil { - klog.Error("Failed to exec in pod: ", err) + podLog.Error(err, "Failed to exec in pod") return } }).ServeHTTP(c.Writer, c.Request) @@ -795,18 +797,18 @@ func (api *API) watchMountPodAccessLog() gin.HandlerFunc { terminal := resource.NewTerminalSession(ctx, ws, resource.EndOfText) mountpod, err := api.client.CoreV1().Pods(namespace).Get(c, name, metav1.GetOptions{}) if err != nil { - klog.Error("Failed to get mount pod: ", err) + podLog.Error(err, "Failed to get mount pod") return } mntPath, 
_, err := resource.GetMountPathOfPod(*mountpod) if err != nil || mntPath == "" { - klog.Error("Failed to get mount path: ", err) + podLog.Error(err, "Failed to get mount path") return } if err := resource.ExecInPod( api.client, api.kubeconfig, terminal, namespace, name, container, []string{"sh", "-c", "cat " + mntPath + "/.accesslog"}); err != nil { - klog.Error("Failed to exec in pod: ", err) + podLog.Error(err, "Failed to exec in pod") return } }).ServeHTTP(c.Writer, c.Request) @@ -828,12 +830,12 @@ func (api *API) debugPod() gin.HandlerFunc { terminal := resource.NewTerminalSession(ctx, ws, resource.EndOfText) mountpod, err := api.client.CoreV1().Pods(namespace).Get(c, name, metav1.GetOptions{}) if err != nil { - klog.Error("Failed to get mount pod: ", err) + podLog.Error(err, "Failed to get mount pod") return } mntPath, _, err := resource.GetMountPathOfPod(*mountpod) if err != nil || mntPath == "" { - klog.Error("Failed to get mount path: ", err) + podLog.Error(err, "Failed to get mount path") return } if err := resource.ExecInPod( @@ -846,7 +848,7 @@ func (api *API) debugPod() gin.HandlerFunc { "--stats-sec", statsSec, "--out-dir", "/debug", mntPath}); err != nil { - klog.Error("Failed to start process: ", err) + podLog.Error(err, "Failed to start process") return } }).ServeHTTP(c.Writer, c.Request) @@ -863,7 +865,7 @@ func (api *API) downloadDebugFile() gin.HandlerFunc { api.client, api.kubeconfig, c.Writer, namespace, name, container, []string{"sh", "-c", "cat $(ls -t /debug/*.zip | head -n 1) && exit 0"}) if err != nil { - klog.Error("Failed to create SPDY executor: ", err) + podLog.Error(err, "Failed to create SPDY executor") return } } diff --git a/pkg/dashboard/pv.go b/pkg/dashboard/pv.go index f5dd9e7894..344dcb0372 100644 --- a/pkg/dashboard/pv.go +++ b/pkg/dashboard/pv.go @@ -27,11 +27,13 @@ import ( storagev1 "k8s.io/api/storage/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" + "k8s.io/klog/v2" 
"github.com/juicedata/juicefs-csi-driver/pkg/config" ) +var pvLog = klog.NewKlogr().WithName("pv") + func (api *API) listPodPVsHandler() gin.HandlerFunc { return func(c *gin.Context) { obj, ok := c.Get("pod") @@ -338,7 +340,7 @@ func (api *API) getPV(ctx context.Context, name string) (*corev1.PersistentVolum var pv corev1.PersistentVolume if err := api.cachedReader.Get(ctx, api.sysNamespaced(name), &pv); err != nil { if k8serrors.IsNotFound(err) { - klog.Errorf("get pv %s error: %v", name, err) + pvLog.Error(err, "get pv error", "name", name) return nil, nil } return nil, err @@ -350,7 +352,7 @@ func (api *API) getPVC(namespace, name string) (*corev1.PersistentVolumeClaim, e var pvc corev1.PersistentVolumeClaim if err := api.cachedReader.Get(context.Background(), types.NamespacedName{Namespace: namespace, Name: name}, &pvc); err != nil { if k8serrors.IsNotFound(err) { - klog.Errorf("get pvc %s/%s error: %v", namespace, name, err) + pvLog.Error(err, "get pvc error", "namespace", namespace, "name", name) return nil, nil } return nil, err diff --git a/pkg/driver/controller.go b/pkg/driver/controller.go index f00839a74a..8f0c8bdfef 100644 --- a/pkg/driver/controller.go +++ b/pkg/driver/controller.go @@ -10,7 +10,7 @@ import ( "github.com/container-storage-interface/spec/lib/go/csi" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "k8s.io/klog" + "k8s.io/klog/v2" "github.com/juicedata/juicefs-csi-driver/pkg/juicefs" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" @@ -35,6 +35,7 @@ var ( csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, csi.ControllerServiceCapability_RPC_EXPAND_VOLUME, } + ctrlLog = klog.NewKlogr().WithName("controller") ) type controllerService struct { @@ -55,8 +56,9 @@ func newControllerService(k8sClient *k8sclient.K8sClient) (controllerService, er // CreateVolume create directory in an existing JuiceFS filesystem func (d *controllerService) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) 
(*csi.CreateVolumeResponse, error) { + log := ctrlLog.WithName("CreateVolume") // DEBUG only, secrets exposed in args - // klog.V(5).Infof("CreateVolume: called with args: %#v", req) + // klog.Infof("CreateVolume: called with args: %#v", req) if len(req.Name) == 0 { return nil, status.Error(codes.InvalidArgument, "Volume Name cannot be empty") @@ -74,7 +76,7 @@ func (d *controllerService) CreateVolume(ctx context.Context, req *csi.CreateVol volumeId := req.Name subPath := req.Name secrets := req.Secrets - klog.V(5).Infof("CreateVolume: Secrets contains keys %+v", reflect.ValueOf(secrets).MapKeys()) + log.Info("Secrets contains keys", "secretKeys", reflect.ValueOf(secrets).MapKeys()) requiredCap := req.CapacityRange.GetRequiredBytes() if capa, ok := d.vols[req.Name]; ok && capa < requiredCap { @@ -86,7 +88,7 @@ func (d *controllerService) CreateVolume(ctx context.Context, req *csi.CreateVol volCtx := make(map[string]string) for k, v := range req.Parameters { if strings.HasPrefix(v, "$") { - klog.Warningf("CreateVolume: volume %s parameters %s uses template pattern, please enable provisioner in CSI Controller, not works in default mode.", volumeId, k) + log.Info("volume parameters uses template pattern, please enable provisioner in CSI Controller, not works in default mode.", "volumeId", volumeId, "parameters", k) } volCtx[k] = v } @@ -104,11 +106,11 @@ func (d *controllerService) CreateVolume(ctx context.Context, req *csi.CreateVol // check if use pathpattern if req.Parameters["pathPattern"] != "" { - klog.Warningf("CreateVolume: volume %s uses pathPattern, please enable provisioner in CSI Controller, not works in default mode.", volumeId) + log.Info("volume uses pathPattern, please enable provisioner in CSI Controller, not works in default mode.", "volumeId", volumeId) } // check if use secretFinalizer if req.Parameters["secretFinalizer"] == "true" { - klog.Warningf("CreateVolume: volume %s uses secretFinalizer, please enable provisioner in CSI Controller, not 
works in default mode.", volumeId) + log.Info("volume uses secretFinalizer, please enable provisioner in CSI Controller, not works in default mode.", "volumeId", volumeId) } volCtx["subPath"] = subPath @@ -123,6 +125,7 @@ func (d *controllerService) CreateVolume(ctx context.Context, req *csi.CreateVol // DeleteVolume moves directory for the volume to trash (TODO) func (d *controllerService) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) { + log := ctrlLog.WithName("DeleteVolume") volumeID := req.GetVolumeId() if len(volumeID) == 0 { return nil, status.Error(codes.InvalidArgument, "Volume ID not provided") @@ -134,24 +137,24 @@ func (d *controllerService) DeleteVolume(ctx context.Context, req *csi.DeleteVol return nil, status.Errorf(codes.InvalidArgument, "Check Volume ID error: %v", err) } if !dynamic { - klog.V(5).Infof("Volume %s not dynamic PV, ignore.", volumeID) + log.Info("Volume is not dynamic PV, ignore.", "volumeId", volumeID) return &csi.DeleteVolumeResponse{}, nil } secrets := req.Secrets - klog.V(5).Infof("DeleteVolume: Secrets contains keys %+v", reflect.ValueOf(secrets).MapKeys()) + log.Info("Secrets contains keys", "secretKeys", reflect.ValueOf(secrets).MapKeys()) if len(secrets) == 0 { - klog.V(5).Infof("DeleteVolume: Secrets is empty, skip.") + log.Info("Secrets is empty, skip.") return &csi.DeleteVolumeResponse{}, nil } if acquired := d.volLocks.TryAcquire(volumeID); !acquired { - klog.Errorf("DeleteVolume: Volume %q is being used by another operation", volumeID) + log.Info("Volume is being used by another operation", "volumeId", volumeID) return nil, status.Errorf(codes.Aborted, "DeleteVolume: Volume %q is being used by another operation", volumeID) } defer d.volLocks.Release(volumeID) - klog.V(5).Infof("DeleteVolume: Deleting volume %q", volumeID) + log.Info("Deleting volume", "volumeId", volumeID) err = d.juicefs.JfsDeleteVol(ctx, volumeID, volumeID, secrets, nil, nil) if err != nil { return 
nil, status.Errorf(codes.Internal, "Could not delVol in juicefs: %v", err) @@ -163,7 +166,8 @@ func (d *controllerService) DeleteVolume(ctx context.Context, req *csi.DeleteVol // ControllerGetCapabilities gets capabilities func (d *controllerService) ControllerGetCapabilities(ctx context.Context, req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) { - klog.V(6).Infof("ControllerGetCapabilities: called with args %#v", req) + log := ctrlLog.WithName("ControllerGetCapabilities") + log.V(1).Info("called with args", "args", req) var caps []*csi.ControllerServiceCapability for _, cap := range controllerCaps { c := &csi.ControllerServiceCapability{ @@ -180,19 +184,22 @@ func (d *controllerService) ControllerGetCapabilities(ctx context.Context, req * // GetCapacity unimplemented func (d *controllerService) GetCapacity(ctx context.Context, req *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) { - klog.V(6).Infof("GetCapacity: called with args %#v", req) + log := ctrlLog.WithName("GetCapacity") + log.V(1).Info("called with args", "args", req) return nil, status.Error(codes.Unimplemented, "") } // ListVolumes unimplemented func (d *controllerService) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) { - klog.V(6).Infof("ListVolumes: called with args %#v", req) + log := ctrlLog.WithName("ListVolumes") + log.V(1).Info("called with args", "args", req) return nil, status.Error(codes.Unimplemented, "") } // ValidateVolumeCapabilities validates volume capabilities func (d *controllerService) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) { - klog.V(6).Infof("ValidateVolumeCapabilities: called with args %#v", req) + log := ctrlLog.WithName("ValidateVolumeCapabilities") + log.V(1).Info("called with args", "args", req) volumeID := req.GetVolumeId() if len(volumeID) == 0 { return nil, 
status.Error(codes.InvalidArgument, "Volume ID not provided") @@ -261,7 +268,8 @@ func (d *controllerService) ListSnapshots(ctx context.Context, req *csi.ListSnap // ControllerExpandVolume adjusts quota according to capacity settings func (d *controllerService) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) { - klog.V(6).Infof("ControllerExpandVolume request: %+v", *req) + log := ctrlLog.WithName("ControllerExpandVolume") + log.V(1).Info("request", "request", req) volumeID := req.GetVolumeId() if len(volumeID) == 0 { @@ -285,7 +293,7 @@ func (d *controllerService) ControllerExpandVolume(ctx context.Context, req *csi if volCap == nil { return nil, status.Error(codes.InvalidArgument, "Volume capability not provided") } - klog.V(5).Infof("NodePublishVolume: volume_capability is %s", volCap) + log.Info("volume capability", "volCap", volCap) options := []string{} if m := volCap.GetMount(); m != nil { // get mountOptions from PV.spec.mountOptions or StorageClass.mountOptions diff --git a/pkg/driver/driver.go b/pkg/driver/driver.go index 645d9d470d..cd1de7614d 100644 --- a/pkg/driver/driver.go +++ b/pkg/driver/driver.go @@ -22,14 +22,17 @@ import ( "time" "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/prometheus/client_golang/prometheus" "google.golang.org/grpc" - "k8s.io/klog" + "k8s.io/klog/v2" "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" "github.com/juicedata/juicefs-csi-driver/pkg/util" +) - "github.com/prometheus/client_golang/prometheus" +var ( + driverLog = klog.NewKlogr().WithName("driver") ) // Driver struct @@ -47,14 +50,14 @@ func NewDriver(endpoint string, nodeID string, leaderElection bool, leaderElectionNamespace string, leaderElectionLeaseDuration time.Duration, reg prometheus.Registerer) (*Driver, error) { - klog.Infof("Driver: %v version %v commit %v date %v", config.DriverName, 
driverVersion, gitCommit, buildDate) + driverLog.Info("get version info", "driver", config.DriverName, "version", driverVersion, "commit", gitCommit, "date", buildDate) var k8sClient *k8sclient.K8sClient if !config.ByProcess { var err error k8sClient, err = k8sclient.NewClient() if err != nil { - klog.V(5).Infof("Can't get k8s client: %v", err) + driverLog.Error(err, "Can't get k8s client") return nil, err } } @@ -99,7 +102,7 @@ func (d *Driver) Run() error { logErr := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { resp, err := handler(ctx, req) if err != nil { - klog.Errorf("GRPC error: %v", err) + driverLog.Error(err, "GRPC error") } return resp, err } @@ -112,12 +115,12 @@ func (d *Driver) Run() error { csi.RegisterControllerServer(d.srv, d) csi.RegisterNodeServer(d.srv, d) - klog.Infof("Listening for connection on address: %#v", listener.Addr()) + driverLog.Info("Listening for connection on address", "address", listener.Addr()) return d.srv.Serve(listener) } // Stop stops server func (d *Driver) Stop() { - klog.Infof("Stopped server") + driverLog.Info("Stopped server") d.srv.Stop() } diff --git a/pkg/driver/identity.go b/pkg/driver/identity.go index 2dff973bd7..4a703f84f9 100644 --- a/pkg/driver/identity.go +++ b/pkg/driver/identity.go @@ -20,14 +20,17 @@ import ( "context" "github.com/container-storage-interface/spec/lib/go/csi" - "k8s.io/klog" + "k8s.io/klog/v2" "github.com/juicedata/juicefs-csi-driver/pkg/config" ) +var identityLog = klog.NewKlogr().WithName("identity") + // GetPluginInfo returns plugin info func (d *Driver) GetPluginInfo(ctx context.Context, req *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) { - klog.V(6).Infof("GetPluginInfo: called with args %+v", *req) + log := identityLog.WithName("GetPluginInfo") + log.V(1).Info("called with args", "args", req) resp := &csi.GetPluginInfoResponse{ Name: config.DriverName, VendorVersion: driverVersion, @@ -38,7 
+41,8 @@ func (d *Driver) GetPluginInfo(ctx context.Context, req *csi.GetPluginInfoReques // GetPluginCapabilities returns plugin capabilities func (d *Driver) GetPluginCapabilities(ctx context.Context, req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) { - klog.V(6).Infof("GetPluginCapabilities: called with args %+v", *req) + log := identityLog.WithName("GetPluginCapabilities") + log.V(1).Info("called with args", "args", req) resp := &csi.GetPluginCapabilitiesResponse{ Capabilities: []*csi.PluginCapability{ { diff --git a/pkg/driver/node.go b/pkg/driver/node.go index 907872075f..9d0960811c 100644 --- a/pkg/driver/node.go +++ b/pkg/driver/node.go @@ -27,12 +27,13 @@ import ( "github.com/container-storage-interface/spec/lib/go/csi" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "k8s.io/klog" + "k8s.io/klog/v2" k8sexec "k8s.io/utils/exec" "k8s.io/utils/mount" "github.com/prometheus/client_golang/prometheus" + "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/juicefs" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" "github.com/juicedata/juicefs-csi-driver/pkg/util" @@ -40,6 +41,7 @@ import ( var ( nodeCaps = []csi.NodeServiceCapability_RPC_Type{csi.NodeServiceCapability_RPC_GET_VOLUME_STATS} + nodeLog = klog.NewKlogr().WithName("node") ) const defaultCheckTimeout = 2 * time.Second @@ -100,11 +102,18 @@ func (d *nodeService) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag // NodePublishVolume is called by the CO when a workload that wants to use the specified volume is placed (scheduled) on a node func (d *nodeService) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) { - // WARNING: debug only, secrets included - klog.V(6).Infof("NodePublishVolume: called with args %+v", req) - + volCtx := req.GetVolumeContext() + log := nodeLog + if volCtx != nil && volCtx[config.PodInfoName] != "" { + log 
= log.WithName("NodePublishVolume").WithValues("appName", volCtx[config.PodInfoName]) + } volumeID := req.GetVolumeId() - klog.V(5).Infof("NodePublishVolume: volume_id is %s", volumeID) + log = log.WithValues("volumeId", volumeID) + + ctx = util.WithLog(ctx, log) + + // WARNING: debug only, secrets included + log.V(1).Info("called with args", "args", req) target := req.GetTargetPath() if len(target) == 0 { @@ -115,13 +124,13 @@ func (d *nodeService) NodePublishVolume(ctx context.Context, req *csi.NodePublis if volCap == nil { return nil, status.Error(codes.InvalidArgument, "Volume capability not provided") } - klog.V(5).Infof("NodePublishVolume: volume_capability is %s", volCap) + log.Info("get volume_capability", "volCap", *volCap) if !isValidVolumeCapabilities([]*csi.VolumeCapability{volCap}) { return nil, status.Error(codes.InvalidArgument, "Volume capability not supported") } - klog.V(5).Infof("NodePublishVolume: creating dir %s", target) + log.Info("creating dir", "target", target) if err := d.juicefs.CreateTarget(ctx, target); err != nil { return nil, status.Errorf(codes.Internal, "Could not create dir %q: %v", target, err) } @@ -135,8 +144,7 @@ func (d *nodeService) NodePublishVolume(ctx context.Context, req *csi.NodePublis options = append(options, m.MountFlags...) } - volCtx := req.GetVolumeContext() - klog.V(5).Infof("NodePublishVolume: volume context: %v", volCtx) + log.Info("get volume context", "volCtx", volCtx) secrets := req.Secrets mountOptions := []string{} @@ -146,7 +154,7 @@ func (d *nodeService) NodePublishVolume(ctx context.Context, req *csi.NodePublis } mountOptions = append(mountOptions, options...) 
- klog.V(5).Infof("NodePublishVolume: mounting juicefs with secret %+v, options %v", reflect.ValueOf(secrets).MapKeys(), mountOptions) + log.Info("mounting juicefs", "secret", reflect.ValueOf(secrets).MapKeys(), "options", mountOptions) jfs, err := d.juicefs.JfsMount(ctx, volumeID, target, secrets, volCtx, mountOptions) if err != nil { d.metrics.volumeErrors.Inc() @@ -187,17 +195,18 @@ func (d *nodeService) NodePublishVolume(ctx context.Context, req *csi.NodePublis err = d.juicefs.SetQuota(ctx, secrets, settings, path.Join(subdir, quotaPath), capacity) if err != nil { - klog.Error("set quota: ", err) + log.Error(err, "set quota error") } } - klog.V(5).Infof("NodePublishVolume: mounted %s at %s with options %v", volumeID, target, mountOptions) + log.Info("juicefs volume mounted", "volumeId", volumeID, "target", target) return &csi.NodePublishVolumeResponse{}, nil } // NodeUnpublishVolume is a reverse operation of NodePublishVolume. This RPC is typically called by the CO when the workload using the volume is being moved to a different node, or all the workload using the volume on a node has finished. 
func (d *nodeService) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) { - klog.V(6).Infof("NodeUnpublishVolume: called with args %+v", req) + log := nodeLog.WithName("NodeUnpublishVolume") + log.V(1).Info("called with args", "args", req) target := req.GetTargetPath() if len(target) == 0 { @@ -205,7 +214,7 @@ func (d *nodeService) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu } volumeId := req.GetVolumeId() - klog.V(5).Infof("NodeUnpublishVolume: volume_id is %s", volumeId) + log.Info("get volume_id", "volumeId", volumeId) err := d.juicefs.JfsUnmount(ctx, volumeId, target) if err != nil { @@ -218,7 +227,8 @@ func (d *nodeService) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu // NodeGetCapabilities response node capabilities to CO func (d *nodeService) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) { - klog.V(6).Infof("NodeGetCapabilities: called with args %+v", req) + log := nodeLog.WithName("NodeGetCapabilities") + log.V(1).Info("called with args", "args", req) var caps []*csi.NodeServiceCapability for _, cap := range nodeCaps { c := &csi.NodeServiceCapability{ @@ -235,7 +245,8 @@ func (d *nodeService) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetC // NodeGetInfo is called by CO for the node at which it wants to place the workload. The result of this call will be used by CO in ControllerPublishVolume. 
func (d *nodeService) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { - klog.V(6).Infof("NodeGetInfo: called with args %+v", req) + log := nodeLog.WithName("NodeGetInfo") + log.V(1).Info("called with args", "args", req) return &csi.NodeGetInfoResponse{ NodeId: d.nodeID, @@ -248,7 +259,8 @@ func (d *nodeService) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandV } func (d *nodeService) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) { - klog.V(6).Infof("NodeGetVolumeStats: called with args %+v", req) + log := nodeLog.WithName("NodeGetVolumeStats") + log.V(1).Info("called with args", "args", req) volumeID := req.GetVolumeId() if len(volumeID) == 0 { @@ -268,7 +280,7 @@ func (d *nodeService) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVo }) if err == nil { if !exists { - klog.V(5).Infof("NodeGetVolumeStats: %s Volume path not exists", volumePath) + log.Info("Volume path not exists", "volumePath", volumePath) return nil, status.Error(codes.NotFound, "Volume path not exists") } if d.SafeFormatAndMount.Interface != nil { @@ -278,16 +290,16 @@ func (d *nodeService) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVo return err }) if err != nil { - klog.V(5).Infof("NodeGetVolumeStats: Check volume path %s is mountpoint failed: %s", volumePath, err) + log.Info("Check volume path is mountpoint failed", "volumePath", volumePath, "error", err) return nil, status.Errorf(codes.Internal, "Check volume path is mountpoint failed: %s", err) } if notMnt { // target exists but not a mountpoint - klog.V(5).Infof("NodeGetVolumeStats: %s volume path not mounted", volumePath) + log.Info("volume path not mounted", "volumePath", volumePath) return nil, status.Error(codes.Internal, "Volume path not mounted") } } } else { - klog.V(5).Infof("NodeGetVolumeStats: Check volume path %s, err: %s", volumePath, err) + log.Info("Check volume path failed", 
"volumePath", volumePath, "error", err) return nil, status.Errorf(codes.Internal, "Check volume path, err: %s", err) } diff --git a/pkg/driver/node_test.go b/pkg/driver/node_test.go index 94c36a364c..8b007116d1 100644 --- a/pkg/driver/node_test.go +++ b/pkg/driver/node_test.go @@ -74,12 +74,13 @@ func TestNodePublishVolume(t *testing.T) { mockCtl := gomock.NewController(t) defer mockCtl.Finish() + ctx := util.WithLog(context.TODO(), nodeLog.WithValues("volumeId", volumeId)) mockJfs := mocks.NewMockJfs(mockCtl) - mockJfs.EXPECT().CreateVol(context.TODO(), volumeId, subPath).Return(bindSource, nil) - mockJfs.EXPECT().BindTarget(context.TODO(), bindSource, targetPath).Return(nil) + mockJfs.EXPECT().CreateVol(ctx, volumeId, subPath).Return(bindSource, nil) + mockJfs.EXPECT().BindTarget(ctx, bindSource, targetPath).Return(nil) mockJuicefs := mocks.NewMockInterface(mockCtl) - mockJuicefs.EXPECT().JfsMount(context.TODO(), volumeId, targetPath, secret, volumeCtx, []string{"ro"}).Return(mockJfs, nil) - mockJuicefs.EXPECT().CreateTarget(context.TODO(), targetPath).Return(nil) + mockJuicefs.EXPECT().JfsMount(ctx, volumeId, targetPath, secret, volumeCtx, []string{"ro"}).Return(mockJfs, nil) + mockJuicefs.EXPECT().CreateTarget(ctx, targetPath).Return(nil) juicefsDriver := &nodeService{ juicefs: mockJuicefs, nodeID: "fake_node_id", @@ -121,12 +122,13 @@ func TestNodePublishVolume(t *testing.T) { mockCtl := gomock.NewController(t) defer mockCtl.Finish() + ctx := util.WithLog(context.TODO(), nodeLog.WithValues("volumeId", volumeId)) mockJfs := mocks.NewMockJfs(mockCtl) - mockJfs.EXPECT().CreateVol(context.TODO(), volumeId, subPath).Return(bindSource, nil) - mockJfs.EXPECT().BindTarget(context.TODO(), bindSource, targetPath).Return(nil) + mockJfs.EXPECT().CreateVol(ctx, volumeId, subPath).Return(bindSource, nil) + mockJfs.EXPECT().BindTarget(ctx, bindSource, targetPath).Return(nil) mockJuicefs := mocks.NewMockInterface(mockCtl) - mockJuicefs.EXPECT().JfsMount(context.TODO(), 
volumeId, targetPath, secret, volumeCtx, mountOptions).Return(mockJfs, nil) - mockJuicefs.EXPECT().CreateTarget(context.TODO(), targetPath).Return(nil) + mockJuicefs.EXPECT().JfsMount(ctx, volumeId, targetPath, secret, volumeCtx, mountOptions).Return(mockJfs, nil) + mockJuicefs.EXPECT().CreateTarget(ctx, targetPath).Return(nil) juicefsDriver := &nodeService{ juicefs: mockJuicefs, @@ -168,12 +170,13 @@ func TestNodePublishVolume(t *testing.T) { mockCtl := gomock.NewController(t) defer mockCtl.Finish() + ctx := util.WithLog(context.TODO(), nodeLog.WithValues("volumeId", volumeId)) mockJfs := mocks.NewMockJfs(mockCtl) - mockJfs.EXPECT().CreateVol(context.TODO(), volumeId, subPath).Return(bindSource, nil) - mockJfs.EXPECT().BindTarget(context.TODO(), bindSource, targetPath).Return(nil) + mockJfs.EXPECT().CreateVol(ctx, volumeId, subPath).Return(bindSource, nil) + mockJfs.EXPECT().BindTarget(ctx, bindSource, targetPath).Return(nil) mockJuicefs := mocks.NewMockInterface(mockCtl) - mockJuicefs.EXPECT().JfsMount(context.TODO(), volumeId, targetPath, secret, volumeCtx, mountOptions).Return(mockJfs, nil) - mockJuicefs.EXPECT().CreateTarget(context.TODO(), targetPath).Return(nil) + mockJuicefs.EXPECT().JfsMount(ctx, volumeId, targetPath, secret, volumeCtx, mountOptions).Return(mockJfs, nil) + mockJuicefs.EXPECT().CreateTarget(ctx, targetPath).Return(nil) juicefsDriver := &nodeService{ juicefs: mockJuicefs, @@ -221,10 +224,12 @@ func TestNodePublishVolume(t *testing.T) { mockCtl := gomock.NewController(t) defer mockCtl.Finish() + ctx := util.WithLog(context.TODO(), nodeLog.WithValues("volumeId", volumeId)) + mockJfs := mocks.NewMockJfs(mockCtl) mockJuicefs := mocks.NewMockInterface(mockCtl) - mockJuicefs.EXPECT().JfsMount(context.TODO(), volumeId, targetPath, secret, volumeCtx, []string{"ro"}).Return(mockJfs, errors.New("test")) - mockJuicefs.EXPECT().CreateTarget(context.TODO(), targetPath).Return(nil) + mockJuicefs.EXPECT().JfsMount(ctx, volumeId, targetPath, secret, 
volumeCtx, []string{"ro"}).Return(mockJfs, errors.New("test")) + mockJuicefs.EXPECT().CreateTarget(ctx, targetPath).Return(nil) juicefsDriver := &nodeService{ juicefs: mockJuicefs, @@ -263,11 +268,12 @@ func TestNodePublishVolume(t *testing.T) { mockCtl := gomock.NewController(t) defer mockCtl.Finish() + ctx := util.WithLog(context.TODO(), nodeLog.WithValues("volumeId", volumeId)) mockJfs := mocks.NewMockJfs(mockCtl) - mockJfs.EXPECT().CreateVol(context.TODO(), volumeId, subPath).Return(bindSource, errors.New("test")) + mockJfs.EXPECT().CreateVol(ctx, volumeId, subPath).Return(bindSource, errors.New("test")) mockJuicefs := mocks.NewMockInterface(mockCtl) - mockJuicefs.EXPECT().JfsMount(context.TODO(), volumeId, targetPath, secret, volumeCtx, []string{"ro"}).Return(mockJfs, nil) - mockJuicefs.EXPECT().CreateTarget(context.TODO(), targetPath).Return(nil) + mockJuicefs.EXPECT().JfsMount(ctx, volumeId, targetPath, secret, volumeCtx, []string{"ro"}).Return(mockJfs, nil) + mockJuicefs.EXPECT().CreateTarget(ctx, targetPath).Return(nil) juicefsDriver := &nodeService{ juicefs: mockJuicefs, @@ -306,12 +312,13 @@ func TestNodePublishVolume(t *testing.T) { mockCtl := gomock.NewController(t) defer mockCtl.Finish() + ctx := util.WithLog(context.TODO(), nodeLog.WithValues("volumeId", volumeId)) mockJfs := mocks.NewMockJfs(mockCtl) - mockJfs.EXPECT().CreateVol(context.TODO(), volumeId, subPath).Return(bindSource, nil) - mockJfs.EXPECT().BindTarget(context.TODO(), bindSource, targetPath).Return(errors.New("test")) + mockJfs.EXPECT().CreateVol(ctx, volumeId, subPath).Return(bindSource, nil) + mockJfs.EXPECT().BindTarget(ctx, bindSource, targetPath).Return(errors.New("test")) mockJuicefs := mocks.NewMockInterface(mockCtl) - mockJuicefs.EXPECT().JfsMount(context.TODO(), volumeId, targetPath, secret, volumeCtx, []string{"ro"}).Return(mockJfs, nil) - mockJuicefs.EXPECT().CreateTarget(context.TODO(), targetPath).Return(nil) + mockJuicefs.EXPECT().JfsMount(ctx, volumeId, targetPath, 
secret, volumeCtx, []string{"ro"}).Return(mockJfs, nil) + mockJuicefs.EXPECT().CreateTarget(ctx, targetPath).Return(nil) juicefsDriver := &nodeService{ juicefs: mockJuicefs, diff --git a/pkg/driver/provisioner.go b/pkg/driver/provisioner.go index d80fad9876..aeef25f31c 100644 --- a/pkg/driver/provisioner.go +++ b/pkg/driver/provisioner.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "os" "strconv" "strings" "time" @@ -28,7 +29,7 @@ import ( "google.golang.org/grpc/status" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/klog" + "k8s.io/klog/v2" provisioncontroller "sigs.k8s.io/sig-storage-lib-external-provisioner/v6/controller" "github.com/prometheus/client_golang/prometheus" @@ -39,6 +40,10 @@ import ( "github.com/juicedata/juicefs-csi-driver/pkg/util/resource" ) +var ( + provisionerLog = klog.NewKlogr().WithName("provisioner") +) + type provisionerService struct { juicefs juicefs.Interface *k8s.K8sClient @@ -81,11 +86,13 @@ func newProvisionerService(k8sClient *k8s.K8sClient, leaderElection bool, func (j *provisionerService) Run(ctx context.Context) { if j.K8sClient == nil { - klog.Fatalf("K8sClient is nil") + provisionerLog.Info("K8sClient is nil") + os.Exit(1) } serverVersion, err := j.K8sClient.Discovery().ServerVersion() if err != nil { - klog.Fatalf("Error getting server version: %v", err) + provisionerLog.Error(err, "Error getting server version") + os.Exit(1) } pc := provisioncontroller.NewProvisionController(j.K8sClient, config.DriverName, @@ -99,7 +106,7 @@ func (j *provisionerService) Run(ctx context.Context) { } func (j *provisionerService) Provision(ctx context.Context, options provisioncontroller.ProvisionOptions) (*corev1.PersistentVolume, provisioncontroller.ProvisioningState, error) { - klog.V(6).Infof("Provisioner Provision: options %v", options) + provisionerLog.V(1).Info("provision options", "options", options) if options.PVC.Spec.Selector != nil { return nil, provisioncontroller.ProvisioningFinished, 
fmt.Errorf("claim Selector is not supported") } @@ -115,7 +122,7 @@ func (j *provisionerService) Provision(ctx context.Context, options provisioncon scParams[k] = pvMeta.StringParser(options.StorageClass.Parameters[k]) } } - klog.V(6).Infof("Provisioner Resolved StorageClass.Parameters: %v", scParams) + provisionerLog.V(1).Info("Resolved StorageClass.Parameters", "params", scParams) subPath := pvName if scParams["pathPattern"] != "" { @@ -128,7 +135,7 @@ func (j *provisionerService) Provision(ctx context.Context, options provisioncon j.metrics.provisionErrors.Inc() return nil, provisioncontroller.ProvisioningFinished, status.Errorf(codes.InvalidArgument, "Dynamic mounting uses the sub-path named pv name as data isolation, so read-only mode cannot be used.") } else { - klog.Warningf("Volume is set readonly, please make sure the subpath %s exists.", subPath) + provisionerLog.Info("Volume is set readonly, please make sure the subpath exists.", "subPath", subPath) } } } @@ -138,7 +145,7 @@ func (j *provisionerService) Provision(ctx context.Context, options provisioncon parsedStr := pvMeta.StringParser(mo) mountOptions = append(mountOptions, strings.Split(strings.TrimSpace(parsedStr), ",")...) 
} - klog.V(6).Infof("Provisioner Resolved MountOptions: %v", mountOptions) + provisionerLog.V(1).Info("Resolved MountOptions", "options", mountOptions) // set volume context volCtx := make(map[string]string) @@ -185,45 +192,45 @@ func (j *provisionerService) Provision(ctx context.Context, options provisioncon if pv.Spec.PersistentVolumeReclaimPolicy == corev1.PersistentVolumeReclaimDelete && options.StorageClass.Parameters["secretFinalizer"] == "true" { secret, err := j.K8sClient.GetSecret(ctx, scParams[config.ProvisionerSecretName], scParams[config.ProvisionerSecretNamespace]) if err != nil { - klog.Errorf("[PVCReconciler]: Get Secret error: %v", err) + provisionerLog.Error(err, "Get Secret error") j.metrics.provisionErrors.Inc() return nil, provisioncontroller.ProvisioningFinished, errors.New("unable to provision new pv: " + err.Error()) } - klog.V(6).Infof("Provisioner: Add Finalizer on %s/%s", secret.Namespace, secret.Name) + provisionerLog.V(1).Info("Add Finalizer", "namespace", secret.Namespace, "name", secret.Name) err = resource.AddSecretFinalizer(ctx, j.K8sClient, secret, config.Finalizer) if err != nil { - klog.Warningf("Fails to add a finalizer to the secret, error: %v", err) + provisionerLog.Error(err, "Fails to add a finalizer to the secret") } } return pv, provisioncontroller.ProvisioningFinished, nil } func (j *provisionerService) Delete(ctx context.Context, volume *corev1.PersistentVolume) error { - klog.V(6).Infof("Provisioner Delete: Volume %v", volume) + provisionerLog.V(1).Info("Delete volume", "volume", *volume) // If it exists and has a `delete` value, delete the directory. // If it exists and has a `retain` value, safe the directory. 
policy := volume.Spec.PersistentVolumeReclaimPolicy if policy != corev1.PersistentVolumeReclaimDelete { - klog.V(6).Infof("Provisioner: Volume %s retain, return.", volume.Name) + provisionerLog.V(1).Info("Volume retain, return.", "volume", volume.Name) return nil } // check all pvs of the same storageClass, if multiple pv using the same subPath, do not delete the subPath shouldDeleted, err := resource.CheckForSubPath(ctx, j.K8sClient, volume, volume.Spec.CSI.VolumeAttributes["pathPattern"]) if err != nil { - klog.Errorf("Provisioner: CheckForSubPath error: %v", err) + provisionerLog.Error(err, "check for subPath error") return err } if !shouldDeleted { - klog.Infof("Provisioner: there are other pvs using the same subPath retained, volume %s should not be deleted, return.", volume.Name) + provisionerLog.Info("there are other pvs using the same subPath retained, volume should not be deleted, return.", "volume", volume.Name) return nil } - klog.V(6).Infof("Provisioner: there are no other pvs using the same subPath, volume %s can be deleted.", volume.Name) + provisionerLog.V(1).Info("there are no other pvs using the same subPath, volume can be deleted.", "volume", volume.Name) subPath := volume.Spec.PersistentVolumeSource.CSI.VolumeAttributes["subPath"] secretName, secretNamespace := volume.Spec.CSI.NodePublishSecretRef.Name, volume.Spec.CSI.NodePublishSecretRef.Namespace secret, err := j.K8sClient.GetSecret(ctx, secretName, secretNamespace) if err != nil { - klog.Errorf("Provisioner: Get Secret error: %v", err) + provisionerLog.Error(err, "Get Secret error") return err } secretData := make(map[string]string) @@ -231,20 +238,20 @@ func (j *provisionerService) Delete(ctx context.Context, volume *corev1.Persiste secretData[k] = string(v) } - klog.V(5).Infof("Provisioner Delete: Deleting volume subpath %q", subPath) + provisionerLog.Info("Deleting volume subpath", "subPath", subPath) if err := j.juicefs.JfsDeleteVol(ctx, volume.Name, subPath, secretData, 
volume.Spec.CSI.VolumeAttributes, volume.Spec.MountOptions); err != nil { - klog.Errorf("provisioner: delete vol error %v", err) + provisionerLog.Error(err, "delete vol error") return errors.New("unable to provision delete volume: " + err.Error()) } if volume.Spec.CSI.VolumeAttributes["secretFinalizer"] == "true" { shouldRemoveFinalizer, err := resource.CheckForSecretFinalizer(ctx, j.K8sClient, volume) if err != nil { - klog.Errorf("Provisioner: CheckForSecretFinalizer error: %v", err) + provisionerLog.Error(err, "CheckForSecretFinalizer error") return err } if shouldRemoveFinalizer { - klog.V(6).Infof("Provisioner: Remove Finalizer on %s/%s", secretNamespace, secretName) + provisionerLog.V(1).Info("Remove Finalizer", "namespace", secretNamespace, "name", secretName) if err = resource.RemoveSecretFinalizer(ctx, j.K8sClient, secret, config.Finalizer); err != nil { return err } diff --git a/pkg/fuse/passfd.go b/pkg/fuse/passfd.go index eca5c4eb87..82ff94965b 100644 --- a/pkg/fuse/passfd.go +++ b/pkg/fuse/passfd.go @@ -27,12 +27,14 @@ import ( "syscall" "time" - "k8s.io/klog" + "k8s.io/klog/v2" k8sMount "k8s.io/utils/mount" "github.com/juicedata/juicefs-csi-driver/pkg/util" ) +var fdLog = klog.NewKlogr().WithName("passfd") + type Fds struct { globalMu sync.Mutex basePath string @@ -59,7 +61,7 @@ func InitTestFds() { } func (fs *Fds) ParseFuseFds(ctx context.Context) error { - klog.V(6).Infof("parse fuse fd in basePath %s", fs.basePath) + fdLog.V(1).Info("parse fuse fd in basePath", "basePath", fs.basePath) var entries []os.DirEntry var err error err = util.DoWithTimeout(ctx, 2*time.Second, func() error { @@ -67,7 +69,7 @@ func (fs *Fds) ParseFuseFds(ctx context.Context) error { return err }) if err != nil { - klog.Errorf("read dir %s: %s", fs.basePath, err) + fdLog.Error(err, "read dir error", "basePath", fs.basePath) return err } for _, entry := range entries { @@ -80,12 +82,12 @@ func (fs *Fds) ParseFuseFds(ctx context.Context) error { return err }) if err != nil { 
- klog.Errorf("read dir %s: %s", fs.basePath, err) + fdLog.Error(err, "read dir error", "basePath", fs.basePath) return err } for _, subEntry := range subEntries { if strings.HasPrefix(subEntry.Name(), "fuse_fd_comm.") { - klog.V(6).Infof("parse fuse fd in %s", subEntry.Name()) + fdLog.V(1).Info("parse fuse fd", "path", subEntry.Name()) fs.parseFuse(ctx, entry.Name(), path.Join(fs.basePath, entry.Name(), subEntry.Name())) } } @@ -136,7 +138,7 @@ func (fs *Fds) StopFd(ctx context.Context, podHashVal string) { fs.globalMu.Unlock() return } - klog.V(6).Infof("stop fuse fd server: %s", f.serverAddress) + fdLog.V(1).Info("stop fuse fd server", "server address", f.serverAddress) close(f.done) delete(fs.fds, podHashVal) @@ -155,7 +157,7 @@ func (fs *Fds) CloseFd(podHashVal string) { fs.globalMu.Unlock() return } - klog.V(6).Infof("close fuse fd: %s", podHashVal) + fdLog.V(1).Info("close fuse fd", "hashVal", podHashVal) _ = syscall.Close(f.fuseFd) f.fuseFd = -1 fs.fds[podHashVal] = f @@ -170,7 +172,7 @@ func (fs *Fds) parseFuse(ctx context.Context, podHashVal, fusePath string) { serverPath := path.Join(fs.basePath, podHashVal, "fuse_fd_csi_comm.sock") serverPathInPod := path.Join(fs.basePath, "fuse_fd_csi_comm.sock") - klog.V(6).Infof("fuse fd path of pod %s: %s", podHashVal, fusePath) + fdLog.V(1).Info("fuse fd path of pod", "hashVal", podHashVal, "fusePath", fusePath) f := &fd{ fuseMu: sync.Mutex{}, @@ -214,14 +216,14 @@ func (fs *Fds) serveFuseFD(ctx context.Context, podHashVal string) { return } - klog.V(6).Infof("serve fuse fd: %v, path: %s", f.fuseFd, f.serverAddress) + fdLog.V(1).Info("serve FUSE fd", "fd", f.fuseFd, "server address", f.serverAddress) _ = util.DoWithTimeout(ctx, 2*time.Second, func() error { _ = os.Remove(f.serverAddress) return nil }) sock, err := net.Listen("unix", f.serverAddress) if err != nil { - klog.Error(err) + fdLog.Error(err, "listen unix socket error") return } go func() { @@ -242,7 +244,7 @@ func (fs *Fds) serveFuseFD(ctx 
context.Context, podHashVal string) { if strings.Contains(err.Error(), "use of closed network connection") { return } - klog.Warningf("accept : %s", err) + fdLog.Error(err, "accept error") continue } go fs.handleFDRequest(podHashVal, conn.(*net.UnixConn)) @@ -260,12 +262,12 @@ func (fs *Fds) handleFDRequest(podHashVal string, conn *net.UnixConn) { f.fuseMu.Lock() if f.fuseFd > 0 { fds = append(fds, f.fuseFd) - klog.V(6).Infof("send FUSE fd: %d", f.fuseFd) + fdLog.V(1).Info("send FUSE fd", "fd", f.fuseFd) } err := putFd(conn, f.fuseSetting, fds...) if err != nil { f.fuseMu.Unlock() - klog.Warningf("send fuse fds: %s", err) + fdLog.Error(err, "send fuse fds error") return } if f.fuseFd > 0 { @@ -277,7 +279,7 @@ func (fs *Fds) handleFDRequest(podHashVal string, conn *net.UnixConn) { var msg []byte msg, fds, err = getFd(conn, 1) if err != nil { - klog.Warningf("recv fuse fds: %s", err) + fdLog.Error(err, "recv fuse fds") return } @@ -285,12 +287,12 @@ func (fs *Fds) handleFDRequest(podHashVal string, conn *net.UnixConn) { if string(msg) != "CLOSE" && f.fuseFd <= 0 && len(fds) >= 1 { f.fuseFd = fds[0] f.fuseSetting = msg - klog.V(6).Infof("recv FUSE fd: %v", fds) + fdLog.V(1).Info("recv FUSE fd", "fd", fds) } else { for _, fd := range fds { _ = syscall.Close(fd) } - klog.V(6).Infof("msg: %s fds: %+v", string(msg), fds) + fdLog.V(1).Info("recv msg and fds", "msg", string(msg), "fd", fds) } f.fuseMu.Unlock() @@ -313,22 +315,22 @@ func getFuseFd(path string) (int, []byte) { } conn, err := net.Dial("unix", path) if err != nil { - klog.V(6).Infof("dial %s: %s", path, err) + fdLog.V(1).Info("dial error", "path", path, "error", err) return -1, nil } defer conn.Close() msg, fds, err := getFd(conn.(*net.UnixConn), 2) if err != nil { - klog.Warningf("recv fds: %s", err) + fdLog.Error(err, "recv fds error") return -1, nil } - klog.V(6).Infof("get fd: %v, msg: %v", fds, string(msg)) + fdLog.V(1).Info("get fd and msg", "fd", fds) _ = syscall.Close(fds[0]) if len(fds) > 1 { err = 
putFd(conn.(*net.UnixConn), msg, fds[1]) - klog.V(6).Infof("send FUSE: %d", fds[1]) + fdLog.V(1).Info("send FUSE fd", "fd", fds[1]) if err != nil { - klog.Warningf("send FUSE: %s", err) + fdLog.Error(err, "send FUSE error") } return fds[1], msg } diff --git a/pkg/juicefs/juicefs.go b/pkg/juicefs/juicefs.go index 3abbe2c2a7..75d4c84def 100644 --- a/pkg/juicefs/juicefs.go +++ b/pkg/juicefs/juicefs.go @@ -36,7 +36,7 @@ import ( k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" - "k8s.io/klog" + "k8s.io/klog/v2" k8sexec "k8s.io/utils/exec" "k8s.io/utils/mount" @@ -54,6 +54,8 @@ const ( procMountInfoPath = "/proc/self/mountinfo" ) +var jfsLog = klog.NewKlogr().WithName("juicefs") + // Interface of juicefs provider type Interface interface { mount.Interface @@ -92,77 +94,6 @@ type jfs struct { Setting *config.JfsSetting } -type clientVersion struct { - Major, Minor, Patch int - Tag string -} - -// raw version should be like "JuiceFS version x.x.x" -func parseRawVersion(rawVersion string) (*clientVersion, error) { - slice := strings.Split(rawVersion, " ") - if len(slice) < 3 { - return nil, fmt.Errorf("invalid version string: %s", rawVersion) - } - return parseVersion(slice[2]) -} - -func parseVersion(version string) (*clientVersion, error) { - re := regexp.MustCompile(`^(\d+)\.(\d+)\.(\d+)(?:[+-](.+))?$`) - matches := re.FindStringSubmatch(strings.TrimSpace(version)) - if matches == nil { - return nil, fmt.Errorf("invalid version string: %s", version) - } - major, err := strconv.Atoi(matches[1]) - if err != nil { - return nil, fmt.Errorf("invalid major version: %s", matches[1]) - } - minor, err := strconv.Atoi(matches[2]) - if err != nil { - return nil, fmt.Errorf("invalid minor version: %s", matches[2]) - } - patch, err := strconv.Atoi(matches[3]) - if err != nil { - return nil, fmt.Errorf("invalid patch version: %s", matches[3]) - } - return &clientVersion{ - Major: major, - Minor: minor, - 
Patch: patch, - Tag: matches[4], - }, nil -} - -func (v *clientVersion) String() string { - repr := fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch) - if v.Tag != "" { - repr += "-" + v.Tag - } - return repr -} - -func (v *clientVersion) Approximate(other *clientVersion) bool { - return v.Major == other.Major && v.Minor == other.Minor && v.Patch == other.Patch -} - -func (v *clientVersion) LessThan(other *clientVersion) bool { - if v.Major < other.Major { - return true - } - if v.Major > other.Major { - return false - } - if v.Minor < other.Minor { - return true - } - if v.Minor > other.Minor { - return false - } - if v.Patch < other.Patch { - return true - } - return false -} - // Jfs is the interface of a mounted file system type Jfs interface { GetBasePath() string @@ -179,8 +110,9 @@ func (fs *jfs) GetBasePath() string { // CreateVol creates the directory needed func (fs *jfs) CreateVol(ctx context.Context, volumeID, subPath string) (string, error) { + log := util.GenLog(ctx, jfsLog, "CreateVol") volPath := filepath.Join(fs.MountPath, subPath) - klog.V(6).Infof("CreateVol: checking %q exists in %v", volPath, fs) + log.V(1).Info("checking volPath exists", "volPath", volPath, "fs", fs) var exists bool if err := util.DoWithTimeout(ctx, defaultCheckTimeout, func() (err error) { exists, err = mount.PathExists(volPath) @@ -189,7 +121,7 @@ func (fs *jfs) CreateVol(ctx context.Context, volumeID, subPath string) (string, return "", fmt.Errorf("could not check volume path %q exists: %v", volPath, err) } if !exists { - klog.V(5).Infof("CreateVol: volume not existed") + log.Info("volume not existed") if err := util.DoWithTimeout(ctx, defaultCheckTimeout, func() (err error) { return os.MkdirAll(volPath, os.FileMode(0777)) }); err != nil { @@ -214,6 +146,7 @@ func (fs *jfs) CreateVol(ctx context.Context, volumeID, subPath string) (string, } func (fs *jfs) BindTarget(ctx context.Context, bindSource, target string) error { + log := util.GenLog(ctx, jfsLog, "BindTarget") 
mountInfos, err := mount.ParseMountInfo(procMountInfoPath) if err != nil { return err @@ -234,15 +167,15 @@ func (fs *jfs) BindTarget(ctx context.Context, bindSource, target string) error if targetMinor != nil { if *targetMinor == *mountMinor { // target already binded mountpath - klog.V(6).Infof("BindTarget: target %s already bind mount to %s", target, fs.MountPath) + log.V(1).Info("target already bind mounted.", "target", target, "mountPath", fs.MountPath) return nil } // target is bind by other path, umount it - klog.Infof("BindTarget: target %s bind mount to other path, umount it", target) + log.Info("target bind mount to other path, umount it", "target", target) util.UmountPath(ctx, target) } // bind target to mountpath - klog.Infof("BindTarget: binding %s at %s", bindSource, target) + log.Info("binding source at target", "source", bindSource, "target", target) if err := fs.Provider.Mount(bindSource, target, fsTypeNone, []string{"bind"}); err != nil { os.Remove(target) return err @@ -347,9 +280,10 @@ func (j *juicefs) JfsMount(ctx context.Context, volumeID string, target string, // Settings get all jfs settings and generate format/auth command func (j *juicefs) Settings(ctx context.Context, volumeID string, secrets, volCtx map[string]string, options []string) (*config.JfsSetting, error) { + log := util.GenLog(ctx, jfsLog, "Settings") pv, pvc, err := resource.GetPVWithVolumeHandleOrAppInfo(ctx, j.K8sClient, volumeID, volCtx) if err != nil { - klog.Warningf("Get PV with volumeID %s error: %v", volumeID, err) + log.Error(err, "Get PV with volumeID error", "volumeId", volumeID) } // overwrite volCtx with pvc annotations if pvc != nil { @@ -366,13 +300,13 @@ func (j *juicefs) Settings(ctx context.Context, volumeID string, secrets, volCtx jfsSetting, err := config.ParseSetting(secrets, volCtx, options, !config.ByProcess, pv, pvc) if err != nil { - klog.V(5).Infof("Parse config for %s error: %v", secrets["name"], err) + log.Error(err, "Parse config error", "secret", 
secrets["name"]) return nil, err } jfsSetting.VolumeId = volumeID if !jfsSetting.IsCe { if secrets["token"] == "" { - klog.V(5).Infof("token is empty, skip authfs.") + log.Info("token is empty, skip authfs.") } else { res, err := j.AuthFs(ctx, secrets, jfsSetting, false) if err != nil { @@ -385,7 +319,7 @@ func (j *juicefs) Settings(ctx context.Context, volumeID string, secrets, volCtx } else { noUpdate := false if secrets["storage"] == "" || secrets["bucket"] == "" { - klog.V(5).Infof("JfsMount: storage or bucket is empty, format --no-update.") + log.Info("JfsMount: storage or bucket is empty, format --no-update.") noUpdate = true } res, err := j.ceFormat(ctx, secrets, noUpdate, jfsSetting) @@ -399,6 +333,7 @@ func (j *juicefs) Settings(ctx context.Context, volumeID string, secrets, volCtx // genJfsSettings get jfs settings and unique id func (j *juicefs) genJfsSettings(ctx context.Context, volumeID string, target string, secrets, volCtx map[string]string, options []string) (*config.JfsSetting, error) { + log := util.GenLog(ctx, jfsLog, "Settings") // get settings jfsSetting, err := j.Settings(ctx, volumeID, secrets, volCtx, options) if err != nil { @@ -408,10 +343,10 @@ func (j *juicefs) genJfsSettings(ctx context.Context, volumeID string, target st // get unique id uniqueId, err := j.getUniqueId(ctx, volumeID) if err != nil { - klog.Errorf("Get volume name by volume id %s error: %v", volumeID, err) + log.Error(err, "Get volume name by volume id error", "volumeID", volumeID) return nil, err } - klog.V(6).Infof("Get uniqueId of volume [%s]: %s", volumeID, uniqueId) + log.V(1).Info("Get uniqueId of volume", "volumeId", volumeID, "uniqueId", uniqueId) jfsSetting.UniqueId = uniqueId jfsSetting.SecretName = fmt.Sprintf("juicefs-%s-secret", jfsSetting.UniqueId) if jfsSetting.CleanCache { @@ -428,7 +363,7 @@ func (j *juicefs) genJfsSettings(ctx context.Context, volumeID string, target st j.CacheDirMaps[uniqueId] = jfsSetting.CacheDirs j.Unlock() } - klog.V(6).Infof("Get 
uuid of volume [%s]: %s", volumeID, uuid) + log.V(1).Info("Get uuid of volume", "volumeId", volumeID, "uuid", uuid) } return jfsSetting, nil } @@ -459,6 +394,7 @@ func (j *juicefs) getUniqueId(ctx context.Context, volumeId string) (string, err // GetJfsVolUUID get UUID from result of `juicefs status ` func (j *juicefs) GetJfsVolUUID(ctx context.Context, jfsSetting *config.JfsSetting) (string, error) { + log := util.GenLog(ctx, jfsLog, "GetJfsVolUUID") cmdCtx, cmdCancel := context.WithTimeout(ctx, 8*defaultCheckTimeout) defer cmdCancel() statusCmd := j.Exec.CommandContext(cmdCtx, config.CeCliPath, "status", jfsSetting.Source) @@ -471,10 +407,10 @@ func (j *juicefs) GetJfsVolUUID(ctx context.Context, jfsSetting *config.JfsSetti if err != nil { re := string(stdout) if strings.Contains(re, "database is not formatted") { - klog.V(6).Infof("juicefs %s not formatted.", jfsSetting.Source) + log.V(1).Info("juicefs not formatted.", "name", jfsSetting.Source) return "", nil } - klog.Infof("juicefs status error: %v, output: '%s'", err, re) + log.Error(err, "juicefs status error", "output", re) if cmdCtx.Err() == context.DeadlineExceeded { re = fmt.Sprintf("juicefs status %s timed out", 8*defaultCheckTimeout) return "", errors.New(re) @@ -524,19 +460,20 @@ func (j *juicefs) validTarget(target string) error { } func (j *juicefs) JfsUnmount(ctx context.Context, volumeId, mountPath string) error { + log := util.GenLog(ctx, jfsLog, "JfsUmount") uniqueId, err := j.getUniqueId(ctx, volumeId) if err != nil { - klog.Errorf("Get volume name by volume id %s error: %v", volumeId, err) + log.Error(err, "Get volume name by volume id error", "volumeId", volumeId) return err } if config.ByProcess { ref, err := j.processMount.GetMountRef(ctx, mountPath, "") if err != nil { - klog.Errorf("Get mount ref error: %v", err) + log.Error(err, "Get mount ref error") } err = j.processMount.JUmount(ctx, mountPath, "") if err != nil { - klog.Errorf("Get mount ref error: %v", err) + log.Error(err, "umount 
error") } if ref == 1 { func() { @@ -545,13 +482,13 @@ func (j *juicefs) JfsUnmount(ctx context.Context, volumeId, mountPath string) er uuid := j.UUIDMaps[uniqueId] cacheDirs := j.CacheDirMaps[uniqueId] if uuid == "" && len(cacheDirs) == 0 { - klog.Infof("Can't get uuid and cacheDirs of %s. skip cache clean.", uniqueId) + log.Info("Can't get uuid and cacheDirs. skip cache clean.", "uniqueId", uniqueId) return } delete(j.UUIDMaps, uniqueId) delete(j.CacheDirMaps, uniqueId) - klog.V(5).Infof("Cleanup cache of volume %s in node %s", uniqueId, config.NodeName) + log.Info("Cleanup cache of volume", "uniqueId", uniqueId, "node", config.NodeName) // clean cache should be done even when top context timeout go func() { _ = j.processMount.CleanCache(context.TODO(), "", uuid, uniqueId, cacheDirs) @@ -571,7 +508,7 @@ func (j *juicefs) JfsUnmount(ctx context.Context, volumeId, mountPath string) er pod, err := j.K8sClient.GetPod(ctx, oldPodName, config.Namespace) if err != nil { if !k8serrors.IsNotFound(err) { - klog.Errorf("JfsUnmount: Get mount pod %s err %v", oldPodName, err) + log.Error(err, "Get mount pod error", "pod", oldPodName) return err } } @@ -586,7 +523,7 @@ func (j *juicefs) JfsUnmount(ctx context.Context, volumeId, mountPath string) er fieldSelector := &fields.Set{"spec.nodeName": config.NodeName} pods, err := j.K8sClient.ListPod(ctx, config.Namespace, labelSelector, fieldSelector) if err != nil { - klog.Errorf("List pods of uniqueId %s error: %v", uniqueId, err) + log.Error(err, "List pods of uniqueId error", "uniqueId", uniqueId) return err } mountPods = append(mountPods, pods...) 
@@ -649,7 +586,8 @@ func (j *juicefs) CreateTarget(ctx context.Context, target string) error { } func (j *juicefs) JfsCleanupMountPoint(ctx context.Context, mountPath string) error { - klog.V(5).Infof("JfsCleanupMountPoint: clean up mount point: %q", mountPath) + log := util.GenLog(ctx, jfsLog, "JfsCleanupMountPoint") + log.Info("clean up mount point", "mountPath", mountPath) return util.DoWithTimeout(ctx, 2*defaultCheckTimeout, func() (err error) { return mount.CleanupMountPoint(mountPath, j.SafeFormatAndMount.Interface, false) }) @@ -657,6 +595,7 @@ func (j *juicefs) JfsCleanupMountPoint(ctx context.Context, mountPath string) er // AuthFs authenticates JuiceFS, enterprise edition only func (j *juicefs) AuthFs(ctx context.Context, secrets map[string]string, setting *config.JfsSetting, force bool) (string, error) { + log := util.GenLog(ctx, jfsLog, "AuthFs") if secrets == nil { return "", status.Errorf(codes.InvalidArgument, "Nil secrets") } @@ -677,7 +616,7 @@ func (j *juicefs) AuthFs(ctx context.Context, secrets map[string]string, setting // compatible for compatibleKey, realKey := range keysCompatible { if value, ok := secrets[compatibleKey]; ok { - klog.Infof("transform key [%s] to [%s]", compatibleKey, realKey) + log.Info("transform key", "compatibleKey", compatibleKey, "realKey", realKey) secrets[realKey] = value delete(secrets, compatibleKey) } @@ -731,7 +670,7 @@ func (j *juicefs) AuthFs(ctx context.Context, secrets map[string]string, setting if err != nil { return "", fmt.Errorf("create config file %q failed: %v", confPath, err) } - klog.V(5).Infof("Create config file: %q success", confPath) + log.Info("Create config file success", "confPath", confPath) } } } @@ -750,7 +689,7 @@ func (j *juicefs) AuthFs(ctx context.Context, secrets map[string]string, setting args = append(args, fmt.Sprintf("--conf-dir=%s", setting.ClientConfPath)) } - klog.V(5).Infof("AuthFs cmd: %v", cmdArgs) + log.Info("AuthFs cmd", "args", cmdArgs) // only run command when in process 
mode if !force && !config.ByProcess { @@ -768,10 +707,10 @@ func (j *juicefs) AuthFs(ctx context.Context, secrets map[string]string, setting envs = append(envs, "JFS_NO_CHECK_OBJECT_STORAGE=1") authCmd.SetEnv(envs) res, err := authCmd.CombinedOutput() - klog.Infof("Auth output is %s", res) + log.Info("auth output", "output", res) if err != nil { re := string(res) - klog.Infof("Auth error: %v", err) + log.Error(err, "auth error") if cmdCtx.Err() == context.DeadlineExceeded { re = fmt.Sprintf("juicefs auth %s timed out", 8*defaultCheckTimeout) return "", errors.New(re) @@ -782,6 +721,7 @@ func (j *juicefs) AuthFs(ctx context.Context, secrets map[string]string, setting } func (j *juicefs) SetQuota(ctx context.Context, secrets map[string]string, jfsSetting *config.JfsSetting, quotaPath string, capacity int64) error { + log := util.GenLog(ctx, jfsLog, "SetQuota") cap := capacity / 1024 / 1024 / 1024 if cap <= 0 { return fmt.Errorf("capacity %d is too small, at least 1GiB for quota", capacity) @@ -795,7 +735,7 @@ func (j *juicefs) SetQuota(ctx context.Context, secrets map[string]string, jfsSe args = []string{"quota", "set", secrets["name"], "--path", quotaPath, "--capacity", strconv.FormatInt(cap, 10)} cmdArgs = []string{config.CliPath, "quota", "set", secrets["name"], "--path", quotaPath, "--capacity", strconv.FormatInt(cap, 10)} } - klog.Infof("SetQuota cmd: %s", strings.Join(cmdArgs, " ")) + log.Info("quota cmd", "command", strings.Join(cmdArgs, " ")) cmdCtx, cmdCancel := context.WithTimeout(ctx, 2*defaultCheckTimeout) defer cmdCancel() envs := syscall.Environ() @@ -814,7 +754,7 @@ func (j *juicefs) SetQuota(ctx context.Context, secrets map[string]string, jfsSe res, err := quotaCmd.CombinedOutput() if err == nil { - klog.V(5).Infof("quota set success: %s", string(res)) + log.Info("quota set success", "output", string(res)) } return wrapSetQuotaErr(string(res), err) } @@ -826,14 +766,14 @@ func (j *juicefs) SetQuota(ctx context.Context, secrets map[string]string, jfsSe 
quotaCmd.SetEnv(envs) res, err := quotaCmd.CombinedOutput() if err == nil { - klog.V(5).Infof("quota set success: %s", string(res)) + log.Info("quota set success", "output", string(res)) } done <- wrapSetQuotaErr(string(res), err) close(done) }() select { case <-cmdCtx.Done(): - klog.Warningf("quota set timeout, runs in background") + log.Info("quota set timeout, runs in background") return nil case err = <-done: return err @@ -844,7 +784,7 @@ func wrapSetQuotaErr(res string, err error) error { if err != nil { re := string(res) if strings.Contains(re, "invalid command: quota") || strings.Contains(re, "No help topic for 'quota'") { - klog.Info("juicefs inside do not support quota, skip it.") + jfsLog.Info("juicefs inside do not support quota, skip it.") return nil } return errors.Wrap(err, re) @@ -856,7 +796,7 @@ func wrapStatusErr(res string, err error) error { if err != nil { re := string(res) if strings.Contains(re, "database is not formatted") { - klog.Infof("juicefs not formatted, ignore status command error") + jfsLog.Info("juicefs not formatted, ignore status command error") return nil } return errors.Wrap(err, re) @@ -877,6 +817,7 @@ func (j *juicefs) GetSubPath(ctx context.Context, volumeID string) (string, erro // MountFs mounts JuiceFS with idempotency func (j *juicefs) MountFs(ctx context.Context, appInfo *config.AppInfo, jfsSetting *config.JfsSetting) (string, error) { + log := util.GenLog(ctx, jfsLog, "MountFs") var mnt podmount.MntInterface if jfsSetting.UsePod { jfsSetting.MountPath = filepath.Join(config.PodMountBase, jfsSetting.UniqueId) @@ -890,7 +831,7 @@ func (j *juicefs) MountFs(ctx context.Context, appInfo *config.AppInfo, jfsSetti if err != nil { return "", err } - klog.V(5).Infof("Mount: mounting %q at %q with options %v", util.StripPasswd(jfsSetting.Source), jfsSetting.MountPath, jfsSetting.Options) + log.Info("mounting with options", "source", util.StripPasswd(jfsSetting.Source), "mountPath", jfsSetting.MountPath, "options", 
jfsSetting.Options) return jfsSetting.MountPath, nil } @@ -912,19 +853,21 @@ func (j *juicefs) Upgrade() { err := exec.CommandContext(ctx, config.CliPath, "version", "-u").Run() if ctx.Err() == context.DeadlineExceeded { - klog.V(5).Infof("Upgrade: did not finish in %v", timeout) + jfsLog.Info("Upgrade: did not finish", "time", timeout) return } if err != nil { - klog.V(5).Infof("Upgrade: err %v", err) + jfsLog.Error(err, "Upgrade juicefs err") return } - klog.V(5).Infof("Upgrade: successfully upgraded to newest version") + jfsLog.Info("Upgrade: successfully upgraded to newest version") } func (j *juicefs) ceFormat(ctx context.Context, secrets map[string]string, noUpdate bool, setting *config.JfsSetting) (string, error) { + log := util.GenLog(ctx, jfsLog, "ceFormat") + if secrets == nil { return "", status.Errorf(codes.InvalidArgument, "Nil secrets") } @@ -981,7 +924,7 @@ func (j *juicefs) ceFormat(ctx context.Context, secrets map[string]string, noUpd cmdArgs = append(cmdArgs, stripped...) 
} - klog.V(5).Infof("ceFormat cmd: %v", cmdArgs) + log.Info("ce format cmd", "args", cmdArgs) // only run command when in process mode if !config.ByProcess { @@ -1001,10 +944,10 @@ func (j *juicefs) ceFormat(ctx context.Context, secrets map[string]string, noUpd } formatCmd.SetEnv(envs) res, err := formatCmd.CombinedOutput() - klog.Infof("Format output is %s", res) + log.Info("format output", "output", res) if err != nil { re := string(res) - klog.Infof("Format error: %v", err) + log.Error(err, "format error") if cmdCtx.Err() == context.DeadlineExceeded { re = fmt.Sprintf("juicefs format %s timed out", 8*defaultCheckTimeout) return "", errors.New(re) @@ -1016,10 +959,11 @@ func (j *juicefs) ceFormat(ctx context.Context, secrets map[string]string, noUpd // Status checks the status of JuiceFS, only for community edition func (j *juicefs) Status(ctx context.Context, metaUrl string) error { + log := util.GenLog(ctx, jfsLog, "status") args := []string{"status", metaUrl} cmdArgs := []string{config.CeCliPath, "status", "${metaurl}"} - klog.Infof("Status cmd: %s", strings.Join(cmdArgs, " ")) + log.Info("juicefs status cmd", "command", strings.Join(cmdArgs, " ")) cmdCtx, cmdCancel := context.WithTimeout(ctx, 2*defaultCheckTimeout) defer cmdCancel() diff --git a/pkg/juicefs/juicefs_test.go b/pkg/juicefs/juicefs_test.go index c6f96052ca..b9ff367837 100644 --- a/pkg/juicefs/juicefs_test.go +++ b/pkg/juicefs/juicefs_test.go @@ -1212,176 +1212,3 @@ func Test_juicefs_validTarget(t *testing.T) { }) } } - -func Test_parseRawVersion(t *testing.T) { - type parseRawVersionCase struct { - name string - raw string - want clientVersion - fail bool - } - - cases := []parseRawVersionCase{ - { - name: "test-normal", - raw: "juicefs version 0.11.0", - want: clientVersion{ - Major: 0, - Minor: 11, - Patch: 0, - }, - }, - { - name: "test-normal2", - raw: "juicefs version 0.11.0-rc1", - want: clientVersion{ - Major: 0, - Minor: 11, - Patch: 0, - }, - }, - { - name: "test-normal3", - raw: 
"juicefs version 1.1.0-dev+2023-04-12.2488ce21", - want: clientVersion{ - Major: 1, - Minor: 1, - Patch: 0, - }, - }, - { - name: "test-normal4", - raw: "JuiceFS version 4.10.0 (2023-04-10 9e92fe6f)", - want: clientVersion{ - Major: 4, - Minor: 10, - Patch: 0, - }, - }, - { - name: "test-normal5", - raw: "juicefs version 1.0.4+2023-04-06.f1c475d9", - want: clientVersion{ - Major: 1, - Minor: 0, - Patch: 4, - }, - }, - { - name: "test-error", - raw: "juicefs version 0.11", - want: clientVersion{}, - fail: true, - }, - { - name: "test-error2", - raw: "juicefs version", - want: clientVersion{}, - fail: true, - }, - { - name: "test-error3", - raw: "juicefs 0.11", - want: clientVersion{}, - fail: true, - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - got, err := parseRawVersion(c.raw) - if err != nil && !c.fail { - t.Errorf("parseRawVersion() error = %v, wantErr %v", err, c.fail) - return - } - if err == nil && c.fail { - t.Errorf("parseRawVersion(\"%s\") wantErr %v", c.raw, c.fail) - return - } - if err != nil && c.fail { - return - } - if !got.Approximate(&c.want) { - t.Errorf("parseRawVersion() got = %v, want %v", got, c.want) - } - }) - } -} - -func Test_clientVersion_LessThan(t *testing.T) { - type fields struct { - Major int - Minor int - Patch int - } - type args struct { - other clientVersion - } - tests := []struct { - name string - fields fields - args args - want bool - }{ - { - name: "test-normal", - fields: fields{ - Major: 0, - Minor: 11, - Patch: 0, - }, - args: args{ - other: clientVersion{ - Major: 0, - Minor: 11, - Patch: 1, - }, - }, - want: true, - }, - { - name: "test-normal2", - fields: fields{ - Major: 0, - Minor: 11, - Patch: 0, - }, - args: args{ - other: clientVersion{ - Major: 0, - Minor: 10, - Patch: 1, - }, - }, - want: false, - }, - { - name: "test-normal3", - fields: fields{ - Major: 0, - Minor: 11, - Patch: 0, - }, - args: args{ - other: clientVersion{ - Major: 0, - Minor: 11, - Patch: 0, - }, - }, - want: 
false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := &clientVersion{ - Major: tt.fields.Major, - Minor: tt.fields.Minor, - Patch: tt.fields.Patch, - } - if got := c.LessThan(&tt.args.other); got != tt.want { - t.Errorf("clientVersion.LessThan() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/juicefs/mount/builder/common.go b/pkg/juicefs/mount/builder/common.go index 0ec0e03d6a..2a1d590d7a 100644 --- a/pkg/juicefs/mount/builder/common.go +++ b/pkg/juicefs/mount/builder/common.go @@ -26,7 +26,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/klog" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "github.com/juicedata/juicefs-csi-driver/pkg/config" @@ -81,7 +80,7 @@ func (r *BaseBuilder) genPodTemplate(baseCnGen func() corev1.Container) *corev1. func (r *BaseBuilder) genCommonJuicePod(cnGen func() corev1.Container) *corev1.Pod { // gen again to update the mount pod spec if err := config.GenPodAttrWithCfg(r.jfsSetting, nil); err != nil { - klog.Warningf("genCommonJuicePod gen pod attr failed, mount pod may not be the expected config %+v", err) + builderLog.Error(err, "genCommonJuicePod gen pod attr failed, mount pod may not be the expected config") } pod := r.genPodTemplate(cnGen) // labels & annotations @@ -146,7 +145,7 @@ func (r *BaseBuilder) genMountCommand() string { cmd := "" options := r.jfsSetting.Options if r.jfsSetting.IsCe { - klog.V(5).Infof("ceMount: mount %v at %v", util.StripPasswd(r.jfsSetting.Source), r.jfsSetting.MountPath) + builderLog.Info("ceMount", "source", util.StripPasswd(r.jfsSetting.Source), "mountPath", r.jfsSetting.MountPath) mountArgs := []string{"exec", config.CeMountPath, "${metaurl}", security.EscapeBashStr(r.jfsSetting.MountPath)} if !util.ContainsPrefix(options, "metrics=") { if r.jfsSetting.Attr.HostNetwork { @@ -159,7 +158,7 @@ func (r *BaseBuilder) genMountCommand() string { mountArgs = append(mountArgs, "-o", 
security.EscapeBashStr(strings.Join(options, ","))) cmd = strings.Join(mountArgs, " ") } else { - klog.V(5).Infof("Mount: mount %v at %v", util.StripPasswd(r.jfsSetting.Source), r.jfsSetting.MountPath) + builderLog.Info("eeMount", "source", util.StripPasswd(r.jfsSetting.Source), "mountPath", r.jfsSetting.MountPath) mountArgs := []string{"exec", config.JfsMountPath, security.EscapeBashStr(r.jfsSetting.Source), security.EscapeBashStr(r.jfsSetting.MountPath)} mountOptions := []string{"foreground", "no-update"} if r.jfsSetting.EncryptRsaKey != "" { diff --git a/pkg/juicefs/mount/builder/interface.go b/pkg/juicefs/mount/builder/interface.go index d058923409..c6f8471613 100644 --- a/pkg/juicefs/mount/builder/interface.go +++ b/pkg/juicefs/mount/builder/interface.go @@ -16,7 +16,14 @@ package builder -import corev1 "k8s.io/api/core/v1" +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" +) + +var ( + builderLog = klog.NewKlogr().WithName("builder") +) type SidecarInterface interface { NewMountSidecar() *corev1.Pod diff --git a/pkg/juicefs/mount/builder/job.go b/pkg/juicefs/mount/builder/job.go index efb3be08cd..1907470d92 100644 --- a/pkg/juicefs/mount/builder/job.go +++ b/pkg/juicefs/mount/builder/job.go @@ -24,7 +24,6 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/klog" "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/util" @@ -54,7 +53,7 @@ func (r *JobBuilder) NewJobForCreateVolume() *batchv1.Job { cmd := strings.Join([]string{initCmd, jobCmd}, "\n") job.Spec.Template.Spec.Containers[0].Command = []string{"sh", "-c", cmd} - klog.Infof("create volume job cmd: %s", jobCmd) + builderLog.Info("create volume job", "command", jobCmd) return job } @@ -65,7 +64,7 @@ func (r *JobBuilder) NewJobForDeleteVolume() *batchv1.Job { initCmd := r.genInitCommand() cmd := strings.Join([]string{initCmd, jobCmd}, "\n") 
job.Spec.Template.Spec.Containers[0].Command = []string{"sh", "-c", cmd} - klog.Infof("delete volume job cmd: %s", jobCmd) + builderLog.Info("delete volume job", "command", jobCmd) return job } diff --git a/pkg/juicefs/mount/builder/vci-serverless.go b/pkg/juicefs/mount/builder/vci-serverless.go index a62740cea3..2d6fd42f92 100644 --- a/pkg/juicefs/mount/builder/vci-serverless.go +++ b/pkg/juicefs/mount/builder/vci-serverless.go @@ -23,7 +23,6 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/json" - "k8s.io/klog" utilpointer "k8s.io/utils/pointer" "github.com/juicedata/juicefs-csi-driver/pkg/config" @@ -88,7 +87,7 @@ func (r *VCIBuilder) NewMountSidecar() *corev1.Pod { if err := json.Unmarshal([]byte(v), &vciPropagations); err == nil { propagations = append(propagations, vciPropagations...) } else { - klog.Errorf("failed to unmarshal VCIPropagation annotation: %v", err) + builderLog.Error(err, "failed to unmarshal VCIPropagation annotation") } } VCIPropagationBytes, _ := json.Marshal(propagations) diff --git a/pkg/juicefs/mount/pod_mount.go b/pkg/juicefs/mount/pod_mount.go index 521ffbe7d6..777b290bc3 100644 --- a/pkg/juicefs/mount/pod_mount.go +++ b/pkg/juicefs/mount/pod_mount.go @@ -35,7 +35,7 @@ import ( k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/util/retry" - "k8s.io/klog" + "k8s.io/klog/v2" k8sMount "k8s.io/utils/mount" jfsConfig "github.com/juicedata/juicefs-csi-driver/pkg/config" @@ -48,6 +48,7 @@ import ( ) type PodMount struct { + log klog.Logger k8sMount.SafeFormatAndMount K8sClient *k8sclient.K8sClient } @@ -55,11 +56,17 @@ type PodMount struct { var _ MntInterface = &PodMount{} func NewPodMount(client *k8sclient.K8sClient, mounter k8sMount.SafeFormatAndMount) MntInterface { - return &PodMount{mounter, client} + return &PodMount{ + klog.NewKlogr().WithName("pod-mount"), + mounter, client} } func (p *PodMount) JMount(ctx context.Context, appInfo *jfsConfig.AppInfo, 
jfsSetting *jfsConfig.JfsSetting) error { - hashVal := GenHashOfSetting(*jfsSetting) + log := p.log.WithName("JMount") + if appInfo != nil { + log = log.WithValues("appName", appInfo.Name) + } + hashVal := GenHashOfSetting(log, *jfsSetting) jfsSetting.HashVal = hashVal var podName string var err error @@ -115,7 +122,7 @@ func (p *PodMount) GetMountRef(ctx context.Context, target, podName string) (int if k8serrors.IsNotFound(err) { return 0, nil } - klog.Errorf("JUmount: Get mount pod %s err %v", podName, err) + p.log.Error(err, "Get mount pod error", "podName", podName) return 0, err } return GetRef(pod), nil @@ -124,18 +131,18 @@ func (p *PodMount) GetMountRef(ctx context.Context, target, podName string) (int func (p *PodMount) UmountTarget(ctx context.Context, target, podName string) error { // targetPath may be mount bind many times when mount point recovered. // umount until it's not mounted. - klog.V(5).Infof("JfsUnmount: lazy umount %s", target) + p.log.Info("lazy umount", "target", target) for { command := exec.Command("umount", "-l", target) out, err := command.CombinedOutput() if err == nil { continue } - klog.V(6).Infoln(string(out)) + p.log.V(1).Info(string(out)) if !strings.Contains(string(out), "not mounted") && !strings.Contains(string(out), "mountpoint not found") && !strings.Contains(string(out), "no mount point specified") { - klog.Errorf("Could not lazy unmount %q: %v, output: %s", target, err, string(out)) + p.log.Error(err, "Could not lazy unmount", "target", target, "out", string(out)) return err } break @@ -143,32 +150,32 @@ func (p *PodMount) UmountTarget(ctx context.Context, target, podName string) err // cleanup target path if err := k8sMount.CleanupMountPoint(target, p.SafeFormatAndMount.Interface, false); err != nil { - klog.V(5).Infof("Clean mount point error: %v", err) + p.log.Info("Clean mount point error", "error", err) return err } // check mount pod is need to delete - klog.V(5).Infof("JUmount: Delete target ref [%s] and check 
mount pod [%s] is need to delete or not.", target, podName) + p.log.Info("Delete target ref and check mount pod is need to delete or not.", "target", target, "podName", podName) if podName == "" { // mount pod not exist - klog.V(5).Infof("JUmount: Mount pod of target %s not exists.", target) + p.log.Info("Mount pod of target not exists.", "target", target) return nil } pod, err := p.K8sClient.GetPod(ctx, podName, jfsConfig.Namespace) if err != nil && !k8serrors.IsNotFound(err) { - klog.Errorf("JUmount: Get pod %s err: %v", podName, err) + p.log.Error(err, "Get pod err", "podName", podName) return err } // if mount pod not exists. if pod == nil { - klog.V(5).Infof("JUmount: Mount pod %v not exists.", podName) + p.log.Info("Mount pod not exists", "podName", podName) return nil } key := util.GetReferenceKey(target) - klog.V(6).Infof("JUmount: Target %v hash of target %v", target, key) + p.log.V(1).Info("Target hash of target", "target", target, "key", key) err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { po, err := p.K8sClient.GetPod(ctx, pod.Name, pod.Namespace) @@ -177,13 +184,13 @@ func (p *PodMount) UmountTarget(ctx context.Context, target, podName string) err } annotation := po.Annotations if _, ok := annotation[key]; !ok { - klog.V(5).Infof("JUmount: Target ref [%s] in pod [%s] already not exists.", target, pod.Name) + p.log.Info("Target ref in pod already not exists.", "target", target, "podName", pod.Name) return nil } return resource.DelPodAnnotation(ctx, p.K8sClient, pod, []string{key}) }) if err != nil { - klog.Errorf("JUmount: Remove ref of target %s err: %v", target, err) + p.log.Error(err, "Remove ref of target err", "target", target) return err } return nil @@ -196,12 +203,12 @@ func (p *PodMount) JUmount(ctx context.Context, target, podName string) error { if k8serrors.IsNotFound(err) { return nil } - klog.Errorf("JUmount: Get mount pod %s err %v", podName, err) + p.log.Error(err, "Get mount pod err", "podName", podName) return err } if 
GetRef(po) != 0 { - klog.V(5).Infof("JUmount: pod %s still has juicefs- refs.", podName) + p.log.Info("pod still has juicefs- refs.", "podName", podName) return nil } @@ -212,9 +219,9 @@ func (p *PodMount) JUmount(ctx context.Context, target, podName string) error { } if !shouldDelay { // do not set delay delete, delete it now - klog.V(5).Infof("JUmount: pod %s has no juicefs- refs. delete it.", podName) + p.log.Info("pod has no juicefs- refs. delete it.", "podName", podName) if err := p.K8sClient.DeletePod(ctx, po); err != nil { - klog.V(5).Infof("JUmount: Delete pod %s error: %v", podName, err) + p.log.Info("Delete pod error", "podName", podName, "error", err) return err } @@ -225,10 +232,10 @@ func (p *PodMount) JUmount(ctx context.Context, target, podName string) error { // delete related secret secretName := po.Name + "-secret" - klog.V(6).Infof("JUmount: delete related secret of pod %s: %s", podName, secretName) + p.log.V(1).Info("delete related secret of pod", "podName", podName, "secretName", secretName) if err := p.K8sClient.DeleteSecret(ctx, secretName, po.Namespace); !k8serrors.IsNotFound(err) && err != nil { // do not return err if delete secret failed - klog.V(6).Infof("JUmount: Delete secret %s error: %v", secretName, err) + p.log.V(1).Info("Delete secret error", "secretName", secretName, "error", err) } } return nil @@ -238,20 +245,21 @@ func (p *PodMount) JUmount(ctx context.Context, target, podName string) error { } func (p *PodMount) JCreateVolume(ctx context.Context, jfsSetting *jfsConfig.JfsSetting) error { + log := p.log.WithName("JCreateVolume") var exist *batchv1.Job r := builder.NewJobBuilder(jfsSetting, 0) job := r.NewJobForCreateVolume() exist, err := p.K8sClient.GetJob(ctx, job.Name, job.Namespace) if err != nil && k8serrors.IsNotFound(err) { - klog.V(5).Infof("JCreateVolume: create job %s", job.Name) + log.Info("create job", "jobName", job.Name) exist, err = p.K8sClient.CreateJob(ctx, job) if err != nil { - klog.Errorf("JCreateVolume: 
create job %s err: %v", job.Name, err) + log.Error(err, "create job err", "jobName", job.Name) return err } } if err != nil { - klog.Errorf("JCreateVolume: get job %s err: %s", job.Name, err) + log.Error(err, "get job err", "jobName", job.Name) return err } secret := r.NewSecret() @@ -263,27 +271,28 @@ func (p *PodMount) JCreateVolume(ctx context.Context, jfsSetting *jfsConfig.JfsS if err != nil { // fall back if err if e := p.K8sClient.DeleteJob(ctx, job.Name, job.Namespace); e != nil { - klog.Errorf("JCreateVolume: delete job %s error: %v", job.Name, e) + log.Error(e, "delete job error", "jobName", job.Name) } } return err } func (p *PodMount) JDeleteVolume(ctx context.Context, jfsSetting *jfsConfig.JfsSetting) error { + log := p.log.WithName("JDeleteVolume") var exist *batchv1.Job r := builder.NewJobBuilder(jfsSetting, 0) job := r.NewJobForDeleteVolume() exist, err := p.K8sClient.GetJob(ctx, job.Name, job.Namespace) if err != nil && k8serrors.IsNotFound(err) { - klog.V(5).Infof("JDeleteVolume: create job %s", job.Name) + log.Info("create job", "jobName", job.Name) exist, err = p.K8sClient.CreateJob(ctx, job) if err != nil { - klog.Errorf("JDeleteVolume: create job %s err: %v", job.Name, err) + log.Error(err, "create job err", "jobName", job.Name) return err } } if err != nil { - klog.Errorf("JDeleteVolume: get job %s err: %s", job.Name, err) + log.Error(err, "get job err", "jobName", job.Name) return err } secret := r.NewSecret() @@ -295,7 +304,7 @@ func (p *PodMount) JDeleteVolume(ctx context.Context, jfsSetting *jfsConfig.JfsS if err != nil { // fall back if err if e := p.K8sClient.DeleteJob(ctx, job.Name, job.Namespace); e != nil { - klog.Errorf("JDeleteVolume: delete job %s error: %v", job.Name, e) + log.Error(e, "delete job error", "jobName", job.Name) } } return err @@ -309,7 +318,7 @@ func (p *PodMount) genMountPodName(ctx context.Context, jfsSetting *jfsConfig.Jf }} pods, err := p.K8sClient.ListPod(ctx, jfsConfig.Namespace, labelSelector, nil) if err != 
nil { - klog.Errorf("List pods of uniqueId %s and hash %s error: %v", jfsSetting.UniqueId, jfsSetting.HashVal, err) + p.log.Error(err, "List pods of uniqueId", "uniqueId", jfsSetting.UniqueId, "hashVal", jfsSetting.HashVal) return "", err } for _, pod := range pods { @@ -324,7 +333,8 @@ func (p *PodMount) genMountPodName(ctx context.Context, jfsSetting *jfsConfig.Jf } func (p *PodMount) createOrAddRef(ctx context.Context, podName string, jfsSetting *jfsConfig.JfsSetting, appinfo *jfsConfig.AppInfo) (err error) { - klog.V(6).Infof("createOrAddRef: mount pod name %s", podName) + log := p.log.WithName("createOrAddRef") + log.V(1).Info("mount pod", "podName", podName) jfsSetting.MountPath = jfsSetting.MountPath + podName[len(podName)-7:] jfsSetting.SecretName = fmt.Sprintf("juicefs-%s-secret", jfsSetting.UniqueId) // mkdir mountpath @@ -346,16 +356,16 @@ func (p *PodMount) createOrAddRef(ctx context.Context, podName string, jfsSettin // wait for old pod deleted oldPod, err := p.K8sClient.GetPod(waitCtx, podName, jfsConfig.Namespace) if err == nil && oldPod.DeletionTimestamp != nil { - klog.V(6).Infof("createOrAddRef: wait for old mount pod deleted.") + log.V(1).Info("wait for old mount pod deleted.", "podName", podName) time.Sleep(time.Millisecond * 500) continue } else if err != nil { if k8serrors.IsNotFound(err) { // pod not exist, create - klog.V(5).Infof("createOrAddRef: Need to create pod %s.", podName) + log.Info("Need to create pod", "podName", podName) newPod, err := r.NewMountPod(podName) if err != nil { - klog.Errorf("Make new mount pod %s error: %v", podName, err) + log.Error(err, "Make new mount pod error", "podName", podName) return err } newPod.Annotations[key] = jfsSetting.TargetPath @@ -366,14 +376,14 @@ func (p *PodMount) createOrAddRef(ctx context.Context, podName string, jfsSettin } nodes, err := p.K8sClient.ListNode(ctx, &metav1.LabelSelector{MatchLabels: nodeSelector}) if err != nil || len(nodes) != 1 || nodes[0].Name != newPod.Spec.NodeName { - 
klog.Warningf("cannot select node %s by label selector: %v", newPod.Spec.NodeName, err) + log.Info("cannot select node by label selector", "nodeName", newPod.Spec.NodeName, "error", err) } else { newPod.Spec.NodeName = "" newPod.Spec.NodeSelector = nodeSelector if appinfo != nil && appinfo.Name != "" { appPod, err := p.K8sClient.GetPod(ctx, appinfo.Name, appinfo.Namespace) if err != nil { - klog.Warningf("get app pod %s/%s: %v", appinfo.Namespace, appinfo.Name, err) + log.Info("get app pod", "namespace", appinfo.Namespace, "name", appinfo.Name, "error", err) } else { newPod.Spec.Affinity = appPod.Spec.Affinity newPod.Spec.SchedulerName = appPod.Spec.SchedulerName @@ -385,7 +395,7 @@ func (p *PodMount) createOrAddRef(ctx context.Context, podName string, jfsSettin if util.SupportFusePass(jfsSetting.Attr.Image) { if err := fuse.GlobalFds.ServeFuseFd(ctx, newPod.Labels[jfsConfig.PodJuiceHashLabelKey]); err != nil { - klog.Error(err) + log.Error(err, "serve fuse fd error") } } @@ -394,14 +404,14 @@ func (p *PodMount) createOrAddRef(ctx context.Context, podName string, jfsSettin } _, err = p.K8sClient.CreatePod(ctx, newPod) if err != nil { - klog.Errorf("createOrAddRef: Create pod %s err: %v", podName, err) + log.Error(err, "Create pod err", "podName", podName) } return err } else if k8serrors.IsTimeout(err) { return fmt.Errorf("mount %v failed: mount pod %s deleting timeout", jfsSetting.VolumeId, podName) } // unexpect error - klog.Errorf("createOrAddRef: Get pod %s err: %v", podName, err) + log.Error(err, "Get pod err", "podName", podName) return err } // pod exist, add refs @@ -413,6 +423,7 @@ func (p *PodMount) createOrAddRef(ctx context.Context, podName string, jfsSettin } func (p *PodMount) waitUtilMountReady(ctx context.Context, jfsSetting *jfsConfig.JfsSetting, podName string) error { + ctx = util.WithLog(ctx, p.log) err := resource.WaitUtilMountReady(ctx, podName, jfsSetting.MountPath, defaultCheckTimeout) if err == nil { return nil @@ -420,13 +431,14 @@ func (p 
*PodMount) waitUtilMountReady(ctx context.Context, jfsSetting *jfsConfig // mountpoint not ready, get mount pod log for detail log, err := p.getErrContainerLog(ctx, podName) if err != nil { - klog.Errorf("Get pod %s log error %v", podName, err) + p.log.Error(err, "Get pod log error", "podName", podName) return fmt.Errorf("mount %v at %v failed: mount isn't ready in 30 seconds", util.StripPasswd(jfsSetting.Source), jfsSetting.MountPath) } return fmt.Errorf("mount %v at %v failed, mountpod: %s, failed log: %v", util.StripPasswd(jfsSetting.Source), jfsSetting.MountPath, podName, log) } func (p *PodMount) waitUtilJobCompleted(ctx context.Context, jobName string) error { + log := p.log.WithName("waitUtilJobCompleted") // Wait until the job is completed waitCtx, waitCancel := context.WithTimeout(ctx, 40*time.Second) defer waitCancel() @@ -434,22 +446,22 @@ func (p *PodMount) waitUtilJobCompleted(ctx context.Context, jobName string) err job, err := p.K8sClient.GetJob(waitCtx, jobName, jfsConfig.Namespace) if err != nil { if k8serrors.IsNotFound(err) { - klog.Infof("waitUtilJobCompleted: Job %s is completed and been recycled", jobName) + log.Info("waitUtilJobCompleted: Job is completed and been recycled", "jobName", jobName) return nil } if waitCtx.Err() == context.DeadlineExceeded || waitCtx.Err() == context.Canceled { - klog.V(6).Infof("job %s timeout", jobName) + log.V(1).Info("job timeout", "jobName", jobName) break } return fmt.Errorf("waitUtilJobCompleted: Get job %v failed: %v", jobName, err) } if resource.IsJobCompleted(job) { - klog.V(5).Infof("waitUtilJobCompleted: Job %s is completed", jobName) + log.Info("waitUtilJobCompleted: Job is completed", "jobName", jobName) if resource.IsJobShouldBeRecycled(job) { // try to delete job - klog.Infof("job %s completed but not be recycled automatically, delete it", jobName) + log.Info("job completed but not be recycled automatically, delete it", "jobName", jobName) if err := p.K8sClient.DeleteJob(ctx, jobName, 
jfsConfig.Namespace); err != nil { - klog.Errorf("delete job %s error %v", jobName, err) + log.Error(err, "delete job error", "jobName", jobName) } } return nil @@ -465,15 +477,16 @@ func (p *PodMount) waitUtilJobCompleted(ctx context.Context, jobName string) err if err != nil || len(pods) == 0 { return fmt.Errorf("waitUtilJobCompleted: get pod from job %s error %v", jobName, err) } - log, err := p.getNotCompleteCnLog(ctx, pods[0].Name) + cnlog, err := p.getNotCompleteCnLog(ctx, pods[0].Name) if err != nil { return fmt.Errorf("waitUtilJobCompleted: get pod %s log error %v", pods[0].Name, err) } - return fmt.Errorf("waitUtilJobCompleted: job %s isn't completed: %v", jobName, log) + return fmt.Errorf("waitUtilJobCompleted: job %s isn't completed: %v", jobName, cnlog) } func (p *PodMount) AddRefOfMount(ctx context.Context, target string, podName string) error { - klog.V(5).Infof("addRefOfMount: Add target ref in mount pod. mount pod: [%s], target: [%s]", podName, target) + log := p.log.WithName("AddRefOfMount") + log.Info("Add target ref in mount pod.", "podName", podName, "target", target) // add volumeId ref in pod annotation key := util.GetReferenceKey(target) @@ -487,7 +500,7 @@ func (p *PodMount) AddRefOfMount(ctx context.Context, target string, podName str } annotation := exist.Annotations if _, ok := annotation[key]; ok { - klog.V(5).Infof("addRefOfMount: Target ref [%s] in pod [%s] already exists.", target, podName) + log.Info("Target ref in pod already exists.", "target", target, "podName", podName) return nil } if annotation == nil { @@ -499,7 +512,7 @@ func (p *PodMount) AddRefOfMount(ctx context.Context, target string, podName str return resource.ReplacePodAnnotation(ctx, p.K8sClient, exist, annotation) }) if err != nil { - klog.Errorf("addRefOfMount: Add target ref in mount pod %s error: %v", podName, err) + log.Error(err, "Add target ref in mount pod error", "podName", podName) return err } return nil @@ -511,7 +524,7 @@ func (p *PodMount) 
setUUIDAnnotation(ctx context.Context, podName string, uuid s if err != nil { return err } - klog.Infof("setUUIDAnnotation: set pod %s annotation %s=%s", podName, jfsConfig.JuiceFSUUID, uuid) + p.log.Info("set pod annotation", "podName", podName, "key", jfsConfig.JuiceFSUUID, "uuid", uuid) return resource.AddPodAnnotation(ctx, p.K8sClient, pod, map[string]string{jfsConfig.JuiceFSUUID: uuid}) } @@ -521,7 +534,7 @@ func (p *PodMount) setMountLabel(ctx context.Context, uniqueId, mountPodName str if err != nil { return err } - klog.Infof("setMountLabel: set mount info in pod %s", podName) + p.log.Info("set mount info in pod", "podName", podName) if err := resource.AddPodLabel(ctx, p.K8sClient, pod, map[string]string{jfsConfig.UniqueId: ""}); err != nil { return err } @@ -542,7 +555,7 @@ func (p *PodMount) GetJfsVolUUID(ctx context.Context, jfsSetting *jfsConfig.JfsS stdout, err := statusCmd.CombinedOutput() if err != nil { re := string(stdout) - klog.Infof("juicefs status error: %v, output: '%s'", err, re) + p.log.Info("juicefs status error", "error", err, "output", re) if cmdCtx.Err() == context.DeadlineExceeded { re = fmt.Sprintf("juicefs status %s timed out", 8*defaultCheckTimeout) return "", errors.New(re) @@ -561,9 +574,10 @@ func (p *PodMount) GetJfsVolUUID(ctx context.Context, jfsSetting *jfsConfig.JfsS } func (p *PodMount) CleanCache(ctx context.Context, image string, id string, volumeId string, cacheDirs []string) error { + log := p.log.WithName("CleanCache") jfsSetting, err := jfsConfig.ParseSetting(map[string]string{"name": id}, nil, []string{}, true, nil, nil) if err != nil { - klog.Errorf("CleanCache: parse jfs setting err: %v", err) + log.Error(err, "parse jfs setting err") return err } jfsSetting.Attr.Image = image @@ -572,29 +586,30 @@ func (p *PodMount) CleanCache(ctx context.Context, image string, id string, volu jfsSetting.UUID = id r := builder.NewJobBuilder(jfsSetting, 0) job := r.NewJobForCleanCache() - klog.V(6).Infof("Clean cache job: %v", job) 
+ log.V(1).Info("Clean cache job", "job", job) _, err = p.K8sClient.GetJob(ctx, job.Name, job.Namespace) if err != nil && k8serrors.IsNotFound(err) { - klog.V(5).Infof("CleanCache: create job %s", job.Name) + log.Info("create job", "jobName", job.Name) _, err = p.K8sClient.CreateJob(ctx, job) } if err != nil { - klog.Errorf("CleanCache: get or create job %s err: %s", job.Name, err) + log.Error(err, "get or create job err", "jobName", job.Name) return err } err = p.waitUtilJobCompleted(ctx, job.Name) if err != nil { - klog.Errorf("CleanCache: wait for job completed err and fall back to delete job\n %v", err) + log.Error(err, "wait for job completed err and fall back to delete job") // fall back if err if e := p.K8sClient.DeleteJob(ctx, job.Name, job.Namespace); e != nil { - klog.Errorf("CleanCache: delete job %s error: %v", job.Name, e) + log.Error(e, "delete job error", "jobName", job.Name) } } return nil } func (p *PodMount) createOrUpdateSecret(ctx context.Context, secret *corev1.Secret) error { - klog.V(5).Infof("createOrUpdateSecret: %s, %s", secret.Name, secret.Namespace) + log := p.log.WithName("createOrUpdateSecret") + log.Info("secret", "name", secret.Name, "namespace", secret.Namespace) err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { oldSecret, err := p.K8sClient.GetSecret(ctx, secret.Name, jfsConfig.Namespace) if err != nil { @@ -625,7 +640,7 @@ func (p *PodMount) createOrUpdateSecret(ctx context.Context, secret *corev1.Secr return p.K8sClient.UpdateSecret(ctx, oldSecret) }) if err != nil { - klog.Errorf("createOrUpdateSecret: secret %s: %v", secret.Name, err) + log.Error(err, "create or update secret error", "secretName", secret.Name) return err } return nil @@ -688,7 +703,7 @@ func GenPodNameByUniqueId(uniqueId string, withRandom bool) string { return fmt.Sprintf("juicefs-%s-%s-%s", jfsConfig.NodeName, uniqueId, util.RandStringRunes(6)) } -func GenHashOfSetting(setting jfsConfig.JfsSetting) string { +func 
GenHashOfSetting(log klog.Logger, setting jfsConfig.JfsSetting) string { // target path should not affect hash val setting.TargetPath = "" setting.VolumeId = "" @@ -697,6 +712,6 @@ func GenHashOfSetting(setting jfsConfig.JfsSetting) string { h := sha256.New() h.Write(settingStr) val := hex.EncodeToString(h.Sum(nil))[:63] - klog.V(6).Infof("jfsSetting hash: %s", val) + log.V(1).Info("get jfsSetting hash", "hashVal", val) return val } diff --git a/pkg/juicefs/mount/pod_mount_test.go b/pkg/juicefs/mount/pod_mount_test.go index d9e9b2a272..2b1797f88d 100644 --- a/pkg/juicefs/mount/pod_mount_test.go +++ b/pkg/juicefs/mount/pod_mount_test.go @@ -32,6 +32,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/fake" + "k8s.io/klog/v2" k8sexec "k8s.io/utils/exec" "k8s.io/utils/mount" @@ -227,6 +228,7 @@ func TestAddRefOfMount(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { p := &PodMount{ + log: klog.NewKlogr(), SafeFormatAndMount: mount.SafeFormatAndMount{}, K8sClient: &k8sclient.K8sClient{Interface: fakeClientSet}, } @@ -260,6 +262,7 @@ func TestAddRefOfMountWithMock(t *testing.T) { }) defer patch1.Reset() p := &PodMount{ + log: klog.NewKlogr(), K8sClient: &k8sclient.K8sClient{Interface: fake.NewSimpleClientset()}, } ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) @@ -339,6 +342,7 @@ func TestJUmount(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { p := &PodMount{ + log: klog.NewKlogr(), SafeFormatAndMount: mount.SafeFormatAndMount{}, K8sClient: &k8sclient.K8sClient{ Interface: fakeClientSet, @@ -388,6 +392,7 @@ func TestJUmountWithMock(t *testing.T) { fakeClient := fake.NewSimpleClientset() p := &PodMount{ + log: klog.NewKlogr(), SafeFormatAndMount: mount.SafeFormatAndMount{}, K8sClient: &k8sclient.K8sClient{ Interface: fakeClient, @@ -417,6 +422,7 @@ func TestJUmountWithMock(t *testing.T) { fakeClient := 
fake.NewSimpleClientset() p := &PodMount{ + log: klog.NewKlogr(), SafeFormatAndMount: mount.SafeFormatAndMount{}, K8sClient: &k8sclient.K8sClient{ Interface: fakeClient, @@ -447,6 +453,7 @@ func TestJUmountWithMock(t *testing.T) { fakeClient := fake.NewSimpleClientset() p := &PodMount{ + log: klog.NewKlogr(), SafeFormatAndMount: mount.SafeFormatAndMount{}, K8sClient: &k8sclient.K8sClient{ Interface: fakeClient, @@ -514,6 +521,7 @@ func TestUmountTarget(t *testing.T) { fakeClient := fake.NewSimpleClientset() p := &PodMount{ + log: klog.NewKlogr(), SafeFormatAndMount: mount.SafeFormatAndMount{}, K8sClient: &k8sclient.K8sClient{ Interface: fakeClient, @@ -551,6 +559,7 @@ func TestUmountTarget(t *testing.T) { fakeClient := fake.NewSimpleClientset() p := &PodMount{ + log: klog.NewKlogr(), SafeFormatAndMount: mount.SafeFormatAndMount{}, K8sClient: &k8sclient.K8sClient{ Interface: fakeClient, @@ -647,11 +656,12 @@ func TestWaitUntilMount(t *testing.T) { }) defer patch.Reset() p := &PodMount{ + log: klog.NewKlogr(), SafeFormatAndMount: mount.SafeFormatAndMount{}, K8sClient: &k8sclient.K8sClient{Interface: fakeClientSet}, } if tt.pod != nil { - hashVal := GenHashOfSetting(*tt.args.jfsSetting) + hashVal := GenHashOfSetting(klog.NewKlogr(), *tt.args.jfsSetting) tt.args.jfsSetting.HashVal = hashVal tt.pod.Labels = map[string]string{ jfsConfig.PodTypeKey: jfsConfig.PodTypeValue, @@ -700,6 +710,7 @@ func TestWaitUntilMountWithMock(t *testing.T) { fakeClient := fake.NewSimpleClientset() p := &PodMount{ + log: klog.NewKlogr(), SafeFormatAndMount: mount.SafeFormatAndMount{}, K8sClient: &k8sclient.K8sClient{Interface: fakeClient}, } @@ -760,6 +771,7 @@ func TestNewPodMount(t *testing.T) { client: nil, }, want: &PodMount{ + log: klog.NewKlogr().WithName("pod-mount"), SafeFormatAndMount: mount.SafeFormatAndMount{ Interface: mount.New(""), Exec: k8sexec.New(), @@ -858,7 +870,7 @@ func TestGenHashOfSetting(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { 
- got := GenHashOfSetting(tt.args.setting) + got := GenHashOfSetting(klog.NewKlogr(), tt.args.setting) if got != tt.want { t.Errorf("GenHashOfSetting() got = %v, want %v", got, tt.want) } diff --git a/pkg/juicefs/mount/process_mount.go b/pkg/juicefs/mount/process_mount.go index 9bb4495ca9..7d68043118 100644 --- a/pkg/juicefs/mount/process_mount.go +++ b/pkg/juicefs/mount/process_mount.go @@ -27,7 +27,7 @@ import ( "time" _ "github.com/golang/mock/mockgen/model" - "k8s.io/klog" + "k8s.io/klog/v2" k8sMount "k8s.io/utils/mount" jfsConfig "github.com/juicedata/juicefs-csi-driver/pkg/config" @@ -37,16 +37,18 @@ import ( const defaultCheckTimeout = 2 * time.Second type ProcessMount struct { + log klog.Logger k8sMount.SafeFormatAndMount } var _ MntInterface = &ProcessMount{} func NewProcessMount(mounter k8sMount.SafeFormatAndMount) MntInterface { - return &ProcessMount{mounter} + return &ProcessMount{klog.NewKlogr().WithName("process-mount"), mounter} } func (p *ProcessMount) JCreateVolume(ctx context.Context, jfsSetting *jfsConfig.JfsSetting) error { + log := p.log.WithName("JCreateVolume") // 1. mount juicefs options := util.StripReadonlyOption(jfsSetting.Options) err := p.jmount(ctx, jfsSetting.Source, jfsSetting.MountPath, jfsSetting.Storage, options, jfsSetting.Envs) @@ -57,7 +59,7 @@ func (p *ProcessMount) JCreateVolume(ctx context.Context, jfsSetting *jfsConfig. // 2. create subPath volume volPath := filepath.Join(jfsSetting.MountPath, jfsSetting.SubPath) - klog.V(6).Infof("JCreateVolume: checking %q exists in %v", volPath, jfsSetting.MountPath) + log.V(1).Info("checking exists", "volPath", volPath, "mountPath", jfsSetting.MountPath) var exists bool if err := util.DoWithTimeout(ctx, defaultCheckTimeout, func() (err error) { exists, err = k8sMount.PathExists(volPath) @@ -66,7 +68,7 @@ func (p *ProcessMount) JCreateVolume(ctx context.Context, jfsSetting *jfsConfig. 
return fmt.Errorf("could not check volume path %q exists: %v", volPath, err) } if !exists { - klog.V(5).Infof("JCreateVolume: volume not existed, create %s", jfsSetting.MountPath) + log.Info("volume not existed, create it", "mountPath", jfsSetting.MountPath) if err := util.DoWithTimeout(ctx, defaultCheckTimeout, func() (err error) { return os.MkdirAll(volPath, os.FileMode(0777)) }); err != nil { @@ -98,6 +100,7 @@ func (p *ProcessMount) JCreateVolume(ctx context.Context, jfsSetting *jfsConfig. } func (p *ProcessMount) JDeleteVolume(ctx context.Context, jfsSetting *jfsConfig.JfsSetting) error { + log := p.log.WithName("JDeleteVolume") // 1. mount juicefs err := p.jmount(ctx, jfsSetting.Source, jfsSetting.MountPath, jfsSetting.Storage, jfsSetting.Options, jfsSetting.Envs) if err != nil { @@ -116,7 +119,7 @@ func (p *ProcessMount) JDeleteVolume(ctx context.Context, jfsSetting *jfsConfig. return fmt.Errorf("could not check volume path %q exists: %v", volPath, err) } else if existed { stdoutStderr, err := p.RmrDir(ctx, volPath, jfsSetting.IsCe) - klog.V(5).Infof("DeleteVol: rmr output is '%s'", stdoutStderr) + log.Info("rmr output", "output", stdoutStderr) if err != nil { return fmt.Errorf("could not delete volume path %q: %v", volPath, err) } @@ -145,15 +148,15 @@ func (p *ProcessMount) JMount(ctx context.Context, _ *jfsConfig.AppInfo, jfsSett func (p *ProcessMount) jmount(ctx context.Context, source, mountPath, storage string, options []string, extraEnvs map[string]string) error { if !strings.Contains(source, "://") { - klog.V(5).Infof("eeMount: mount %v at %v", source, mountPath) + p.log.Info("eeMount", "source", source, "mountPath", mountPath) err := p.Mount(source, mountPath, jfsConfig.FsType, options) if err != nil { return fmt.Errorf("could not mount %q at %q: %v", source, mountPath, err) } - klog.V(5).Infof("eeMount mount success.") + p.log.Info("eeMount mount success.") return nil } - klog.V(5).Infof("ceMount: mount %v at %v", util.StripPasswd(source), 
mountPath) + p.log.Info("ceMount", "source", util.StripPasswd(source), "mountPath", mountPath) mountArgs := []string{source, mountPath} if len(options) > 0 { @@ -168,7 +171,7 @@ func (p *ProcessMount) jmount(ctx context.Context, source, mountPath, storage st }); err != nil { return fmt.Errorf("could not check existence of dir %q: %v", mountPath, err) } else if !exist { - klog.V(5).Infof("jmount: volume not existed, create %s", mountPath) + p.log.Info("volume not existed, create it", "mountPath", mountPath) if err := util.DoWithTimeout(ctx, defaultCheckTimeout, func() (err error) { return os.MkdirAll(mountPath, os.FileMode(0755)) }); err != nil { @@ -185,10 +188,10 @@ func (p *ProcessMount) jmount(ctx context.Context, source, mountPath, storage st } else if !notMounted { err = p.Unmount(mountPath) if err != nil { - klog.V(5).Infof("Unmount before mount failed: %v", err) + p.log.Info("Unmount before mount failed", "error", err) return err } - klog.V(5).Infof("Unmount %v", mountPath) + p.log.Info("Unmount", "mountPath", mountPath) } envs := append(syscall.Environ(), "JFS_FOREGROUND=1") @@ -213,7 +216,7 @@ func (p *ProcessMount) jmount(ctx context.Context, source, mountPath, storage st if err == context.DeadlineExceeded || err == context.Canceled { break } - klog.V(5).Infof("Stat mount path %v failed: %v", mountPath, err) + p.log.Info("Stat mount path failed", "mountPath", mountPath, "error", err) time.Sleep(time.Millisecond * 500) continue } @@ -221,9 +224,9 @@ func (p *ProcessMount) jmount(ctx context.Context, source, mountPath, storage st if st.Ino == 1 { return nil } - klog.V(5).Infof("Mount point %v is not ready", mountPath) + p.log.Info("Mount point is not ready", "mountPath", mountPath) } else { - klog.V(5).Info("Cannot reach here") + p.log.Info("Cannot reach here") } time.Sleep(time.Millisecond * 500) } @@ -231,6 +234,7 @@ func (p *ProcessMount) jmount(ctx context.Context, source, mountPath, storage st } func (p *ProcessMount) GetMountRef(ctx context.Context, 
target, podName string) (int, error) { + log := p.log.WithName("GetMountRef") var refs []string var corruptedMnt bool @@ -242,7 +246,7 @@ func (p *ProcessMount) GetMountRef(ctx context.Context, target, podName string) }) if err == nil { if !exists { - klog.V(5).Infof("ProcessUmount: %s target not exists", target) + log.Info("target not exists", "target", target) return 0, nil } var notMnt bool @@ -254,7 +258,7 @@ func (p *ProcessMount) GetMountRef(ctx context.Context, target, podName string) return 0, fmt.Errorf("check target path is mountpoint failed: %q", err) } if notMnt { // target exists but not a mountpoint - klog.V(5).Infof("ProcessUmount: %s target not mounted", target) + log.Info("target not mounted", "target", target) return 0, nil } } else if corruptedMnt = k8sMount.IsCorruptedMnt(err); !corruptedMnt { @@ -276,6 +280,7 @@ func (p *ProcessMount) UmountTarget(ctx context.Context, target, podName string) // JUmount umount targetPath func (p *ProcessMount) JUmount(ctx context.Context, target, podName string) error { + log := p.log.WithName("JUmount") var refs []string var corruptedMnt bool @@ -287,7 +292,7 @@ func (p *ProcessMount) JUmount(ctx context.Context, target, podName string) erro }) if err == nil { if !exists { - klog.V(5).Infof("ProcessUmount: %s target not exists", target) + log.Info("target not exists", "target", target) return nil } var notMnt bool @@ -299,7 +304,7 @@ func (p *ProcessMount) JUmount(ctx context.Context, target, podName string) erro return fmt.Errorf("check target path is mountpoint failed: %q", err) } if notMnt { // target exists but not a mountpoint - klog.V(5).Infof("ProcessUmount: %s target not mounted", target) + log.Info("target not mounted", "target", target) return nil } } else if corruptedMnt = k8sMount.IsCorruptedMnt(err); !corruptedMnt { @@ -311,7 +316,7 @@ func (p *ProcessMount) JUmount(ctx context.Context, target, podName string) erro return fmt.Errorf("fail to get mount device refs: %q", err) } - 
klog.V(5).Infof("ProcessUmount: unmounting target %s", target) + log.Info("unmounting target", "target", target) if err := p.Unmount(target); err != nil { return fmt.Errorf("could not unmount %q: %v", target, err) } @@ -319,9 +324,9 @@ func (p *ProcessMount) JUmount(ctx context.Context, target, podName string) erro // we can only unmount this when only one is left // since the PVC might be used by more than one container if err == nil && len(refs) == 1 { - klog.V(5).Infof("ProcessUmount: unmounting ref %s for target %s", refs[0], target) + log.Info("unmounting ref for target", "ref", refs[0], "target", target) if err = p.Unmount(refs[0]); err != nil { - klog.V(5).Infof("ProcessUmount: error unmounting mount ref %s, %v", refs[0], err) + log.Info("error unmounting mount ref", "ref", refs[0], "error", err) } } return err @@ -340,12 +345,12 @@ func (p *ProcessMount) CleanCache(ctx context.Context, _ string, id string, _ st existed, err = k8sMount.PathExists(rawPath) return }); err != nil { - klog.Errorf("Could not check raw path %q exists: %v", rawPath, err) + p.log.Error(err, "Could not check raw path exists", "rawPath", rawPath) return err } else if existed { err = os.RemoveAll(rawPath) if err != nil { - klog.Errorf("Could not cleanup cache raw path %q: %v", rawPath, err) + p.log.Error(err, "Could not cleanup cache raw path", "rawPath", rawPath) return err } } @@ -354,7 +359,7 @@ func (p *ProcessMount) CleanCache(ctx context.Context, _ string, id string, _ st } func (p *ProcessMount) RmrDir(ctx context.Context, directory string, isCeMount bool) ([]byte, error) { - klog.V(5).Infof("RmrDir: removing directory recursively: %q", directory) + p.log.Info("removing directory recursively", "directory", directory) if isCeMount { return p.Exec.CommandContext(ctx, jfsConfig.CeCliPath, "rmr", directory).CombinedOutput() } diff --git a/pkg/juicefs/mount/process_mount_test.go b/pkg/juicefs/mount/process_mount_test.go index 00974b5058..b575be441b 100644 ---
a/pkg/juicefs/mount/process_mount_test.go +++ b/pkg/juicefs/mount/process_mount_test.go @@ -29,6 +29,7 @@ import ( . "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" . "github.com/smartystreets/goconvey/convey" + "k8s.io/klog/v2" k8sexec "k8s.io/utils/exec" k8sMount "k8s.io/utils/mount" @@ -52,6 +53,7 @@ func TestNewProcessMount(t *testing.T) { setting: nil, }, want: &ProcessMount{ + klog.NewKlogr().WithName("process-mount"), k8sMount.SafeFormatAndMount{ Interface: k8sMount.New(""), Exec: k8sexec.New(), @@ -99,6 +101,7 @@ func TestProcessMount_JUmount(t *testing.T) { } mockMounter.EXPECT().Unmount(targetPath).Return(nil) p := &ProcessMount{ + log: klog.NewKlogr(), SafeFormatAndMount: *mounter, } if err := p.JUmount(context.TODO(), targetPath, podName); err != nil { @@ -111,6 +114,7 @@ func TestProcessMount_JUmount(t *testing.T) { }) defer patch1.Reset() p := &ProcessMount{ + log: klog.NewKlogr(), SafeFormatAndMount: k8sMount.SafeFormatAndMount{}, } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ -143,6 +147,7 @@ func TestProcessMount_JUmount(t *testing.T) { } mockMounter.EXPECT().Unmount(targetPath).Return(errors.New("test")) p := &ProcessMount{ + log: klog.NewKlogr(), SafeFormatAndMount: *mounter, } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ -176,6 +181,7 @@ func TestProcessMount_JMount(t *testing.T) { } mockMounter.EXPECT().Mount(eeSource, targetPath, jfsConfig.FsType, nil).Return(nil) p := &ProcessMount{ + log: klog.NewKlogr(), SafeFormatAndMount: *mounter, } if err := p.JMount(context.TODO(), nil, &jfsConfig.JfsSetting{ @@ -204,6 +210,7 @@ func TestProcessMount_JMount(t *testing.T) { } mockMounter.EXPECT().Mount(eeSource, targetPath, jfsConfig.FsType, nil).Return(errors.New("test")) p := &ProcessMount{ + log: klog.NewKlogr(), SafeFormatAndMount: *mounter, } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ -259,6 +266,7 @@ func TestProcessMount_JMount(t 
*testing.T) { mockMounter.EXPECT().IsLikelyNotMountPoint(targetPath).Return(false, nil) mockMounter.EXPECT().Unmount(targetPath).Return(nil) p := &ProcessMount{ + log: klog.NewKlogr(), SafeFormatAndMount: *mounter, } if err := p.JMount(context.TODO(), nil, &jfsConfig.JfsSetting{ @@ -300,6 +308,7 @@ func TestProcessMount_JMount(t *testing.T) { } mockMounter.EXPECT().IsLikelyNotMountPoint(targetPath).Return(true, nil) p := &ProcessMount{ + log: klog.NewKlogr(), SafeFormatAndMount: *mounter, } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ -343,6 +352,7 @@ func TestProcessMount_JMount(t *testing.T) { } mockMounter.EXPECT().IsLikelyNotMountPoint(targetPath).Return(true, nil) p := &ProcessMount{ + log: klog.NewKlogr(), SafeFormatAndMount: *mounter, } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ -362,6 +372,7 @@ func TestProcessMount_JMount(t *testing.T) { }) defer patch1.Reset() p := &ProcessMount{ + log: klog.NewKlogr(), SafeFormatAndMount: k8sMount.SafeFormatAndMount{}, } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ -384,6 +395,7 @@ func TestProcessMount_JMount(t *testing.T) { }) defer patch2.Reset() p := &ProcessMount{ + log: klog.NewKlogr(), SafeFormatAndMount: k8sMount.SafeFormatAndMount{}, } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ -411,6 +423,7 @@ func TestProcessMount_JMount(t *testing.T) { } mockMounter.EXPECT().IsLikelyNotMountPoint(targetPath).Return(false, errors.New("test")) p := &ProcessMount{ + log: klog.NewKlogr(), SafeFormatAndMount: *mounter, } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ -439,6 +452,7 @@ func TestProcessMount_JMount(t *testing.T) { mockMounter.EXPECT().IsLikelyNotMountPoint(targetPath).Return(false, nil) mockMounter.EXPECT().Unmount(targetPath).Return(errors.New("test")) p := &ProcessMount{ + log: klog.NewKlogr(), SafeFormatAndMount: *mounter, } diff --git 
a/pkg/k8sclient/client.go b/pkg/k8sclient/client.go index 178159db3c..0b0c644af6 100644 --- a/pkg/k8sclient/client.go +++ b/pkg/k8sclient/client.go @@ -41,13 +41,15 @@ import ( "k8s.io/client-go/rest" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/remotecommand" - "k8s.io/klog" + "k8s.io/klog/v2" ) const ( timeout = 10 * time.Second ) +var clientLog = klog.NewKlogr().WithName("client") + type PatchListValue struct { Op string `json:"op"` Path string `json:"path"` @@ -114,30 +116,30 @@ func NewClient() (*K8sClient, error) { func (k *K8sClient) CreatePod(ctx context.Context, pod *corev1.Pod) (*corev1.Pod, error) { if pod == nil { - klog.V(5).Info("Create pod: pod is nil") + clientLog.Info("Create pod: pod is nil") return nil, nil } - klog.V(6).Infof("Create pod %s", pod.Name) + clientLog.V(1).Info("Create pod", "name", pod.Name) mntPod, err := k.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{}) if err != nil { - klog.V(5).Infof("Can't create pod %s: %v", pod.Name, err) + clientLog.Info("Can't create pod", "name", pod.Name, "error", err) return nil, err } return mntPod, nil } func (k *K8sClient) GetPod(ctx context.Context, podName, namespace string) (*corev1.Pod, error) { - klog.V(6).Infof("Get pod %s", podName) + clientLog.V(1).Info("Get pod", "name", podName) mntPod, err := k.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{}) if err != nil { - klog.V(6).Infof("Can't get pod %s namespace %s: %v", podName, namespace, err) + clientLog.V(1).Info("Can't get pod", "name", podName, "namespace", namespace, "error", err) return nil, err } return mntPod, nil } func (k *K8sClient) ListPod(ctx context.Context, namespace string, labelSelector *metav1.LabelSelector, filedSelector *fields.Set) ([]corev1.Pod, error) { - klog.V(6).Infof("List pod by labelSelector %v, fieldSelector %v", labelSelector, filedSelector) + clientLog.V(1).Info("List pod", "labelSelector", labelSelector.String(), "fieldSelector", filedSelector) listOptions := 
metav1.ListOptions{} if k.enableAPIServerListCache { // set ResourceVersion="0" means the list response is returned from apiserver cache instead of etcd @@ -156,14 +158,14 @@ func (k *K8sClient) ListPod(ctx context.Context, namespace string, labelSelector podList, err := k.CoreV1().Pods(namespace).List(ctx, listOptions) if err != nil { - klog.V(6).Infof("Can't list pod in namespace %s by labelSelector %v: %v", namespace, labelSelector, err) + clientLog.V(1).Info("Can't list pod", "namespace", namespace, "labelSelector", labelSelector.String(), "error", err) return nil, err } return podList.Items, nil } func (k *K8sClient) ListNode(ctx context.Context, labelSelector *metav1.LabelSelector) ([]corev1.Node, error) { - klog.V(6).Infof("List node by labelSelector %v", labelSelector) + clientLog.V(1).Info("List node by labelSelector", "labelSelector", labelSelector.String()) listOptions := metav1.ListOptions{} if labelSelector != nil { labelMap, err := metav1.LabelSelectorAsSelector(labelSelector) @@ -175,14 +177,14 @@ func (k *K8sClient) ListNode(ctx context.Context, labelSelector *metav1.LabelSel nodeList, err := k.CoreV1().Nodes().List(ctx, listOptions) if err != nil { - klog.V(6).Infof("Can't list node by labelSelector %v: %v", labelSelector, err) + clientLog.V(1).Info("Can't list node", "labelSelector", labelSelector.String(), "error", err) return nil, err } return nodeList.Items, nil } func (k *K8sClient) GetPodLog(ctx context.Context, podName, namespace, containerName string) (string, error) { - klog.V(6).Infof("Get pod %s log", podName) + clientLog.V(1).Info("Get pod log", "name", podName) tailLines := int64(20) req := k.CoreV1().Pods(namespace).GetLogs(podName, &corev1.PodLogOptions{ Container: containerName, @@ -205,38 +207,38 @@ func (k *K8sClient) GetPodLog(ctx context.Context, podName, namespace, container func (k *K8sClient) PatchPod(ctx context.Context, pod *corev1.Pod, data []byte, pt types.PatchType) error { if pod == nil { - klog.V(5).Info("Patch pod: 
pod is nil") + clientLog.Info("Patch pod: pod is nil") return nil } - klog.V(6).Infof("Patch pod %v", pod.Name) + clientLog.V(1).Info("Patch pod", "name", pod.Name) _, err := k.CoreV1().Pods(pod.Namespace).Patch(ctx, pod.Name, pt, data, metav1.PatchOptions{}) return err } func (k *K8sClient) UpdatePod(ctx context.Context, pod *corev1.Pod) error { if pod == nil { - klog.V(5).Info("Update pod: pod is nil") + clientLog.Info("Update pod: pod is nil") return nil } - klog.V(6).Infof("Update pod %v", pod.Name) + clientLog.V(1).Info("Update pod", "name", pod.Name) _, err := k.CoreV1().Pods(pod.Namespace).Update(ctx, pod, metav1.UpdateOptions{}) return err } func (k *K8sClient) DeletePod(ctx context.Context, pod *corev1.Pod) error { if pod == nil { - klog.V(5).Info("Delete pod: pod is nil") + clientLog.Info("Delete pod: pod is nil") return nil } - klog.V(6).Infof("Delete pod %v", pod.Name) + clientLog.V(1).Info("Delete pod", "name", pod.Name) return k.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}) } func (k *K8sClient) GetSecret(ctx context.Context, secretName, namespace string) (*corev1.Secret, error) { - klog.V(6).Infof("Get secret %s", secretName) + clientLog.V(1).Info("Get secret", "name", secretName) secret, err := k.CoreV1().Secrets(namespace).Get(ctx, secretName, metav1.GetOptions{}) if err != nil { - klog.V(6).Infof("Can't get secret %s namespace %s: %v", secretName, namespace, err) + clientLog.V(1).Info("Can't get secret", "name", secretName, "namespace", namespace, "error", err) return nil, err } return secret, nil @@ -244,48 +246,48 @@ func (k *K8sClient) GetSecret(ctx context.Context, secretName, namespace string) func (k *K8sClient) CreateSecret(ctx context.Context, secret *corev1.Secret) (*corev1.Secret, error) { if secret == nil { - klog.V(5).Info("Create secret: secret is nil") + clientLog.Info("Create secret: secret is nil") return nil, nil } - klog.V(6).Infof("Create secret %s", secret.Name) - secret, err := 
k.CoreV1().Secrets(secret.Namespace).Create(ctx, secret, metav1.CreateOptions{}) + clientLog.V(1).Info("Create secret", "name", secret.Name) + s, err := k.CoreV1().Secrets(secret.Namespace).Create(ctx, secret, metav1.CreateOptions{}) if err != nil { - klog.V(5).Infof("Can't create secret %s: %v", secret.Name, err) + clientLog.Info("Can't create secret", "name", secret.Name, "error", err) return nil, err } - return secret, nil + return s, nil } func (k *K8sClient) UpdateSecret(ctx context.Context, secret *corev1.Secret) error { if secret == nil { - klog.V(5).Info("Update secret: secret is nil") + clientLog.Info("Update secret: secret is nil") return nil } - klog.V(6).Infof("Update secret %v", secret.Name) + clientLog.V(1).Info("Update secret", "name", secret.Name) _, err := k.CoreV1().Secrets(secret.Namespace).Update(ctx, secret, metav1.UpdateOptions{}) return err } func (k *K8sClient) DeleteSecret(ctx context.Context, secretName string, namespace string) error { - klog.V(6).Infof("Delete secret %s", secretName) + clientLog.V(1).Info("Delete secret", "name", secretName) return k.CoreV1().Secrets(namespace).Delete(ctx, secretName, metav1.DeleteOptions{}) } func (k *K8sClient) PatchSecret(ctx context.Context, secret *corev1.Secret, data []byte, pt types.PatchType) error { if secret == nil { - klog.V(5).Info("Patch secret: secret is nil") + clientLog.Info("Patch secret: secret is nil") return nil } - klog.V(6).Infof("Patch secret %v", secret.Name) + clientLog.V(1).Info("Patch secret", "name", secret.Name) _, err := k.CoreV1().Secrets(secret.Namespace).Patch(ctx, secret.Name, pt, data, metav1.PatchOptions{}) return err } func (k *K8sClient) GetJob(ctx context.Context, jobName, namespace string) (*batchv1.Job, error) { - klog.V(6).Infof("Get job %s", jobName) + clientLog.V(1).Info("Get job", "name", jobName) job, err := k.BatchV1().Jobs(namespace).Get(ctx, jobName, metav1.GetOptions{}) if err != nil { - klog.V(6).Infof("Can't get job %s namespace %s: %v", jobName, 
namespace, err) + clientLog.V(1).Info("Can't get job", "name", jobName, "namespace", namespace, "error", err) return nil, err } return job, nil @@ -293,13 +295,13 @@ func (k *K8sClient) GetJob(ctx context.Context, jobName, namespace string) (*bat func (k *K8sClient) CreateJob(ctx context.Context, job *batchv1.Job) (*batchv1.Job, error) { if job == nil { - klog.V(5).Info("Create job: job is nil") + clientLog.Info("Create job: job is nil") return nil, nil } - klog.V(6).Infof("Create job %s", job.Name) + clientLog.V(1).Info("Create job", "name", job.Name) created, err := k.BatchV1().Jobs(job.Namespace).Create(ctx, job, metav1.CreateOptions{}) if err != nil { - klog.V(5).Infof("Can't create job %s: %v", job.Name, err) + clientLog.Info("Can't create job", "name", job.Name, "error", err) return nil, err } return created, nil @@ -307,16 +309,16 @@ func (k *K8sClient) CreateJob(ctx context.Context, job *batchv1.Job) (*batchv1.J func (k *K8sClient) UpdateJob(ctx context.Context, job *batchv1.Job) error { if job == nil { - klog.V(5).Info("Update job: job is nil") + clientLog.Info("Update job: job is nil") return nil } - klog.V(6).Infof("Update job %v", job.Name) + clientLog.V(1).Info("Update job", "name", job.Name) _, err := k.BatchV1().Jobs(job.Namespace).Update(ctx, job, metav1.UpdateOptions{}) return err } func (k *K8sClient) DeleteJob(ctx context.Context, jobName string, namespace string) error { - klog.V(6).Infof("Delete job %s", jobName) + clientLog.V(1).Info("Delete job", "name", jobName) policy := metav1.DeletePropagationBackground return k.BatchV1().Jobs(namespace).Delete(ctx, jobName, metav1.DeleteOptions{ PropagationPolicy: &policy, @@ -324,17 +326,17 @@ func (k *K8sClient) DeleteJob(ctx context.Context, jobName string, namespace str } func (k *K8sClient) GetPersistentVolume(ctx context.Context, pvName string) (*corev1.PersistentVolume, error) { - klog.V(6).Infof("Get pv %s", pvName) + clientLog.V(1).Info("Get pv", "name", pvName) pv, err := 
k.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{}) if err != nil { - klog.V(6).Infof("Can't get pv %s : %v", pvName, err) + clientLog.V(1).Info("Can't get pv", "name", pvName, "error", err) return nil, err } return pv, nil } func (k *K8sClient) ListPersistentVolumes(ctx context.Context, labelSelector *metav1.LabelSelector, filedSelector *fields.Set) ([]corev1.PersistentVolume, error) { - klog.V(6).Infof("List pvs by labelSelector %v, fieldSelector %v", labelSelector, filedSelector) + clientLog.V(1).Info("List pvs", "labelSelector", labelSelector.String(), "fieldSelector", filedSelector) listOptions := metav1.ListOptions{} if labelSelector != nil { labelMap, err := metav1.LabelSelectorAsMap(labelSelector) @@ -348,7 +350,7 @@ func (k *K8sClient) ListPersistentVolumes(ctx context.Context, labelSelector *me } pvList, err := k.CoreV1().PersistentVolumes().List(ctx, listOptions) if err != nil { - klog.V(6).Infof("Can't list pv: %v", err) + clientLog.V(1).Info("Can't list pv", "error", err) return nil, err } return pvList.Items, nil @@ -370,67 +372,67 @@ func (k *K8sClient) ListPersistentVolumesByVolumeHandle(ctx context.Context, vol } func (k *K8sClient) ListStorageClasses(ctx context.Context) ([]storagev1.StorageClass, error) { - klog.V(6).Info("List storageclass") + clientLog.V(1).Info("List storageclass") scList, err := k.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{}) if err != nil { - klog.V(6).Infof("Can't list pv: %v", err) + clientLog.V(1).Info("Can't list pv", "error", err) return nil, err } return scList.Items, nil } func (k *K8sClient) GetPersistentVolumeClaim(ctx context.Context, pvcName, namespace string) (*corev1.PersistentVolumeClaim, error) { - klog.V(6).Infof("Get pvc %s in namespace %s", pvcName, namespace) + clientLog.V(1).Info("Get pvc", "name", pvcName, "namespace", namespace) mntPod, err := k.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{}) if err != nil { - klog.V(6).Infof("Can't get 
pvc %s in namespace %s : %v", pvcName, namespace, err) + clientLog.V(1).Info("Can't get pvc", "name", pvcName, "namespace", namespace, "error", err) return nil, err } return mntPod, nil } func (k *K8sClient) GetReplicaSet(ctx context.Context, rsName, namespace string) (*appsv1.ReplicaSet, error) { - klog.V(6).Infof("Get replicaset %s in namespace %s", rsName, namespace) + clientLog.V(1).Info("Get replicaset", "name", rsName, "namespace", namespace) rs, err := k.AppsV1().ReplicaSets(namespace).Get(ctx, rsName, metav1.GetOptions{}) if err != nil { - klog.V(6).Infof("Can't get replicaset %s in namespace %s : %v", rsName, namespace, err) + clientLog.V(1).Info("Can't get replicaset", "name", rsName, "namespace", namespace, "error", err) return nil, err } return rs, nil } func (k *K8sClient) GetStatefulSet(ctx context.Context, stsName, namespace string) (*appsv1.StatefulSet, error) { - klog.V(6).Infof("Get statefulset %s in namespace %s", stsName, namespace) + clientLog.V(1).Info("Get statefulset", "name", stsName, "namespace", namespace) sts, err := k.AppsV1().StatefulSets(namespace).Get(ctx, stsName, metav1.GetOptions{}) if err != nil { - klog.V(6).Infof("Can't get statefulset %s in namespace %s : %v", stsName, namespace, err) + clientLog.V(1).Info("Can't get statefulset", "name", stsName, "namespace", namespace, "error", err) return nil, err } return sts, nil } func (k *K8sClient) GetStorageClass(ctx context.Context, scName string) (*storagev1.StorageClass, error) { - klog.V(6).Infof("Get sc %s", scName) + clientLog.V(1).Info("Get sc", "name", scName) mntPod, err := k.StorageV1().StorageClasses().Get(ctx, scName, metav1.GetOptions{}) if err != nil { - klog.V(6).Infof("Can't get sc %s : %v", scName, err) + clientLog.V(1).Info("Can't get sc", "name", scName, "error", err) return nil, err } return mntPod, nil } func (k *K8sClient) GetDaemonSet(ctx context.Context, dsName, namespace string) (*appsv1.DaemonSet, error) { - klog.V(6).Infof("Get ds %s", dsName) + 
clientLog.V(1).Info("Get ds", "name", dsName) ds, err := k.AppsV1().DaemonSets(namespace).Get(ctx, dsName, metav1.GetOptions{}) if err != nil { - klog.V(5).Infof("Can't get DaemonSet %s: %v", dsName, err) + clientLog.Info("Can't get DaemonSet", "name", dsName, "error", err) return nil, err } return ds, nil } func (k *K8sClient) ExecuteInContainer(podName, namespace, containerName string, cmd []string) (stdout string, stderr string, err error) { - klog.V(6).Infof("Execute command %v in container %s in pod %s in namespace %s", cmd, containerName, podName, namespace) + clientLog.V(1).Info("Execute command pod", "command", cmd, "container", containerName, "pod", podName, "namespace", namespace) const tty = false req := k.CoreV1().RESTClient().Post(). diff --git a/pkg/k8sclient/kubelet_client.go b/pkg/k8sclient/kubelet_client.go index 47c93377da..bf3e2343b2 100644 --- a/pkg/k8sclient/kubelet_client.go +++ b/pkg/k8sclient/kubelet_client.go @@ -30,7 +30,7 @@ import ( "k8s.io/client-go/rest" restclient "k8s.io/client-go/rest" "k8s.io/client-go/transport" - "k8s.io/klog" + "k8s.io/klog/v2" ) const ( @@ -38,6 +38,8 @@ const ( serviceAccountTokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" ) +var kubeletLog = klog.NewKlogr().WithName("kubelet-client") + type KubeletClient struct { host string port int @@ -170,7 +172,7 @@ func (kc *KubeletClient) GetNodeRunningPods() (*corev1.PodList, error) { } podLists := &corev1.PodList{} if err = json.Unmarshal(body, &podLists); err != nil { - klog.V(5).Infof("GetNodeRunningPods err: %s", body) + kubeletLog.Error(err, "GetNodeRunningPods err", "body", string(body)) return nil, err } return podLists, err diff --git a/pkg/util/log.go b/pkg/util/log.go new file mode 100644 index 0000000000..e8fa8fca2a --- /dev/null +++ b/pkg/util/log.go @@ -0,0 +1,41 @@ +/* + Copyright 2024 Juicedata Inc + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package util + +import ( + "context" + + "k8s.io/klog/v2" +) + +const LogKey = "jfs-logger" + +type LoggerType string + +func WithLog(parentCtx context.Context, log klog.Logger) context.Context { + return context.WithValue(parentCtx, LoggerType(LogKey), log) +} + +func GenLog(ctx context.Context, log klog.Logger, name string) klog.Logger { + if ctx.Value(LoggerType(LogKey)) != nil { + log = ctx.Value(LoggerType(LogKey)).(klog.Logger) + } + if name != "" { + log = log.WithName(name) + } + return log +} diff --git a/pkg/util/resource/namespace.go b/pkg/util/resource/namespace.go index 93f9d6d418..72d2d06964 100644 --- a/pkg/util/resource/namespace.go +++ b/pkg/util/resource/namespace.go @@ -23,11 +23,16 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" ) +var ( + resourceLog = klog.NewKlogr().WithName("util") +) + // GetNamespace get juicefs pv & pvc from pod when pod namespace is empty func GetNamespace(ctx context.Context, client *k8sclient.K8sClient, pod *corev1.Pod) (namespace string, err error) { namespace = pod.Namespace diff --git a/pkg/util/resource/pod.go b/pkg/util/resource/pod.go index e7caa80368..e2700a7986 100644 --- a/pkg/util/resource/pod.go +++ b/pkg/util/resource/pod.go @@ -28,7 +28,6 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" 
"github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" @@ -113,17 +112,18 @@ func RemoveFinalizer(ctx context.Context, client *k8sclient.K8sClient, pod *core }} payloadBytes, err := json.Marshal(payload) if err != nil { - klog.Errorf("Parse json error: %v", err) + resourceLog.Error(err, "Parse json error") return err } if err := client.PatchPod(ctx, pod, payloadBytes, types.JSONPatchType); err != nil { - klog.Errorf("Patch pod err:%v", err) + resourceLog.Error(err, "Patch pod err") return err } return nil } func AddPodLabel(ctx context.Context, client *k8sclient.K8sClient, pod *corev1.Pod, addLabels map[string]string) error { + log := util.GenLog(ctx, resourceLog, "AddPodLabel") payloads := map[string]interface{}{ "metadata": map[string]interface{}{ "labels": addLabels, @@ -132,12 +132,12 @@ func AddPodLabel(ctx context.Context, client *k8sclient.K8sClient, pod *corev1.P payloadBytes, err := json.Marshal(payloads) if err != nil { - klog.Errorf("Parse json error: %v", err) + log.Error(err, "Parse json error") return err } - klog.V(6).Infof("AddPodLabel: %v in pod %s", addLabels, pod.Name) + log.V(1).Info("add labels in pod", "labels", addLabels, "pod", pod.Name) if err := client.PatchPod(ctx, pod, payloadBytes, types.StrategicMergePatchType); err != nil { - klog.Errorf("Patch pod %s error: %v", pod.Name, err) + log.Error(err, "Patch pod error", "podName", pod.Name) return err } return nil @@ -151,12 +151,12 @@ func AddPodAnnotation(ctx context.Context, client *k8sclient.K8sClient, pod *cor } payloadBytes, err := json.Marshal(payloads) if err != nil { - klog.Errorf("Parse json error: %v", err) + resourceLog.Error(err, "Parse json error") return err } - klog.V(6).Infof("AddPodAnnotation: %v in pod %s", addAnnotations, pod.Name) + resourceLog.V(1).Info("add annotation in pod", "annotations", addAnnotations, "podName", pod.Name) if err := client.PatchPod(ctx, pod, payloadBytes, types.StrategicMergePatchType); err != 
nil { - klog.Errorf("Patch pod %s error: %v", pod.Name, err) + resourceLog.Error(err, "Patch pod error", "podName", pod.Name) return err } return nil @@ -172,12 +172,12 @@ func DelPodAnnotation(ctx context.Context, client *k8sclient.K8sClient, pod *cor } payloadBytes, err := json.Marshal(payloads) if err != nil { - klog.Errorf("Parse json error: %v", err) + resourceLog.Error(err, "Parse json error") return err } - klog.V(6).Infof("Remove annotations: %v of pod %s", delAnnotations, pod.Name) + resourceLog.V(1).Info("remove annotations of pod", "annotations", delAnnotations, "podName", pod.Name) if err := client.PatchPod(ctx, pod, payloadBytes, types.JSONPatchType); err != nil { - klog.Errorf("Patch pod %s error: %v", pod.Name, err) + resourceLog.Error(err, "Patch pod error", "podName", pod.Name) return err } return nil @@ -191,12 +191,12 @@ func ReplacePodAnnotation(ctx context.Context, client *k8sclient.K8sClient, pod }} payloadBytes, err := json.Marshal(payload) if err != nil { - klog.Errorf("Parse json error: %v", err) + resourceLog.Error(err, "Parse json error") return err } - klog.V(6).Infof("Replace annotations: %v of pod %s", annotation, pod.Name) + resourceLog.V(1).Info("Replace annotations of pod", "annotations", annotation, "podName", pod.Name) if err := client.PatchPod(ctx, pod, payloadBytes, types.JSONPatchType); err != nil { - klog.Errorf("Patch pod %s error: %v", pod.Name, err) + resourceLog.Error(err, "Patch pod error", "podName", pod.Name) return err } return nil @@ -213,10 +213,11 @@ func GetAllRefKeys(pod corev1.Pod) map[string]string { } func WaitUtilMountReady(ctx context.Context, podName, mntPath string, timeout time.Duration) error { + log := util.GenLog(ctx, resourceLog, "") waitCtx, cancel := context.WithTimeout(ctx, 60*time.Second) defer cancel() // Wait until the mount point is ready - klog.V(5).Infof("waiting for mount point %v ready, mountpod: %s", mntPath, podName) + log.Info("waiting for mount point ready", "mountPath", mntPath, "podName", podName) for { var
finfo os.FileInfo if err := util.DoWithTimeout(waitCtx, timeout, func() (err error) { @@ -226,7 +227,7 @@ func WaitUtilMountReady(ctx context.Context, podName, mntPath string, timeout ti if err == context.Canceled || err == context.DeadlineExceeded { break } - klog.V(6).Infof("Mount path %v is not ready, mountpod: %s, err: %v", mntPath, podName, err) + log.V(1).Info("Mount path is not ready, wait for it.", "mountPath", mntPath, "podName", podName, "error", err) time.Sleep(time.Millisecond * 500) continue } @@ -235,15 +236,15 @@ func WaitUtilMountReady(ctx context.Context, podName, mntPath string, timeout ti if st.Ino == 1 { dev = uint64(st.Dev) util.DevMinorTableStore(mntPath, dev) - klog.V(5).Infof("Mount point %v is ready, mountpod: %s", mntPath, podName) + log.Info("Mount point is ready", "podName", podName) return nil } - klog.V(6).Infof("Mount point %v is not ready, mountpod: %s", mntPath, podName) + log.V(1).Info("Mount point is not ready, wait for it", "podName", podName) } time.Sleep(time.Millisecond * 500) } - return fmt.Errorf("mount point %v is not ready, mountpod: %s", mntPath, podName) + return fmt.Errorf("mount point is not ready eventually, mountpod: %s", podName) } func ShouldDelay(ctx context.Context, pod *corev1.Pod, Client *k8s.K8sClient) (shouldDelay bool, err error) { @@ -257,20 +258,20 @@ func ShouldDelay(ctx context.Context, pod *corev1.Pod, Client *k8s.K8sClient) (s // need to add delayAt annotation d, err := util.GetTimeAfterDelay(delayStr) if err != nil { - klog.Errorf("delayDelete: can't parse delay time %s: %v", d, err) + resourceLog.Error(err, "delayDelete: can't parse delay time", "time", d) return false, nil } addAnnotation := map[string]string{config.DeleteDelayAtKey: d} - klog.Infof("delayDelete: add annotation %v to pod %s", addAnnotation, pod.Name) + resourceLog.Info("delayDelete: add annotation to pod", "annotations", addAnnotation, "podName", pod.Name) if err := AddPodAnnotation(ctx, Client, pod, addAnnotation); err != nil { - 
klog.Errorf("delayDelete: Update pod %s error: %v", pod.Name, err) + resourceLog.Error(err, "delayDelete: Update pod error", "podName", pod.Name) return true, err } return true, nil } delayAt, err := util.GetTime(delayAtStr) if err != nil { - klog.Errorf("delayDelete: can't parse delayAt %s: %v", delayAtStr, err) + resourceLog.Error(err, "delayDelete: can't parse delayAt", "delayAt", delayAtStr) return false, nil } return time.Now().Before(delayAt), nil diff --git a/pkg/util/resource/pvc.go b/pkg/util/resource/pvc.go index 2a9f678198..42f204424c 100644 --- a/pkg/util/resource/pvc.go +++ b/pkg/util/resource/pvc.go @@ -27,7 +27,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" v1 "k8s.io/api/core/v1" - "k8s.io/klog" "github.com/juicedata/juicefs-csi-driver/pkg/config" k8s "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" @@ -117,7 +116,7 @@ func CheckForSubPath(ctx context.Context, client *k8s.K8sClient, volume *v1.Pers } subPath := pv.Spec.PersistentVolumeSource.CSI.VolumeAttributes["subPath"] if subPath == nowSubPath { - klog.V(6).Infof("PV %s uses the same subPath %s", pv.Name, subPath) + resourceLog.V(1).Info("PV uses the same subPath", "pvName", pv.Name, "subPath", subPath) return false, nil } } @@ -139,7 +138,7 @@ func (meta *ObjectMeta) ResolveSecret(str string, pvName string) string { return av } } - klog.Errorf("Cannot resolve %s. replace it with an empty string", k) + resourceLog.Info("Cannot resolve. 
replace it with an empty string", "key", k) return "" }) return resolved @@ -150,7 +149,7 @@ func CheckForSecretFinalizer(ctx context.Context, client *k8s.K8sClient, volume secretNamespace := volume.Spec.PersistentVolumeSource.CSI.VolumeAttributes[config.ProvisionerSecretNamespace] secretName := volume.Spec.PersistentVolumeSource.CSI.VolumeAttributes[config.ProvisionerSecretName] if sc == "" || secretNamespace == "" || secretName == "" { - klog.V(5).Infof("Cannot check for the secret, storageclass: %s, secretNamespace: %s, secretName: %s", sc, secretNamespace, secretName) + resourceLog.Info("Cannot check for the secret", "storageClass", sc, "secretNamespace", secretNamespace, "secretName", secretName) return false, nil } // get all pvs @@ -166,7 +165,7 @@ func CheckForSecretFinalizer(ctx context.Context, client *k8s.K8sClient, volume pvSecretName := pv.Spec.PersistentVolumeSource.CSI.VolumeAttributes[config.ProvisionerSecretName] // Cannot remove the secret if it is used by another pv if secretNamespace == pvSecretNamespace && secretName == pvSecretName { - klog.V(5).Infof("PV %s uses the same secret %s/%s", pv.Name, pvSecretNamespace, pvSecretName) + resourceLog.Info("PV uses the same secret", "pvName", pv.Name, "pv secret namespace", pvSecretNamespace, "pv secret name", pvSecretName) return false, nil } } @@ -182,11 +181,11 @@ func patchSecretFinalizer(ctx context.Context, client *k8s.K8sClient, secret *v1 }} payloadBytes, err := json.Marshal(payload) if err != nil { - klog.Errorf("Parse json error: %v", err) + resourceLog.Error(err, "Parse json error") return err } if err := client.PatchSecret(ctx, secret, payloadBytes, types.JSONPatchType); err != nil { - klog.Errorf("Patch secret err:%v", err) + resourceLog.Error(err, "Patch secret err") return err } return nil diff --git a/pkg/util/resource/terminal.go b/pkg/util/resource/terminal.go index 3cfc4a88f5..a039f38ed4 100644 --- a/pkg/util/resource/terminal.go +++ b/pkg/util/resource/terminal.go @@ -28,7 +28,6 @@ 
import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/tools/remotecommand" - "k8s.io/klog" ) const ( @@ -106,7 +105,7 @@ func (t *terminalSession) checkHeartbeat(ctx context.Context) { return default: if time.Since(t.lastHeartbeatAt) > 1*time.Minute { - klog.Error("Terminal session heartbeat timeout") + resourceLog.Info("Terminal session heartbeat timeout") t.conn.Close() return } @@ -137,7 +136,7 @@ func ExecInPod(client kubernetes.Interface, cfg *rest.Config, h Handler, namespa executor, err := remotecommand.NewSPDYExecutor(cfg, "POST", req.URL()) if err != nil { - klog.Error("Failed to create SPDY executor: ", err) + resourceLog.Error(err, "Failed to create SPDY executor") return err } if err := executor.Stream(remotecommand.StreamOptions{ @@ -147,7 +146,7 @@ func ExecInPod(client kubernetes.Interface, cfg *rest.Config, h Handler, namespa TerminalSizeQueue: h, Tty: true, }); err != nil { - klog.Error("Failed to stream: ", err) + resourceLog.Error(err, "Failed to stream") return err } @@ -168,14 +167,14 @@ func DownloadPodFile(client kubernetes.Interface, cfg *rest.Config, writer io.Wr executor, err := remotecommand.NewSPDYExecutor(cfg, "POST", req.URL()) if err != nil { - klog.Error("Failed to create SPDY executor: ", err) + resourceLog.Error(err, "Failed to create SPDY executor") return err } if err := executor.Stream(remotecommand.StreamOptions{ Stdout: writer, Stderr: writer, }); err != nil { - klog.Error("Failed to stream: ", err) + resourceLog.Error(err, "Failed to stream") return err } diff --git a/pkg/util/resource/volume.go b/pkg/util/resource/volume.go index 9b5295fb20..3e9e8eca69 100644 --- a/pkg/util/resource/volume.go +++ b/pkg/util/resource/volume.go @@ -24,7 +24,6 @@ import ( corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/klog" "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" @@ -37,7 
+36,7 @@ type PVPair struct { // GetVolumes get juicefs pv & pvc from pod func GetVolumes(ctx context.Context, client *k8sclient.K8sClient, pod *corev1.Pod) (used bool, pvPairGot []PVPair, err error) { - klog.V(6).Infof("Volumes of pod %s: %v", pod.Name, pod.Spec.Volumes) + resourceLog.V(1).Info("Volumes of pod", "podName", pod.Name, "volumes", pod.Spec.Volumes) var namespace string pvPairGot = []PVPair{} namespace, err = GetNamespace(ctx, client, pod) @@ -46,7 +45,7 @@ func GetVolumes(ctx context.Context, client *k8sclient.K8sClient, pod *corev1.Po } pod.Namespace = namespace pvPairGot, err = getVol(ctx, client, pod, namespace) - klog.V(6).Infof("pvPairGot: %v", pvPairGot) + resourceLog.V(1).Info("get pv pair", "pv pair", pvPairGot) used = len(pvPairGot) != 0 return } diff --git a/pkg/util/util.go b/pkg/util/util.go index 31fc75ec0d..3b0f2ebe02 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -35,7 +35,7 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/utils/io" ) @@ -45,6 +45,10 @@ const ( procMountInfoPath = "/proc/self/mountinfo" ) +var ( + utilLog = klog.NewKlogr().WithName("util") +) + func init() { rand.Seed(time.Now().UnixNano()) } @@ -345,12 +349,13 @@ func CheckDynamicPV(name string) (bool, error) { } func UmountPath(ctx context.Context, sourcePath string) { + log := GenLog(ctx, utilLog, "Umount") out, err := exec.CommandContext(ctx, "umount", "-l", sourcePath).CombinedOutput() if err != nil && !strings.Contains(string(out), "not mounted") && !strings.Contains(string(out), "mountpoint not found") && !strings.Contains(string(out), "no mount point specified") { - klog.Errorf("Could not lazy unmount %q: %v, output: %s", sourcePath, err, string(out)) + log.Error(err, "Could not lazy unmount", "path", sourcePath, "out", string(out)) } } @@ -399,7 +404,7 @@ func GetDiskUsage(path string) (uint64, uint64, uint64, uint64) { freeFiles := stat.Ffree return totalSize, freeSize, totalFiles, 
freeFiles } else { - klog.Errorf("GetDiskUsage: syscall.Statfs failed: %v", err) + utilLog.Error(err, "GetDiskUsage: syscall.Statfs failed") return 1, 1, 1, 1 } } diff --git a/pkg/webhook/handler/handler.go b/pkg/webhook/handler/handler.go index 5481309ac9..43cc6ca333 100644 --- a/pkg/webhook/handler/handler.go +++ b/pkg/webhook/handler/handler.go @@ -23,7 +23,7 @@ import ( "net/http" corev1 "k8s.io/api/core/v1" - "k8s.io/klog" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" "github.com/juicedata/juicefs-csi-driver/pkg/config" @@ -43,6 +43,10 @@ type SidecarHandler struct { serverless bool } +var ( + handlerLog = klog.NewKlogr().WithName("sidecar-handler") +) + func NewSidecarHandler(client *k8sclient.K8sClient, serverless bool) *SidecarHandler { return &SidecarHandler{ Client: client, @@ -53,38 +57,38 @@ func NewSidecarHandler(client *k8sclient.K8sClient, serverless bool) *SidecarHan func (s *SidecarHandler) Handle(ctx context.Context, request admission.Request) admission.Response { pod := &corev1.Pod{} raw := request.Object.Raw - klog.V(6).Infof("[SidecarHandler] get pod: %s", string(raw)) + handlerLog.V(1).Info("get pod", "pod", string(raw)) err := s.decoder.Decode(request, pod) if err != nil { - klog.Error(err, "unable to decoder pod from req") + handlerLog.Error(err, "unable to decoder pod from req") return admission.Errored(http.StatusBadRequest, err) } // check if pod has done label if util.CheckExpectValue(pod.Labels, config.InjectSidecarDone, config.True) { - klog.Infof("[SidecarHandler] skip mutating the pod because injection is done. 
Pod %s namespace %s", pod.Name, pod.Namespace) + handlerLog.Info("skip mutating the pod because injection is done.", "name", pod.Name, "namespace", pod.Namespace) return admission.Allowed("skip mutating the pod because injection is done") } // check if pod has disable label if util.CheckExpectValue(pod.Labels, config.InjectSidecarDisable, config.True) { - klog.Infof("[SidecarHandler] skip mutating the pod because injection is disabled. Pod %s namespace %s", pod.Name, pod.Namespace) + handlerLog.Info("skip mutating the pod because injection is disabled.", "name", pod.Name, "namespace", pod.Namespace) return admission.Allowed("skip mutating the pod because injection is disabled") } // check if pod use JuiceFS Volume used, pair, err := resource.GetVolumes(ctx, s.Client, pod) if err != nil { - klog.Errorf("[SidecarHandler] get pv from pod %s namespace %s err: %v", pod.Name, pod.Namespace, err) + handlerLog.Error(err, "get pv from pod", "name", pod.Name, "namespace", pod.Namespace) return admission.Errored(http.StatusBadRequest, err) } else if !used { - klog.Infof("[SidecarHandler] skip mutating the pod because it doesn't use JuiceFS Volume. 
Pod %s namespace %s", pod.Name, pod.Namespace) + handlerLog.Info("skip mutating the pod because it doesn't use JuiceFS Volume.", "name", pod.Name, "namespace", pod.Namespace) return admission.Allowed("skip mutating the pod because it doesn't use JuiceFS Volume") } jfs := juicefs.NewJfsProvider(nil, s.Client) sidecarMutate := mutate.NewSidecarMutate(s.Client, jfs, s.serverless, pair) - klog.Infof("[SidecarHandler] start injecting juicefs client as sidecar in pod [%s] namespace [%s].", pod.Name, pod.Namespace) + handlerLog.Info("start injecting juicefs client as sidecar in pod", "name", pod.Name, "namespace", pod.Namespace) out, err := sidecarMutate.Mutate(ctx, pod) if err != nil { return admission.Errored(http.StatusBadRequest, err) } @@ -93,10 +97,10 @@ func (s *SidecarHandler) Handle(ctx context.Context, request admission.Request) marshaledPod, err := json.Marshal(pod) if err != nil { - klog.Error(err, "unable to marshal pod") + handlerLog.Error(err, "unable to marshal pod") return admission.Errored(http.StatusInternalServerError, err) } - klog.V(6).Infof("[SidecarHandler] mutated pod: %s", string(marshaledPod)) + handlerLog.V(1).Info("mutated pod", "pod", string(marshaledPod)) resp := admission.PatchResponseFromRaw(raw, marshaledPod) return resp } @@ -129,14 +133,14 @@ func (s *SecretHandler) Handle(ctx context.Context, request admission.Request) a secret := &corev1.Secret{} err := s.decoder.Decode(request, secret) if err != nil { - klog.Errorf("unable to decoder secret from req, %v", err) + handlerLog.Error(err, "unable to decoder secret from req") return admission.Errored(http.StatusBadRequest, err) } jfs := juicefs.NewJfsProvider(nil, nil) secretValidateor := validator.NewSecretValidator(jfs) if err := secretValidateor.Validate(ctx, *secret); err != nil { - klog.Errorf("secret validation failed, secret: %s, err: %v", secret.Name, err) + handlerLog.Error(err, "secret validation failed", "name", secret.Name) return
admission.Errored(http.StatusBadRequest, err) } return admission.Allowed("") @@ -164,7 +168,7 @@ func (s *PVHandler) Handle(ctx context.Context, request admission.Request) admis pv := &corev1.PersistentVolume{} err := s.decoder.Decode(request, pv) if err != nil { - klog.Errorf("unable to decoder pv from req, %v", err) + handlerLog.Error(err, "unable to decoder pv from req") return admission.Errored(http.StatusBadRequest, err) } @@ -178,7 +182,7 @@ func (s *PVHandler) Handle(ctx context.Context, request admission.Request) admis volumeHandle := pv.Spec.CSI.VolumeHandle existPvs, err := s.Client.ListPersistentVolumesByVolumeHandle(ctx, volumeHandle) if err != nil { - klog.Errorf("list pv by volume handle %s failed, err: %v", volumeHandle, err) + handlerLog.Error(err, "list pv by volume handle failed", "volume handle", volumeHandle) return admission.Errored(http.StatusBadRequest, err) } if len(existPvs) > 0 { diff --git a/pkg/webhook/handler/mutate/sidecar.go b/pkg/webhook/handler/mutate/sidecar.go index 4b354f488f..fdf5615a0e 100644 --- a/pkg/webhook/handler/mutate/sidecar.go +++ b/pkg/webhook/handler/mutate/sidecar.go @@ -26,7 +26,7 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/util/retry" - "k8s.io/klog" + "k8s.io/klog/v2" "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/juicefs" @@ -36,6 +36,10 @@ import ( "github.com/juicedata/juicefs-csi-driver/pkg/util/resource" ) +var ( + sidecarLog = klog.NewKlogr().WithName("sidecar") +) + type SidecarMutate struct { Client *k8sclient.K8sClient juicefs juicefs.Interface @@ -71,7 +75,7 @@ func (s *SidecarMutate) mutate(ctx context.Context, pod *corev1.Pod, pair resour // get secret, volumeContext and mountOptions from PV secrets, volCtx, options, err := s.GetSettings(*pair.PV) if err != nil { - klog.Errorf("get settings from pv %s of pod %s namespace %s err: %v", pair.PV.Name, pod.Name, pod.Namespace, err) + 
sidecarLog.Error(err, "get settings from pv of pod err", "pv name", pair.PV.Name, "podName", pod.Name, "podNamespace", pod.Namespace) return } @@ -124,7 +128,7 @@ func (s *SidecarMutate) mutate(ctx context.Context, pod *corev1.Pod, pair resour // gen mount pod mountPod := r.NewMountSidecar() podStr, _ := json.Marshal(mountPod) - klog.V(6).Infof("mount pod: %v\n", string(podStr)) + sidecarLog.V(1).Info("generate mount pod", "mount pod", string(podStr)) // deduplicate container name and volume name in pod when multiple volumes are mounted s.Deduplicate(pod, mountPod, index) @@ -187,7 +191,7 @@ func (s *SidecarMutate) GetSettings(pv corev1.PersistentVolume) (secrets, volCtx secrets[k] = string(v) } volCtx = pv.Spec.CSI.VolumeAttributes - klog.V(5).Infof("volume context of pv %s: %v", pv.Name, volCtx) + sidecarLog.Info("volume context of pv", "pv", pv.Name, "volCtx", volCtx) options = []string{} if len(pv.Spec.AccessModes) == 1 && pv.Spec.AccessModes[0] == corev1.ReadOnlyMany { @@ -202,7 +206,7 @@ func (s *SidecarMutate) GetSettings(pv corev1.PersistentVolume) (secrets, volCtx mountOptions = strings.Split(opts, ",") } options = append(options, mountOptions...) 
- klog.V(5).Infof("volume options of pv %s: %v", pv.Name, options) + sidecarLog.Info("volume options of pv", "pv", pv.Name, "options", options) return } @@ -271,7 +275,7 @@ func (s *SidecarMutate) injectAnnotation(pod *corev1.Pod, annotations map[string } func (s *SidecarMutate) createOrUpdateSecret(ctx context.Context, secret *corev1.Secret) error { - klog.V(5).Infof("createOrUpdateSecret: %s, %s", secret.Name, secret.Namespace) + sidecarLog.Info("create or update secret", "name", secret.Name, "namespace", secret.Namespace) err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { oldSecret, err := s.Client.GetSecret(ctx, secret.Name, secret.Namespace) if err != nil { @@ -288,7 +292,7 @@ func (s *SidecarMutate) createOrUpdateSecret(ctx context.Context, secret *corev1 return s.Client.UpdateSecret(ctx, oldSecret) }) if err != nil { - klog.Errorf("createOrUpdateSecret: secret %s: %v", secret.Name, err) + sidecarLog.Error(err, "create or update secret error", "name", secret.Name) return err } return nil diff --git a/pkg/webhook/handler/register.go b/pkg/webhook/handler/register.go index 7f53f7b98a..7e1c29dfbc 100644 --- a/pkg/webhook/handler/register.go +++ b/pkg/webhook/handler/register.go @@ -17,7 +17,7 @@ package handler import ( - "k8s.io/klog" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/webhook" @@ -32,13 +32,17 @@ const ( PVPath = "/juicefs/validate-pv" ) +var ( + webhookLog = klog.NewKlogr().WithName("webhook") +) + // Register registers the handlers to the manager func Register(mgr manager.Manager, client *k8sclient.K8sClient) { server := mgr.GetWebhookServer() server.Register(SidecarPath, &webhook.Admission{Handler: NewSidecarHandler(client, false)}) - klog.Infof("Registered webhook handler path %s for sidecar", SidecarPath) + webhookLog.Info("Registered webhook handler for sidecar", "path", SidecarPath) server.Register(ServerlessPath, &webhook.Admission{Handler: NewSidecarHandler(client, true)}) 
- klog.Infof("Registered webhook handler path %s for serverless", ServerlessPath) + webhookLog.Info("Registered webhook handler for serverless", "path", ServerlessPath) if config.ValidatingWebhook { server.Register(SecretPath, &webhook.Admission{Handler: NewSecretHandler(client)}) server.Register(PVPath, &webhook.Admission{Handler: NewPVHandler(client)}) From d93f213ec7ffe11ecd8c362e4881ee5db6b30704 Mon Sep 17 00:00:00 2001 From: zwwhdls Date: Wed, 28 Aug 2024 11:17:28 +0800 Subject: [PATCH 2/2] refact log Signed-off-by: zwwhdls --- .github/scripts/deploy-csi-in-k8s.sh | 4 +-- .../csi-ci/pod-mount-share/daemonset.yaml | 2 +- .../csi-ci/pod-mount-share/statefulset.yaml | 2 +- .../csi-ci/pod-provisioner/daemonset.yaml | 2 +- .../csi-ci/pod-provisioner/statefulset.yaml | 2 +- deploy/kubernetes/csi-ci/pod/daemonset.yaml | 2 +- deploy/kubernetes/csi-ci/pod/statefulset.yaml | 2 +- .../kubernetes/csi-ci/process/daemonset.yaml | 2 +- .../csi-ci/process/statefulset.yaml | 2 +- .../webhook-provisioner/statefulset.yaml | 2 +- .../csi-ci/webhook/statefulset.yaml | 2 +- .../pod-mount-share/daemonset.yaml | 2 +- .../pod-mount-share/statefulset.yaml | 2 +- .../pod-provisioner/daemonset.yaml | 2 +- .../pod-provisioner/statefulset.yaml | 2 +- .../csi-ci/without-kubelet/pod/daemonset.yaml | 2 +- .../without-kubelet/pod/statefulset.yaml | 2 +- pkg/controller/pod_driver.go | 7 ++++- pkg/controller/pod_driver_test.go | 1 + pkg/driver/node_test.go | 8 ++++++ pkg/fuse/passfd.go | 7 ++++- pkg/juicefs/juicefs_test.go | 28 +++++++++++++++++++ pkg/juicefs/mount/builder/pod_test.go | 2 ++ pkg/juicefs/mount/pod_mount.go | 6 +++- pkg/juicefs/mount/pod_mount_test.go | 5 ++++ 25 files changed, 79 insertions(+), 21 deletions(-) diff --git a/.github/scripts/deploy-csi-in-k8s.sh b/.github/scripts/deploy-csi-in-k8s.sh index 9583eeccf1..5c0b8add9a 100755 --- a/.github/scripts/deploy-csi-in-k8s.sh +++ b/.github/scripts/deploy-csi-in-k8s.sh @@ -122,7 +122,7 @@ function deploy_webhook() { sudo
kustomize build ${GITHUB_WORKSPACE}/deploy/kubernetes/csi-ci/webhook >> ${GITHUB_WORKSPACE}/deploy/webhook.yaml sudo ${GITHUB_WORKSPACE}/hack/update_install_script.sh sudo ${GITHUB_WORKSPACE}/scripts/juicefs-csi-webhook-install.sh print | sed -e "s@juicedata/juicefs-csi-driver.*\$@juicedata/juicefs-csi-driver:${dev_tag}@g" \ - -e 's@/var/lib/kubelet@/var/snap/microk8s/common/var/lib/kubelet@g' -e 's@--v=5@--v=6@g' -e "s@juicedata/csi-dashboard.*\$@juicedata/csi-dashboard:${dev_tag}@g" | sudo microk8s.kubectl apply -f - + -e 's@/var/lib/kubelet@/var/snap/microk8s/common/var/lib/kubelet@g' -e "s@juicedata/csi-dashboard.*\$@juicedata/csi-dashboard:${dev_tag}@g" | sudo microk8s.kubectl apply -f - # Wait until the deploy finish timeout=0 while true; do @@ -161,7 +161,7 @@ function deploy_webhook_provisioner() { sudo kustomize build ${GITHUB_WORKSPACE}/deploy/kubernetes/csi-ci/webhook-provisioner > ${GITHUB_WORKSPACE}/deploy/webhook.yaml sudo ${GITHUB_WORKSPACE}/hack/update_install_script.sh sudo ${GITHUB_WORKSPACE}/scripts/juicefs-csi-webhook-install.sh print | sed -e "s@juicedata/juicefs-csi-driver.*\$@juicedata/juicefs-csi-driver:${dev_tag}@g" \ - -e 's@/var/lib/kubelet@/var/snap/microk8s/common/var/lib/kubelet@g' -e 's@--v=5@--v=6@g' -e "s@juicedata/csi-dashboard.*\$@juicedata/csi-dashboard:${dev_tag}@g" | sudo microk8s.kubectl apply -f - + -e 's@/var/lib/kubelet@/var/snap/microk8s/common/var/lib/kubelet@g' -e "s@juicedata/csi-dashboard.*\$@juicedata/csi-dashboard:${dev_tag}@g" | sudo microk8s.kubectl apply -f - # Wait until the deploy finish timeout=0 while true; do diff --git a/deploy/kubernetes/csi-ci/pod-mount-share/daemonset.yaml b/deploy/kubernetes/csi-ci/pod-mount-share/daemonset.yaml index 5d5d6348a3..3d41f960af 100644 --- a/deploy/kubernetes/csi-ci/pod-mount-share/daemonset.yaml +++ b/deploy/kubernetes/csi-ci/pod-mount-share/daemonset.yaml @@ -12,7 +12,7 @@ spec: - --endpoint=$(CSI_ENDPOINT) - --logtostderr - --nodeid=$(NODE_NAME) - - --v=6 + - --v=1 - 
--enable-manager=true - --config=/etc/config/config.yaml env: diff --git a/deploy/kubernetes/csi-ci/pod-mount-share/statefulset.yaml b/deploy/kubernetes/csi-ci/pod-mount-share/statefulset.yaml index acea3364a0..6f9d40fd45 100644 --- a/deploy/kubernetes/csi-ci/pod-mount-share/statefulset.yaml +++ b/deploy/kubernetes/csi-ci/pod-mount-share/statefulset.yaml @@ -14,7 +14,7 @@ spec: - --nodeid=$(NODE_NAME) - --leader-election - --cache-client-conf - - --v=6 + - --v=1 - --config=/etc/config/config.yaml env: - name: CSI_ENDPOINT diff --git a/deploy/kubernetes/csi-ci/pod-provisioner/daemonset.yaml b/deploy/kubernetes/csi-ci/pod-provisioner/daemonset.yaml index 40cac84f12..9368adb8ce 100644 --- a/deploy/kubernetes/csi-ci/pod-provisioner/daemonset.yaml +++ b/deploy/kubernetes/csi-ci/pod-provisioner/daemonset.yaml @@ -12,6 +12,6 @@ spec: - --endpoint=$(CSI_ENDPOINT) - --logtostderr - --nodeid=$(NODE_NAME) - - --v=6 + - --v=1 - --enable-manager=true - --config=/etc/config/config.yaml diff --git a/deploy/kubernetes/csi-ci/pod-provisioner/statefulset.yaml b/deploy/kubernetes/csi-ci/pod-provisioner/statefulset.yaml index 3571d98673..244b789224 100644 --- a/deploy/kubernetes/csi-ci/pod-provisioner/statefulset.yaml +++ b/deploy/kubernetes/csi-ci/pod-provisioner/statefulset.yaml @@ -13,7 +13,7 @@ spec: - --logtostderr - --nodeid=$(NODE_NAME) - --leader-election - - --v=6 + - --v=1 - --provisioner=true - --config=/etc/config/config.yaml - name: csi-provisioner diff --git a/deploy/kubernetes/csi-ci/pod/daemonset.yaml b/deploy/kubernetes/csi-ci/pod/daemonset.yaml index 40cac84f12..9368adb8ce 100644 --- a/deploy/kubernetes/csi-ci/pod/daemonset.yaml +++ b/deploy/kubernetes/csi-ci/pod/daemonset.yaml @@ -12,6 +12,6 @@ spec: - --endpoint=$(CSI_ENDPOINT) - --logtostderr - --nodeid=$(NODE_NAME) - - --v=6 + - --v=1 - --enable-manager=true - --config=/etc/config/config.yaml diff --git a/deploy/kubernetes/csi-ci/pod/statefulset.yaml b/deploy/kubernetes/csi-ci/pod/statefulset.yaml index 
3e2f7e3f4f..83dbc1212c 100644 --- a/deploy/kubernetes/csi-ci/pod/statefulset.yaml +++ b/deploy/kubernetes/csi-ci/pod/statefulset.yaml @@ -13,5 +13,5 @@ spec: - --logtostderr - --nodeid=$(NODE_NAME) - --leader-election - - --v=6 + - --v=1 - --config=/etc/config/config.yaml diff --git a/deploy/kubernetes/csi-ci/process/daemonset.yaml b/deploy/kubernetes/csi-ci/process/daemonset.yaml index f3ac9b7b04..0056fb8d55 100644 --- a/deploy/kubernetes/csi-ci/process/daemonset.yaml +++ b/deploy/kubernetes/csi-ci/process/daemonset.yaml @@ -12,6 +12,6 @@ spec: - --endpoint=$(CSI_ENDPOINT) - --logtostderr - --nodeid=$(NODE_NAME) - - --v=6 + - --v=1 - --by-process=true - --config=/etc/config/config.yaml \ No newline at end of file diff --git a/deploy/kubernetes/csi-ci/process/statefulset.yaml b/deploy/kubernetes/csi-ci/process/statefulset.yaml index 2cbcf0c1f9..f7f79e90cb 100644 --- a/deploy/kubernetes/csi-ci/process/statefulset.yaml +++ b/deploy/kubernetes/csi-ci/process/statefulset.yaml @@ -13,6 +13,6 @@ spec: - --logtostderr - --nodeid=$(NODE_NAME) - --leader-election - - --v=6 + - --v=1 - --by-process=true - --config=/etc/config/config.yaml \ No newline at end of file diff --git a/deploy/kubernetes/csi-ci/webhook-provisioner/statefulset.yaml b/deploy/kubernetes/csi-ci/webhook-provisioner/statefulset.yaml index 18661894f0..e74a96c0f2 100644 --- a/deploy/kubernetes/csi-ci/webhook-provisioner/statefulset.yaml +++ b/deploy/kubernetes/csi-ci/webhook-provisioner/statefulset.yaml @@ -13,7 +13,7 @@ spec: - --logtostderr - --nodeid=$(NODE_NAME) - --leader-election - - --v=6 + - --v=1 - --webhook=true - --provisioner=true - --config=/etc/config/config.yaml diff --git a/deploy/kubernetes/csi-ci/webhook/statefulset.yaml b/deploy/kubernetes/csi-ci/webhook/statefulset.yaml index ee4d02f83d..f402ed46a1 100644 --- a/deploy/kubernetes/csi-ci/webhook/statefulset.yaml +++ b/deploy/kubernetes/csi-ci/webhook/statefulset.yaml @@ -13,7 +13,7 @@ spec: - --logtostderr - --nodeid=$(NODE_NAME) - 
--leader-election - - --v=6 + - --v=1 - --webhook=true - --validating-webhook=true - --config=/etc/config/config.yaml \ No newline at end of file diff --git a/deploy/kubernetes/csi-ci/without-kubelet/pod-mount-share/daemonset.yaml b/deploy/kubernetes/csi-ci/without-kubelet/pod-mount-share/daemonset.yaml index 8b4b1f187b..66098242d1 100644 --- a/deploy/kubernetes/csi-ci/without-kubelet/pod-mount-share/daemonset.yaml +++ b/deploy/kubernetes/csi-ci/without-kubelet/pod-mount-share/daemonset.yaml @@ -12,7 +12,7 @@ spec: - --endpoint=$(CSI_ENDPOINT) - --logtostderr - --nodeid=$(NODE_NAME) - - --v=6 + - --v=1 - --enable-manager=true - --config=/etc/config/config.yaml env: diff --git a/deploy/kubernetes/csi-ci/without-kubelet/pod-mount-share/statefulset.yaml b/deploy/kubernetes/csi-ci/without-kubelet/pod-mount-share/statefulset.yaml index acea3364a0..6f9d40fd45 100644 --- a/deploy/kubernetes/csi-ci/without-kubelet/pod-mount-share/statefulset.yaml +++ b/deploy/kubernetes/csi-ci/without-kubelet/pod-mount-share/statefulset.yaml @@ -14,7 +14,7 @@ spec: - --nodeid=$(NODE_NAME) - --leader-election - --cache-client-conf - - --v=6 + - --v=1 - --config=/etc/config/config.yaml env: - name: CSI_ENDPOINT diff --git a/deploy/kubernetes/csi-ci/without-kubelet/pod-provisioner/daemonset.yaml b/deploy/kubernetes/csi-ci/without-kubelet/pod-provisioner/daemonset.yaml index cbca871e2a..1c71775419 100644 --- a/deploy/kubernetes/csi-ci/without-kubelet/pod-provisioner/daemonset.yaml +++ b/deploy/kubernetes/csi-ci/without-kubelet/pod-provisioner/daemonset.yaml @@ -12,7 +12,7 @@ spec: - --endpoint=$(CSI_ENDPOINT) - --logtostderr - --nodeid=$(NODE_NAME) - - --v=6 + - --v=1 - --enable-manager=true - --config=/etc/config/config.yaml env: diff --git a/deploy/kubernetes/csi-ci/without-kubelet/pod-provisioner/statefulset.yaml b/deploy/kubernetes/csi-ci/without-kubelet/pod-provisioner/statefulset.yaml index 3571d98673..244b789224 100644 --- 
a/deploy/kubernetes/csi-ci/without-kubelet/pod-provisioner/statefulset.yaml +++ b/deploy/kubernetes/csi-ci/without-kubelet/pod-provisioner/statefulset.yaml @@ -13,7 +13,7 @@ spec: - --logtostderr - --nodeid=$(NODE_NAME) - --leader-election - - --v=6 + - --v=1 - --provisioner=true - --config=/etc/config/config.yaml - name: csi-provisioner diff --git a/deploy/kubernetes/csi-ci/without-kubelet/pod/daemonset.yaml b/deploy/kubernetes/csi-ci/without-kubelet/pod/daemonset.yaml index cbca871e2a..1c71775419 100644 --- a/deploy/kubernetes/csi-ci/without-kubelet/pod/daemonset.yaml +++ b/deploy/kubernetes/csi-ci/without-kubelet/pod/daemonset.yaml @@ -12,7 +12,7 @@ spec: - --endpoint=$(CSI_ENDPOINT) - --logtostderr - --nodeid=$(NODE_NAME) - - --v=6 + - --v=1 - --enable-manager=true - --config=/etc/config/config.yaml env: diff --git a/deploy/kubernetes/csi-ci/without-kubelet/pod/statefulset.yaml b/deploy/kubernetes/csi-ci/without-kubelet/pod/statefulset.yaml index 1fbf00327d..bc469c8b38 100644 --- a/deploy/kubernetes/csi-ci/without-kubelet/pod/statefulset.yaml +++ b/deploy/kubernetes/csi-ci/without-kubelet/pod/statefulset.yaml @@ -13,5 +13,5 @@ spec: - --logtostderr - --nodeid=$(NODE_NAME) - --leader-election - - --v=6 + - --v=1 - --config=/etc/config/config.yaml \ No newline at end of file diff --git a/pkg/controller/pod_driver.go b/pkg/controller/pod_driver.go index 3d463a948c..810f5098c0 100644 --- a/pkg/controller/pod_driver.go +++ b/pkg/controller/pod_driver.go @@ -93,6 +93,7 @@ func (p *PodDriver) SetMountInfo(mit mountInfoTable) { } func (p *PodDriver) Run(ctx context.Context, current *corev1.Pod) (Result, error) { + ctx = util.WithLog(ctx, podDriverLog) podStatus := getPodStatus(current) podDriverLog.V(1).Info("start handle pod", "namespace", current.Namespace, "name", current.Name, "status", podStatus) // check refs in mount pod annotation first, delete ref that target pod is not found @@ -910,7 +911,11 @@ func mkrMp(ctx context.Context, pod corev1.Pod) error { return 
err } err = util.DoWithTimeout(ctx, 3*time.Second, func() error { - return os.MkdirAll(mntPath, 0777) + exist, _ := mount.PathExists(mntPath) + if !exist { + return os.MkdirAll(mntPath, 0777) + } + return nil }) if err != nil { return err diff --git a/pkg/controller/pod_driver_test.go b/pkg/controller/pod_driver_test.go index dfb527cb8f..7f75513600 100644 --- a/pkg/controller/pod_driver_test.go +++ b/pkg/controller/pod_driver_test.go @@ -467,6 +467,7 @@ func genMountInfos() []mount.MountInfo { } func TestPodDriver_podReadyHandler(t *testing.T) { + defer func() { _ = os.RemoveAll("tmp") }() fuse.InitTestFds() Convey("Test pod ready handler", t, FailureContinues, func() { Convey("pod ready add need recovery ", func() { diff --git a/pkg/driver/node_test.go b/pkg/driver/node_test.go index 8b007116d1..b017185f11 100644 --- a/pkg/driver/node_test.go +++ b/pkg/driver/node_test.go @@ -486,6 +486,10 @@ func TestNodeUnpublishVolume(t *testing.T) { metrics := newNodeMetrics(registerer) Convey("Test NodeUnpublishVolume", t, func() { Convey("test normal", func() { + patch := ApplyFunc(os.MkdirAll, func(path string, perm os.FileMode) error { + return nil + }) + defer patch.Reset() targetPath := "/test/path" volumeId := "vol-test" @@ -516,6 +520,10 @@ func TestNodeUnpublishVolume(t *testing.T) { targetPath := "/test/path" volumeId := "vol-test" + patch := ApplyFunc(os.MkdirAll, func(path string, perm os.FileMode) error { + return nil + }) + defer patch.Reset() mockCtl := gomock.NewController(t) defer mockCtl.Finish() diff --git a/pkg/fuse/passfd.go b/pkg/fuse/passfd.go index 82ff94965b..63170298c9 100644 --- a/pkg/fuse/passfd.go +++ b/pkg/fuse/passfd.go @@ -104,7 +104,12 @@ func (fs *Fds) GetFdAddress(ctx context.Context, podHashVal string) (string, err addressInPod := path.Join(fs.basePath, "fuse_fd_csi_comm.sock") // mkdir parent err := util.DoWithTimeout(ctx, 2*time.Second, func() error { - return os.MkdirAll(path.Join(fs.basePath, podHashVal), 0777) + parentPath := 
path.Join(fs.basePath, podHashVal) + exist, _ := k8sMount.PathExists(parentPath) + if !exist { + return os.MkdirAll(parentPath, 0777) + } + return nil }) if err != nil { return "", err diff --git a/pkg/juicefs/juicefs_test.go b/pkg/juicefs/juicefs_test.go index b9ff367837..e4e76b1ac2 100644 --- a/pkg/juicefs/juicefs_test.go +++ b/pkg/juicefs/juicefs_test.go @@ -642,6 +642,10 @@ func Test_juicefs_MountFs(t *testing.T) { return true, nil }) defer patch1.Reset() + patch := ApplyFunc(os.MkdirAll, func(path string, perm os.FileMode) error { + return nil + }) + defer patch.Reset() mockCtl := gomock.NewController(t) defer mockCtl.Finish() @@ -673,6 +677,10 @@ func Test_juicefs_MountFs(t *testing.T) { return true, errors.New("test") }) defer patch1.Reset() + patch := ApplyFunc(os.MkdirAll, func(path string, perm os.FileMode) error { + return nil + }) + defer patch.Reset() mockCtl := gomock.NewController(t) defer mockCtl.Finish() @@ -714,6 +722,10 @@ func Test_juicefs_MountFs(t *testing.T) { return true, nil }) defer patch1.Reset() + patch := ApplyFunc(os.MkdirAll, func(path string, perm os.FileMode) error { + return nil + }) + defer patch.Reset() mockCtl := gomock.NewController(t) defer mockCtl.Finish() @@ -750,6 +762,10 @@ func Test_juicefs_MountFs(t *testing.T) { return true, nil }) defer patch1.Reset() + patch := ApplyFunc(os.MkdirAll, func(path string, perm os.FileMode) error { + return nil + }) + defer patch.Reset() mockCtl := gomock.NewController(t) defer mockCtl.Finish() @@ -786,6 +802,10 @@ func Test_juicefs_MountFs(t *testing.T) { return true, nil }) defer patch1.Reset() + patch := ApplyFunc(os.MkdirAll, func(path string, perm os.FileMode) error { + return nil + }) + defer patch.Reset() mockCtl := gomock.NewController(t) defer mockCtl.Finish() @@ -822,6 +842,10 @@ func Test_juicefs_MountFs(t *testing.T) { return false, nil }) defer patch1.Reset() + patch := ApplyFunc(os.MkdirAll, func(path string, perm os.FileMode) error { + return nil + }) + defer patch.Reset() 
mockCtl := gomock.NewController(t) defer mockCtl.Finish() @@ -857,6 +881,10 @@ func Test_juicefs_MountFs(t *testing.T) { return false, nil }) defer patch1.Reset() + patch := ApplyFunc(os.MkdirAll, func(path string, perm os.FileMode) error { + return nil + }) + defer patch.Reset() mockCtl := gomock.NewController(t) defer mockCtl.Finish() diff --git a/pkg/juicefs/mount/builder/pod_test.go b/pkg/juicefs/mount/builder/pod_test.go index 596ff91310..c1a09a80e1 100644 --- a/pkg/juicefs/mount/builder/pod_test.go +++ b/pkg/juicefs/mount/builder/pod_test.go @@ -19,6 +19,7 @@ package builder import ( "encoding/json" "fmt" + "os" "path" "reflect" "testing" @@ -208,6 +209,7 @@ func Test_getCacheDirVolumes(t *testing.T) { } func TestNewMountPod(t *testing.T) { + defer func() { _ = os.RemoveAll("tmp") }() fuse.InitTestFds() config.NodeName = "node" config.Namespace = "" diff --git a/pkg/juicefs/mount/pod_mount.go b/pkg/juicefs/mount/pod_mount.go index 777b290bc3..a06f493de7 100644 --- a/pkg/juicefs/mount/pod_mount.go +++ b/pkg/juicefs/mount/pod_mount.go @@ -339,7 +339,11 @@ func (p *PodMount) createOrAddRef(ctx context.Context, podName string, jfsSettin jfsSetting.SecretName = fmt.Sprintf("juicefs-%s-secret", jfsSetting.UniqueId) // mkdir mountpath err = util.DoWithTimeout(ctx, 3*time.Second, func() error { - return os.MkdirAll(jfsSetting.MountPath, 0777) + exist, _ := k8sMount.PathExists(jfsSetting.MountPath) + if !exist { + return os.MkdirAll(jfsSetting.MountPath, 0777) + } + return nil }) if err != nil { return diff --git a/pkg/juicefs/mount/pod_mount_test.go b/pkg/juicefs/mount/pod_mount_test.go index 2b1797f88d..d9ddb3db5f 100644 --- a/pkg/juicefs/mount/pod_mount_test.go +++ b/pkg/juicefs/mount/pod_mount_test.go @@ -274,6 +274,7 @@ func TestAddRefOfMountWithMock(t *testing.T) { } func TestJUmount(t *testing.T) { + defer func() { _ = os.RemoveAll("tmp") }() fakeClientSet := fake.NewSimpleClientset() fuse.InitTestFds() @@ -740,6 +741,10 @@ func TestJMount(t *testing.T) { 
return mocks.FakeFileInfoIno1{}, nil }) defer patch5.Reset() + patch := ApplyFunc(os.MkdirAll, func(path string, perm os.FileMode) error { + return nil + }) + defer patch.Reset() fakeClient := fake.NewSimpleClientset() p := NewPodMount(&k8sclient.K8sClient{Interface: fakeClient}, mount.SafeFormatAndMount{