From 03d76860e8ba845bcda3b059a61360071f5a7723 Mon Sep 17 00:00:00 2001 From: drivebyer Date: Fri, 15 Nov 2024 14:39:03 +0800 Subject: [PATCH 1/8] rename Signed-off-by: drivebyer --- main.go | 6 +++--- pkg/controllers/redis/redis_controller.go | 8 ++++---- pkg/controllers/redis/redis_controller_suite_test.go | 2 +- .../rediscluster/rediscluster_controller.go | 8 ++++---- .../rediscluster_controller_suite_test.go | 2 +- .../redisreplication/redisreplication_controller.go | 12 ++++++------ .../redisreplication_controller_suite_test.go | 2 +- 7 files changed, 20 insertions(+), 20 deletions(-) diff --git a/main.go b/main.go index 9d46df50b..7d761d59d 100644 --- a/main.go +++ b/main.go @@ -118,7 +118,7 @@ func main() { os.Exit(1) } - if err = (&redis.RedisReconciler{ + if err = (&redis.Reconciler{ Client: mgr.GetClient(), K8sClient: k8sclient, Dk8sClient: dk8sClient, @@ -129,7 +129,7 @@ func main() { os.Exit(1) } rcLog := ctrl.Log.WithName("controllers").WithName("RedisCluster") - if err = (&rediscluster.RedisClusterReconciler{ + if err = (&rediscluster.Reconciler{ Client: mgr.GetClient(), K8sClient: k8sclient, Dk8sClient: dk8sClient, @@ -141,7 +141,7 @@ func main() { os.Exit(1) } rrLog := ctrl.Log.WithName("controllers").WithName("RedisReplication") - if err = (&redisreplication.RedisReplicationReconciler{ + if err = (&redisreplication.Reconciler{ Client: mgr.GetClient(), K8sClient: k8sclient, Dk8sClient: dk8sClient, diff --git a/pkg/controllers/redis/redis_controller.go b/pkg/controllers/redis/redis_controller.go index 41f11d811..72323cee9 100644 --- a/pkg/controllers/redis/redis_controller.go +++ b/pkg/controllers/redis/redis_controller.go @@ -31,8 +31,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -// RedisReconciler reconciles a Redis object -type RedisReconciler struct { +// Reconciler reconciles a Redis object +type Reconciler struct { client.Client K8sClient kubernetes.Interface Dk8sClient dynamic.Interface @@ -40,7 +40,7 @@ type RedisReconciler struct { Scheme *runtime.Scheme } -func (r *RedisReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { reqLogger := r.Log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name) reqLogger.Info("Reconciling opstree redis controller") instance := &redisv1beta2.Redis{} @@ -73,7 +73,7 @@ func (r *RedisReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl } // SetupWithManager sets up the controller with the Manager. -func (r *RedisReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&redisv1beta2.Redis{}). 
Complete(r) diff --git a/pkg/controllers/redis/redis_controller_suite_test.go b/pkg/controllers/redis/redis_controller_suite_test.go index a7a7b4b59..594bd2007 100644 --- a/pkg/controllers/redis/redis_controller_suite_test.go +++ b/pkg/controllers/redis/redis_controller_suite_test.go @@ -99,7 +99,7 @@ var _ = BeforeSuite(func() { dk8sClient, err := dynamic.NewForConfig(cfg) Expect(err).ToNot(HaveOccurred()) - err = (&RedisReconciler{ + err = (&Reconciler{ Client: k8sManager.GetClient(), K8sClient: k8sClient, Dk8sClient: dk8sClient, diff --git a/pkg/controllers/rediscluster/rediscluster_controller.go b/pkg/controllers/rediscluster/rediscluster_controller.go index acfc56079..d44632853 100644 --- a/pkg/controllers/rediscluster/rediscluster_controller.go +++ b/pkg/controllers/rediscluster/rediscluster_controller.go @@ -35,8 +35,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -// RedisClusterReconciler reconciles a RedisCluster object -type RedisClusterReconciler struct { +// Reconciler reconciles a RedisCluster object +type Reconciler struct { client.Client k8sutils.StatefulSet K8sClient kubernetes.Interface @@ -45,7 +45,7 @@ type RedisClusterReconciler struct { Scheme *runtime.Scheme } -func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { reqLogger := r.Log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name) reqLogger.V(1).Info("Reconciling opstree redis Cluster controller") instance := &redisv1beta2.RedisCluster{} @@ -243,7 +243,7 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request } // SetupWithManager sets up the controller with the Manager. -func (r *RedisClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&redisv1beta2.RedisCluster{}). Owns(&appsv1.StatefulSet{}). 
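// --- Illustrative sketch (not part of the patch): why the rename reads better at call sites.
// Each reconciler already lives in a package named after its resource, so the old type names
// stuttered when qualified (redis.RedisReconciler, rediscluster.RedisClusterReconciler).
// Fragment only; it assumes main.go's existing imports and the mgr/k8sclient/dk8sClient
// variables shown in the hunks above.
func setupRedisClusterController(mgr ctrl.Manager, k8sclient kubernetes.Interface, dk8sClient dynamic.Interface) error {
	return (&rediscluster.Reconciler{ // previously rediscluster.RedisClusterReconciler
		Client:     mgr.GetClient(),
		K8sClient:  k8sclient,
		Dk8sClient: dk8sClient,
		Log:        ctrl.Log.WithName("controllers").WithName("RedisCluster"),
		Scheme:     mgr.GetScheme(),
	}).SetupWithManager(mgr)
}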
diff --git a/pkg/controllers/rediscluster/rediscluster_controller_suite_test.go b/pkg/controllers/rediscluster/rediscluster_controller_suite_test.go index 5f9d5ef2d..4b09ff422 100644 --- a/pkg/controllers/rediscluster/rediscluster_controller_suite_test.go +++ b/pkg/controllers/rediscluster/rediscluster_controller_suite_test.go @@ -101,7 +101,7 @@ var _ = BeforeSuite(func() { Expect(err).ToNot(HaveOccurred()) rcLog := ctrl.Log.WithName("controllers").WithName("RedisCluster") - err = (&RedisClusterReconciler{ + err = (&Reconciler{ Client: k8sManager.GetClient(), K8sClient: k8sClient, Dk8sClient: dk8sClient, diff --git a/pkg/controllers/redisreplication/redisreplication_controller.go b/pkg/controllers/redisreplication/redisreplication_controller.go index fb5955d6e..81ac0758f 100644 --- a/pkg/controllers/redisreplication/redisreplication_controller.go +++ b/pkg/controllers/redisreplication/redisreplication_controller.go @@ -17,8 +17,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -// RedisReplicationReconciler reconciles a RedisReplication object -type RedisReplicationReconciler struct { +// Reconciler reconciles a RedisReplication object +type Reconciler struct { client.Client k8sutils.Pod k8sutils.StatefulSet @@ -28,7 +28,7 @@ type RedisReplicationReconciler struct { Scheme *runtime.Scheme } -func (r *RedisReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { reqLogger := r.Log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name) reqLogger.Info("Reconciling opstree redis replication controller") instance := &redisv1beta2.RedisReplication{} @@ -84,7 +84,7 @@ func (r *RedisReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Req return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "") } -func (r *RedisReplicationReconciler) UpdateRedisReplicationMaster(ctx context.Context, instance *redisv1beta2.RedisReplication, masterNode string) error { +func (r *Reconciler) UpdateRedisReplicationMaster(ctx context.Context, instance *redisv1beta2.RedisReplication, masterNode string) error { if instance.Status.MasterNode == masterNode { return nil } @@ -95,7 +95,7 @@ func (r *RedisReplicationReconciler) UpdateRedisReplicationMaster(ctx context.Co return nil } -func (r *RedisReplicationReconciler) UpdateRedisPodRoleLabel(ctx context.Context, cr *redisv1beta2.RedisReplication, masterNode string) error { +func (r *Reconciler) UpdateRedisPodRoleLabel(ctx context.Context, cr *redisv1beta2.RedisReplication, masterNode string) error { labels := k8sutils.GetRedisReplicationLabels(cr) pods, err := r.ListPods(ctx, cr.GetNamespace(), labels) if err != nil { @@ -121,7 +121,7 @@ func (r *RedisReplicationReconciler) UpdateRedisPodRoleLabel(ctx context.Context } // SetupWithManager sets up the controller with the Manager. -func (r *RedisReplicationReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&redisv1beta2.RedisReplication{}). Owns(&appsv1.StatefulSet{}). 
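// --- Illustrative sketch (not part of the patch): what the For/Owns/Complete chains in the
// SetupWithManager hunks above do. For() registers the primary resource; each Owns() adds a
// watch on a secondary type and maps events on objects owned (via ownerReferences) by the
// primary back to a reconcile request for that primary. Fragment only; it assumes the
// controller package's existing imports (ctrl, appsv1, redisv1beta2) and the Reconciler type.
func setupWithManager(mgr ctrl.Manager, r *Reconciler) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&redisv1beta2.RedisReplication{}). // reconcile when the CR itself changes
		Owns(&appsv1.StatefulSet{}).           // ...or when a StatefulSet it owns changes
		Complete(r)                            // build the controller with r as the Reconciler
}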
diff --git a/pkg/controllers/redisreplication/redisreplication_controller_suite_test.go b/pkg/controllers/redisreplication/redisreplication_controller_suite_test.go index 0081b194e..8ef3a7f5f 100644 --- a/pkg/controllers/redisreplication/redisreplication_controller_suite_test.go +++ b/pkg/controllers/redisreplication/redisreplication_controller_suite_test.go @@ -101,7 +101,7 @@ var _ = BeforeSuite(func() { Expect(err).ToNot(HaveOccurred()) rrLog := ctrl.Log.WithName("controllers").WithName("RedisReplication") - err = (&RedisReplicationReconciler{ + err = (&Reconciler{ Client: k8sManager.GetClient(), K8sClient: k8sClient, Dk8sClient: dk8sClient, From b47c52a722c3561c6e2ed5905d68b7090053799d Mon Sep 17 00:00:00 2001 From: drivebyer Date: Fri, 15 Nov 2024 15:36:59 +0800 Subject: [PATCH 2/8] add context param Signed-off-by: drivebyer --- mocks/utils/utils.go | 3 +- pkg/controllers/redis/redis_controller.go | 8 +- .../rediscluster/rediscluster_controller.go | 32 +++---- .../redisreplication_controller.go | 8 +- .../redissentinel/redissentinel_controller.go | 8 +- pkg/k8sutils/cluster-scaling.go | 65 +++++++------- pkg/k8sutils/finalizer.go | 22 ++--- pkg/k8sutils/finalizer_test.go | 16 ++-- pkg/k8sutils/poddisruption.go | 12 +-- pkg/k8sutils/redis-cluster.go | 34 +++---- pkg/k8sutils/redis-cluster_test.go | 9 +- pkg/k8sutils/redis-replication.go | 7 +- pkg/k8sutils/redis-sentinel.go | 15 ++-- pkg/k8sutils/redis-sentinel_test.go | 2 +- pkg/k8sutils/redis-standalone.go | 6 +- pkg/k8sutils/redis.go | 90 +++++++++---------- pkg/k8sutils/redis_test.go | 12 +-- pkg/k8sutils/secrets.go | 4 +- pkg/k8sutils/secrets_test.go | 5 +- pkg/k8sutils/statefulset.go | 18 ++-- pkg/k8sutils/statefulset_test.go | 10 +-- pkg/k8sutils/status.go | 2 +- 22 files changed, 198 insertions(+), 190 deletions(-) diff --git a/mocks/utils/utils.go b/mocks/utils/utils.go index b48dad6a2..93bf3d90b 100644 --- a/mocks/utils/utils.go +++ b/mocks/utils/utils.go @@ -1,6 +1,7 @@ package utils import ( + "context" "fmt" "strconv" @@ -76,7 +77,7 @@ func CreateFakeObjectWithSecret(name, namespace, key string) []runtime.Object { return []runtime.Object{secret} } -func CreateFakeClientWithSecrets(cr *redisv1beta2.RedisCluster, secretName, secretKey, secretValue string) *fake.Clientset { +func CreateFakeClientWithSecrets(ctx context.Context, cr *redisv1beta2.RedisCluster, secretName, secretKey, secretValue string) *fake.Clientset { leaderReplicas := cr.Spec.GetReplicaCounts("leader") followerReplicas := cr.Spec.GetReplicaCounts("follower") pods := make([]runtime.Object, 0) diff --git a/pkg/controllers/redis/redis_controller.go b/pkg/controllers/redis/redis_controller.go index 72323cee9..390171354 100644 --- a/pkg/controllers/redis/redis_controller.go +++ b/pkg/controllers/redis/redis_controller.go @@ -50,7 +50,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu return intctrlutil.RequeueWithErrorChecking(err, reqLogger, "failed to get redis instance") } if instance.ObjectMeta.GetDeletionTimestamp() != nil { - if err = k8sutils.HandleRedisFinalizer(r.Client, r.K8sClient, r.Log, instance); err != nil { + if err = k8sutils.HandleRedisFinalizer(ctx, r.Client, r.K8sClient, r.Log, instance); err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "failed to handle redis finalizer") } return intctrlutil.Reconciled() @@ -58,14 +58,14 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu if _, found := 
instance.ObjectMeta.GetAnnotations()["redis.opstreelabs.in/skip-reconcile"]; found { return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "found skip reconcile annotation") } - if err = k8sutils.AddFinalizer(instance, k8sutils.RedisFinalizer, r.Client); err != nil { + if err = k8sutils.AddFinalizer(ctx, instance, k8sutils.RedisFinalizer, r.Client); err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "failed to add finalizer") } - err = k8sutils.CreateStandaloneRedis(instance, r.K8sClient) + err = k8sutils.CreateStandaloneRedis(ctx, instance, r.K8sClient) if err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "failed to create redis") } - err = k8sutils.CreateStandaloneService(instance, r.K8sClient) + err = k8sutils.CreateStandaloneService(ctx, instance, r.K8sClient) if err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "failed to create service") } diff --git a/pkg/controllers/rediscluster/rediscluster_controller.go b/pkg/controllers/rediscluster/rediscluster_controller.go index d44632853..895e5f918 100644 --- a/pkg/controllers/rediscluster/rediscluster_controller.go +++ b/pkg/controllers/rediscluster/rediscluster_controller.go @@ -55,7 +55,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu return intctrlutil.RequeueWithErrorChecking(err, reqLogger, "failed to get redis cluster instance") } if instance.ObjectMeta.GetDeletionTimestamp() != nil { - if err = k8sutils.HandleRedisClusterFinalizer(r.Client, r.K8sClient, r.Log, instance); err != nil { + if err = k8sutils.HandleRedisClusterFinalizer(ctx, r.Client, r.K8sClient, r.Log, instance); err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "failed to handle redis cluster finalizer") } return intctrlutil.Reconciled() @@ -69,7 +69,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu followerReplicas := instance.Spec.GetReplicaCounts("follower") totalReplicas := leaderReplicas + followerReplicas - if err = k8sutils.AddFinalizer(instance, k8sutils.RedisClusterFinalizer, r.Client); err != nil { + if err = k8sutils.AddFinalizer(ctx, instance, k8sutils.RedisClusterFinalizer, r.Client); err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "failed to add finalizer") } @@ -90,11 +90,11 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu // Step 1 Remove the Follower Node k8sutils.RemoveRedisFollowerNodesFromCluster(ctx, r.K8sClient, r.Log, instance) // Step 2 Reshard the Cluster - k8sutils.ReshardRedisCluster(r.K8sClient, r.Log, instance, true) + k8sutils.ReshardRedisCluster(ctx, r.K8sClient, r.Log, instance, true) } reqLogger.Info("Redis cluster is downscaled... Rebalancing the cluster") // Step 3 Rebalance the cluster - k8sutils.RebalanceRedisCluster(r.K8sClient, r.Log, instance) + k8sutils.RebalanceRedisCluster(ctx, r.K8sClient, r.Log, instance) reqLogger.Info("Redis cluster is downscaled... 
Rebalancing the cluster is done") return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "") } @@ -102,23 +102,23 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu // Mark the cluster status as initializing if there are no leader or follower nodes if (instance.Status.ReadyLeaderReplicas == 0 && instance.Status.ReadyFollowerReplicas == 0) || instance.Status.ReadyLeaderReplicas != leaderReplicas { - err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterInitializing, status.InitializingClusterLeaderReason, instance.Status.ReadyLeaderReplicas, instance.Status.ReadyFollowerReplicas, r.Dk8sClient) + err = k8sutils.UpdateRedisClusterStatus(ctx, instance, status.RedisClusterInitializing, status.InitializingClusterLeaderReason, instance.Status.ReadyLeaderReplicas, instance.Status.ReadyFollowerReplicas, r.Dk8sClient) if err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "") } } if leaderReplicas != 0 { - err = k8sutils.CreateRedisLeaderService(instance, r.K8sClient) + err = k8sutils.CreateRedisLeaderService(ctx, instance, r.K8sClient) if err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "") } } - err = k8sutils.CreateRedisLeader(instance, r.K8sClient) + err = k8sutils.CreateRedisLeader(ctx, instance, r.K8sClient) if err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "") } - err = k8sutils.ReconcileRedisPodDisruptionBudget(instance, "leader", instance.Spec.RedisLeader.PodDisruptionBudget, r.K8sClient) + err = k8sutils.ReconcileRedisPodDisruptionBudget(ctx, instance, "leader", instance.Spec.RedisLeader.PodDisruptionBudget, r.K8sClient) if err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "") } @@ -127,23 +127,23 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu // Mark the cluster status as initializing if there are no follower nodes if (instance.Status.ReadyLeaderReplicas == 0 && instance.Status.ReadyFollowerReplicas == 0) || instance.Status.ReadyFollowerReplicas != followerReplicas { - err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterInitializing, status.InitializingClusterFollowerReason, leaderReplicas, instance.Status.ReadyFollowerReplicas, r.Dk8sClient) + err = k8sutils.UpdateRedisClusterStatus(ctx, instance, status.RedisClusterInitializing, status.InitializingClusterFollowerReason, leaderReplicas, instance.Status.ReadyFollowerReplicas, r.Dk8sClient) if err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "") } } // if we have followers create their service. 
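// --- Illustrative sketch (not part of the patch): the context-threading pattern this commit
// applies throughout. Previously, helpers opened their own context (context.TODO()); now the
// ctx that controller-runtime hands to Reconcile is passed down as the first parameter, so
// cancellation and deadlines propagate to every Kubernetes API call. Self-contained sketch
// with assumed names (getServiceBefore/getServiceAfter are not operator functions).
package contextdemo

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// Before: the caller's context is ignored, so cancellation never reaches the API server.
func getServiceBefore(client kubernetes.Interface, namespace, name string) (*corev1.Service, error) {
	return client.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{})
}

// After: ctx is accepted as the first parameter and passed straight through to client-go.
func getServiceAfter(ctx context.Context, client kubernetes.Interface, namespace, name string) (*corev1.Service, error) {
	return client.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
}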
if followerReplicas != 0 { - err = k8sutils.CreateRedisFollowerService(instance, r.K8sClient) + err = k8sutils.CreateRedisFollowerService(ctx, instance, r.K8sClient) if err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "") } } - err = k8sutils.CreateRedisFollower(instance, r.K8sClient) + err = k8sutils.CreateRedisFollower(ctx, instance, r.K8sClient) if err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "") } - err = k8sutils.ReconcileRedisPodDisruptionBudget(instance, "follower", instance.Spec.RedisFollower.PodDisruptionBudget, r.K8sClient) + err = k8sutils.ReconcileRedisPodDisruptionBudget(ctx, instance, "follower", instance.Spec.RedisFollower.PodDisruptionBudget, r.K8sClient) if err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "") } @@ -155,7 +155,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu // Mark the cluster status as bootstrapping if all the leader and follower nodes are ready if !(instance.Status.ReadyLeaderReplicas == leaderReplicas && instance.Status.ReadyFollowerReplicas == followerReplicas) { - err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterBootstrap, status.BootstrapClusterReason, leaderReplicas, followerReplicas, r.Dk8sClient) + err = k8sutils.UpdateRedisClusterStatus(ctx, instance, status.RedisClusterBootstrap, status.BootstrapClusterReason, leaderReplicas, followerReplicas, r.Dk8sClient) if err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "") } @@ -174,7 +174,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu // Step 2 : Add Redis Node k8sutils.AddRedisNodeToCluster(ctx, r.K8sClient, r.Log, instance) // Step 3 Rebalance the cluster using the empty masters - k8sutils.RebalanceRedisClusterEmptyMasters(r.K8sClient, r.Log, instance) + k8sutils.RebalanceRedisClusterEmptyMasters(ctx, r.K8sClient, r.Log, instance) } } } else { @@ -194,7 +194,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu reqLogger.Error(err, "failed to determine unhealthy node count in cluster") } if int(totalReplicas) > 1 && unhealthyNodeCount >= int(totalReplicas)-1 { - err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterFailed, "RedisCluster has too many unhealthy nodes", leaderReplicas, followerReplicas, r.Dk8sClient) + err = k8sutils.UpdateRedisClusterStatus(ctx, instance, status.RedisClusterFailed, "RedisCluster has too many unhealthy nodes", leaderReplicas, followerReplicas, r.Dk8sClient) if err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "") } @@ -233,7 +233,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu // Mark the cluster status as ready if all the leader and follower nodes are ready if instance.Status.ReadyLeaderReplicas == leaderReplicas && instance.Status.ReadyFollowerReplicas == followerReplicas { if k8sutils.RedisClusterStatusHealth(ctx, r.K8sClient, r.Log, instance) { - err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterReady, status.ReadyClusterReason, leaderReplicas, followerReplicas, r.Dk8sClient) + err = k8sutils.UpdateRedisClusterStatus(ctx, instance, status.RedisClusterReady, status.ReadyClusterReason, leaderReplicas, followerReplicas, r.Dk8sClient) if err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "") } diff --git a/pkg/controllers/redisreplication/redisreplication_controller.go b/pkg/controllers/redisreplication/redisreplication_controller.go index 81ac0758f..3436ee8ba 100644 --- 
a/pkg/controllers/redisreplication/redisreplication_controller.go +++ b/pkg/controllers/redisreplication/redisreplication_controller.go @@ -38,7 +38,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu return intctrlutil.RequeueWithErrorChecking(err, reqLogger, "") } if instance.ObjectMeta.GetDeletionTimestamp() != nil { - if err = k8sutils.HandleRedisReplicationFinalizer(r.Client, r.K8sClient, r.Log, instance); err != nil { + if err = k8sutils.HandleRedisReplicationFinalizer(ctx, r.Client, r.K8sClient, r.Log, instance); err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "") } return intctrlutil.Reconciled() @@ -46,14 +46,14 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu if _, found := instance.ObjectMeta.GetAnnotations()["redisreplication.opstreelabs.in/skip-reconcile"]; found { return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "found skip reconcile annotation") } - if err = k8sutils.AddFinalizer(instance, k8sutils.RedisReplicationFinalizer, r.Client); err != nil { + if err = k8sutils.AddFinalizer(ctx, instance, k8sutils.RedisReplicationFinalizer, r.Client); err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "") } - err = k8sutils.CreateReplicationRedis(instance, r.K8sClient) + err = k8sutils.CreateReplicationRedis(ctx, instance, r.K8sClient) if err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "") } - err = k8sutils.CreateReplicationService(instance, r.K8sClient) + err = k8sutils.CreateReplicationService(ctx, instance, r.K8sClient) if err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "") } diff --git a/pkg/controllers/redissentinel/redissentinel_controller.go b/pkg/controllers/redissentinel/redissentinel_controller.go index feb1ba695..9b2ca19a1 100644 --- a/pkg/controllers/redissentinel/redissentinel_controller.go +++ b/pkg/controllers/redissentinel/redissentinel_controller.go @@ -37,7 +37,7 @@ func (r *RedisSentinelReconciler) Reconcile(ctx context.Context, req ctrl.Reques return intctrlutil.RequeueWithErrorChecking(err, reqLogger, "") } if instance.ObjectMeta.GetDeletionTimestamp() != nil { - if err = k8sutils.HandleRedisSentinelFinalizer(r.Client, r.Log, instance); err != nil { + if err = k8sutils.HandleRedisSentinelFinalizer(ctx, r.Client, r.Log, instance); err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "") } return intctrlutil.Reconciled() @@ -50,7 +50,7 @@ func (r *RedisSentinelReconciler) Reconcile(ctx context.Context, req ctrl.Reques // Get total Sentinel Replicas // sentinelReplicas := instance.Spec.GetSentinelCounts("sentinel") - if err = k8sutils.AddFinalizer(instance, k8sutils.RedisSentinelFinalizer, r.Client); err != nil { + if err = k8sutils.AddFinalizer(ctx, instance, k8sutils.RedisSentinelFinalizer, r.Client); err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "") } @@ -75,13 +75,13 @@ func (r *RedisSentinelReconciler) Reconcile(ctx context.Context, req ctrl.Reques return intctrlutil.RequeueWithError(err, reqLogger, "") } - err = k8sutils.ReconcileSentinelPodDisruptionBudget(instance, instance.Spec.PodDisruptionBudget, r.K8sClient) + err = k8sutils.ReconcileSentinelPodDisruptionBudget(ctx, instance, instance.Spec.PodDisruptionBudget, r.K8sClient) if err != nil { return intctrlutil.RequeueWithError(err, reqLogger, "") } // Create the Service for Redis Sentinel - err = k8sutils.CreateRedisSentinelService(instance, r.K8sClient) + err = k8sutils.CreateRedisSentinelService(ctx, instance, r.K8sClient) if err != 
nil { return intctrlutil.RequeueWithError(err, reqLogger, "") } diff --git a/pkg/k8sutils/cluster-scaling.go b/pkg/k8sutils/cluster-scaling.go index 4b6edec9e..cae21621b 100644 --- a/pkg/k8sutils/cluster-scaling.go +++ b/pkg/k8sutils/cluster-scaling.go @@ -15,9 +15,8 @@ import ( // ReshardRedisCluster transfer the slots from the last node to the first node. // // NOTE: when all slot been transferred, the node become slave of the first master node. -func ReshardRedisCluster(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, remove bool) { - ctx := context.TODO() - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") +func ReshardRedisCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, remove bool) { + redisClient := configureRedisClient(ctx, client, logger, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() var cmd []string @@ -38,11 +37,11 @@ func ReshardRedisCluster(client kubernetes.Interface, logger logr.Logger, cr *re if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(transferPOD, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(client, logger, transferPOD, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, logger, transferPOD, *cr.Spec.Port)) } if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { logger.Error(err, "Error in getting redis password") } @@ -77,7 +76,7 @@ func ReshardRedisCluster(client kubernetes.Interface, logger logr.Logger, cr *re logger.V(1).Info("Skipped the execution of", "Cmd", cmd) return } - executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + executeCommand(ctx, client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") if remove { RemoveRedisNodeFromCluster(ctx, client, logger, cr, removePOD) @@ -108,7 +107,7 @@ func getRedisClusterSlots(ctx context.Context, redisClient *redis.Client, logger // getRedisNodeID would return nodeID of a redis node by passing pod func getRedisNodeID(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, pod RedisDetails) string { - redisClient := configureRedisClient(client, logger, cr, pod.PodName) + redisClient := configureRedisClient(ctx, client, logger, cr, pod.PodName) defer redisClient.Close() pong, err := redisClient.Ping(ctx).Result() @@ -134,7 +133,7 @@ func getRedisNodeID(ctx context.Context, client kubernetes.Interface, logger log } // Rebalance the Redis CLuster using the Empty Master Nodes -func RebalanceRedisClusterEmptyMasters(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { +func RebalanceRedisClusterEmptyMasters(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { // cmd = redis-cli --cluster rebalance : --cluster-use-empty-masters -a var cmd []string pod := RedisDetails{ @@ -146,13 +145,13 @@ func RebalanceRedisClusterEmptyMasters(client kubernetes.Interface, logger logr. 
if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(pod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(client, logger, pod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, logger, pod, *cr.Spec.Port)) } cmd = append(cmd, "--cluster-use-empty-masters") if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { logger.Error(err, "Error in getting redis password") } @@ -163,12 +162,12 @@ func RebalanceRedisClusterEmptyMasters(client kubernetes.Interface, logger logr. cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) logger.V(1).Info("Redis cluster rebalance command is", "Command", cmd) - executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-1") + executeCommand(ctx, client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-1") } func CheckIfEmptyMasters(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { totalRedisLeaderNodes := CheckRedisNodeCount(ctx, client, logger, cr, "leader") - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") + redisClient := configureRedisClient(ctx, client, logger, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() for i := 0; i < int(totalRedisLeaderNodes); i++ { @@ -181,14 +180,14 @@ func CheckIfEmptyMasters(ctx context.Context, client kubernetes.Interface, logge if podSlots == "0" || podSlots == "" { logger.V(1).Info("Found Empty Redis Leader Node", "pod", pod) - RebalanceRedisClusterEmptyMasters(client, logger, cr) + RebalanceRedisClusterEmptyMasters(ctx, client, logger, cr) break } } } // Rebalance Redis Cluster Would Rebalance the Redis Cluster without using the empty masters -func RebalanceRedisCluster(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { +func RebalanceRedisCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { // cmd = redis-cli --cluster rebalance : -a var cmd []string pod := RedisDetails{ @@ -200,11 +199,11 @@ func RebalanceRedisCluster(client kubernetes.Interface, logger logr.Logger, cr * if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(pod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(client, logger, pod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, logger, pod, *cr.Spec.Port)) } if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { logger.Error(err, "Error in getting redis password") } @@ -215,7 +214,7 @@ func RebalanceRedisCluster(client kubernetes.Interface, logger logr.Logger, cr * cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) 
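// --- Illustrative sketch (not part of the patch): the shape of the command these helpers
// assemble. RebalanceRedisClusterEmptyMasters builds an argv slice and runs it inside a
// leader pod via executeCommand, roughly equivalent to:
//
//	redis-cli --cluster rebalance <leader-addr>:<port> --cluster-use-empty-masters -a <password>
//
// Self-contained sketch; buildRebalanceCmd is an assumed name, not an operator function.
package rebalancedemo

func buildRebalanceCmd(addr, password string, useEmptyMasters bool) []string {
	cmd := []string{"redis-cli", "--cluster", "rebalance", addr}
	if useEmptyMasters {
		cmd = append(cmd, "--cluster-use-empty-masters")
	}
	if password != "" {
		cmd = append(cmd, "-a", password)
	}
	return cmd
}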
logger.V(1).Info("Redis cluster rebalance command is", "Command", cmd) - executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-1") + executeCommand(ctx, client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-1") } // Add redis cluster node would add a node to the existing redis cluster using redis-cli @@ -238,12 +237,12 @@ func AddRedisNodeToCluster(ctx context.Context, client kubernetes.Interface, log cmd = append(cmd, getRedisHostname(newPod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) cmd = append(cmd, getRedisHostname(existingPod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(client, logger, newPod, *cr.Spec.Port)) - cmd = append(cmd, getRedisServerAddress(client, logger, existingPod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, logger, newPod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, logger, existingPod, *cr.Spec.Port)) } if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { logger.Error(err, "Error in getting redis password") } @@ -254,7 +253,7 @@ func AddRedisNodeToCluster(ctx context.Context, client kubernetes.Interface, log cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) logger.V(1).Info("Redis cluster add-node command is", "Command", cmd) - executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + executeCommand(ctx, client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") } // getAttachedFollowerNodeIDs would return a slice of redis followers attached to a redis leader @@ -277,7 +276,7 @@ func getAttachedFollowerNodeIDs(ctx context.Context, redisClient *redis.Client, // Remove redis follower node would remove all follower nodes of last leader node using redis-cli func RemoveRedisFollowerNodesFromCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { var cmd []string - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") + redisClient := configureRedisClient(ctx, client, logger, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() currentRedisCount := CheckRedisNodeCount(ctx, client, logger, cr, "leader") @@ -293,7 +292,7 @@ func RemoveRedisFollowerNodesFromCluster(ctx context.Context, client kubernetes. cmd = []string{"redis-cli"} if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { logger.Error(err, "Error in getting redis password") } @@ -309,13 +308,13 @@ func RemoveRedisFollowerNodesFromCluster(ctx context.Context, client kubernetes. 
if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(existingPod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(client, logger, existingPod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, logger, existingPod, *cr.Spec.Port)) } for _, followerNodeID := range followerNodeIDs { cmd = append(cmd, followerNodeID) logger.V(1).Info("Redis cluster follower remove command is", "Command", cmd) - executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + executeCommand(ctx, client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") cmd = cmd[:len(cmd)-1] } } @@ -323,7 +322,7 @@ func RemoveRedisFollowerNodesFromCluster(ctx context.Context, client kubernetes. // Remove redis cluster node would remove last node to the existing redis cluster using redis-cli func RemoveRedisNodeFromCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, removePod RedisDetails) { var cmd []string - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") + redisClient := configureRedisClient(ctx, client, logger, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() // currentRedisCount := CheckRedisNodeCount(ctx, client, logger, cr, "leader") @@ -341,14 +340,14 @@ func RemoveRedisNodeFromCluster(ctx context.Context, client kubernetes.Interface if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(existingPod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(client, logger, existingPod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, logger, existingPod, *cr.Spec.Port)) } removePodNodeID := getRedisNodeID(ctx, client, logger, cr, removePod) cmd = append(cmd, removePodNodeID) if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { logger.Error(err, "Error in getting redis password") } @@ -362,14 +361,14 @@ func RemoveRedisNodeFromCluster(ctx context.Context, client kubernetes.Interface if getRedisClusterSlots(ctx, redisClient, logger, removePodNodeID) != "0" { logger.V(1).Info("Skipping execution remove leader not empty", "cmd", cmd) } - executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + executeCommand(ctx, client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") } // verifyLeaderPod return true if the pod is leader/master func VerifyLeaderPod(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) bool { podName := cr.Name + "-leader-" + strconv.Itoa(int(CheckRedisNodeCount(ctx, client, logger, cr, "leader"))-1) - redisClient := configureRedisClient(client, logger, cr, podName) + redisClient := configureRedisClient(ctx, client, logger, cr, podName) defer redisClient.Close() return verifyLeaderPodInfo(ctx, redisClient, logger, podName) } @@ -406,11 +405,11 @@ func ClusterFailover(ctx context.Context, client kubernetes.Interface, logger lo if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(pod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, 
getRedisServerAddress(client, logger, pod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, logger, pod, *cr.Spec.Port)) } if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { logger.Error(err, "Error in getting redis password") } @@ -421,5 +420,5 @@ func ClusterFailover(ctx context.Context, client kubernetes.Interface, logger lo cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, slavePodName)...) logger.V(1).Info("Redis cluster failover command is", "Command", cmd) - executeCommand(client, logger, cr, cmd, slavePodName) + executeCommand(ctx, client, logger, cr, cmd, slavePodName) } diff --git a/pkg/k8sutils/finalizer.go b/pkg/k8sutils/finalizer.go index 338224698..e16522d14 100644 --- a/pkg/k8sutils/finalizer.go +++ b/pkg/k8sutils/finalizer.go @@ -22,11 +22,11 @@ const ( ) // HandleRedisFinalizer finalize resource if instance is marked to be deleted -func HandleRedisFinalizer(ctrlclient client.Client, k8sClient kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.Redis) error { +func HandleRedisFinalizer(ctx context.Context, ctrlclient client.Client, k8sClient kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.Redis) error { if cr.GetDeletionTimestamp() != nil { if controllerutil.ContainsFinalizer(cr, RedisFinalizer) { if cr.Spec.Storage != nil && !cr.Spec.Storage.KeepAfterDelete { - if err := finalizeRedisPVC(k8sClient, logger, cr); err != nil { + if err := finalizeRedisPVC(ctx, k8sClient, logger, cr); err != nil { return err } } @@ -41,11 +41,11 @@ func HandleRedisFinalizer(ctrlclient client.Client, k8sClient kubernetes.Interfa } // HandleRedisClusterFinalizer finalize resource if instance is marked to be deleted -func HandleRedisClusterFinalizer(ctrlclient client.Client, k8sClient kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) error { +func HandleRedisClusterFinalizer(ctx context.Context, ctrlclient client.Client, k8sClient kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) error { if cr.GetDeletionTimestamp() != nil { if controllerutil.ContainsFinalizer(cr, RedisClusterFinalizer) { if cr.Spec.Storage != nil && !cr.Spec.Storage.KeepAfterDelete { - if err := finalizeRedisClusterPVC(k8sClient, logger, cr); err != nil { + if err := finalizeRedisClusterPVC(ctx, k8sClient, logger, cr); err != nil { return err } } @@ -60,11 +60,11 @@ func HandleRedisClusterFinalizer(ctrlclient client.Client, k8sClient kubernetes. 
} // Handle RedisReplicationFinalizer finalize resource if instance is marked to be deleted -func HandleRedisReplicationFinalizer(ctrlclient client.Client, k8sClient kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication) error { +func HandleRedisReplicationFinalizer(ctx context.Context, ctrlclient client.Client, k8sClient kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication) error { if cr.GetDeletionTimestamp() != nil { if controllerutil.ContainsFinalizer(cr, RedisReplicationFinalizer) { if cr.Spec.Storage != nil && !cr.Spec.Storage.KeepAfterDelete { - if err := finalizeRedisReplicationPVC(k8sClient, logger, cr); err != nil { + if err := finalizeRedisReplicationPVC(ctx, k8sClient, logger, cr); err != nil { return err } } @@ -79,7 +79,7 @@ func HandleRedisReplicationFinalizer(ctrlclient client.Client, k8sClient kuberne } // HandleRedisSentinelFinalizer finalize resource if instance is marked to be deleted -func HandleRedisSentinelFinalizer(ctrlclient client.Client, logger logr.Logger, cr *redisv1beta2.RedisSentinel) error { +func HandleRedisSentinelFinalizer(ctx context.Context, ctrlclient client.Client, logger logr.Logger, cr *redisv1beta2.RedisSentinel) error { if cr.GetDeletionTimestamp() != nil { if controllerutil.ContainsFinalizer(cr, RedisSentinelFinalizer) { controllerutil.RemoveFinalizer(cr, RedisSentinelFinalizer) @@ -93,7 +93,7 @@ func HandleRedisSentinelFinalizer(ctrlclient client.Client, logger logr.Logger, } // AddFinalizer add finalizer for graceful deletion -func AddFinalizer(cr client.Object, finalizer string, cl client.Client) error { +func AddFinalizer(ctx context.Context, cr client.Object, finalizer string, cl client.Client) error { if !controllerutil.ContainsFinalizer(cr, finalizer) { controllerutil.AddFinalizer(cr, finalizer) return cl.Update(context.TODO(), cr) @@ -102,7 +102,7 @@ func AddFinalizer(cr client.Object, finalizer string, cl client.Client) error { } // finalizeRedisPVC delete PVC -func finalizeRedisPVC(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.Redis) error { +func finalizeRedisPVC(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.Redis) error { pvcTemplateName := env.GetString(EnvOperatorSTSPVCTemplateName, cr.Name) PVCName := fmt.Sprintf("%s-%s-0", pvcTemplateName, cr.Name) err := client.CoreV1().PersistentVolumeClaims(cr.Namespace).Delete(context.TODO(), PVCName, metav1.DeleteOptions{}) @@ -114,7 +114,7 @@ func finalizeRedisPVC(client kubernetes.Interface, logger logr.Logger, cr *redis } // finalizeRedisClusterPVC delete PVCs -func finalizeRedisClusterPVC(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) error { +func finalizeRedisClusterPVC(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) error { for _, role := range []string{"leader", "follower"} { for i := 0; i < int(cr.Spec.GetReplicaCounts(role)); i++ { pvcTemplateName := env.GetString(EnvOperatorSTSPVCTemplateName, cr.Name+"-"+role) @@ -140,7 +140,7 @@ func finalizeRedisClusterPVC(client kubernetes.Interface, logger logr.Logger, cr } // finalizeRedisReplicationPVC delete PVCs -func finalizeRedisReplicationPVC(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication) error { +func finalizeRedisReplicationPVC(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication) error { for i := 0; i < int(cr.Spec.GetReplicationCounts("replication")); i++ { 
pvcTemplateName := env.GetString(EnvOperatorSTSPVCTemplateName, cr.Name) PVCName := fmt.Sprintf("%s-%s-%d", pvcTemplateName, cr.Name, i) diff --git a/pkg/k8sutils/finalizer_test.go b/pkg/k8sutils/finalizer_test.go index a5324369b..8e2e67816 100644 --- a/pkg/k8sutils/finalizer_test.go +++ b/pkg/k8sutils/finalizer_test.go @@ -147,7 +147,7 @@ func TestHandleRedisFinalizer(t *testing.T) { assert.NoError(t, err) } - err := HandleRedisFinalizer(tc.mockClient, k8sClient, logger, tc.cr) + err := HandleRedisFinalizer(context.TODO(), tc.mockClient, k8sClient, logger, tc.cr) if tc.expectError { assert.Error(t, err) } else { @@ -279,7 +279,7 @@ func TestHandleRedisClusterFinalizer(t *testing.T) { } } - err := HandleRedisClusterFinalizer(tc.mockClient, k8sClient, logger, tc.cr) + err := HandleRedisClusterFinalizer(context.TODO(), tc.mockClient, k8sClient, logger, tc.cr) if tc.expectError { assert.Error(t, err) } else { @@ -478,7 +478,7 @@ func TestHandleRedisReplicationFinalizer(t *testing.T) { } } - err := HandleRedisReplicationFinalizer(tc.mockClient, k8sClient, logger, tc.cr) + err := HandleRedisReplicationFinalizer(context.TODO(), tc.mockClient, k8sClient, logger, tc.cr) if tc.expectError { assert.Error(t, err) } else { @@ -558,7 +558,7 @@ func TestHandleRedisSentinelFinalizer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { logger := testr.New(t) - err := HandleRedisSentinelFinalizer(tc.mockClient, logger, tc.cr) + err := HandleRedisSentinelFinalizer(context.TODO(), tc.mockClient, logger, tc.cr) if tc.expectError { assert.Error(t, err) } else { @@ -618,7 +618,7 @@ func TestFinalizeRedisPVC(t *testing.T) { assert.NoError(t, err) } - err := finalizeRedisPVC(k8sClient, logger, cr) + err := finalizeRedisPVC(context.TODO(), k8sClient, logger, cr) if tc.expectError { assert.Error(t, err) assert.Equal(t, tc.errorExpected, err) @@ -702,7 +702,7 @@ func TestFinalizeRedisReplicationPVC(t *testing.T) { k8sClient = k8sClientFake.NewSimpleClientset() } - err := finalizeRedisReplicationPVC(k8sClient, logger, tc.redisReplication) + err := finalizeRedisReplicationPVC(context.TODO(), k8sClient, logger, tc.redisReplication) if tc.expectError { assert.Error(t, err) } else { @@ -773,7 +773,7 @@ func TestFinalizeRedisClusterPVC(t *testing.T) { k8sClient = k8sClientFake.NewSimpleClientset() } - err := finalizeRedisClusterPVC(k8sClient, logger, tc.redisCluster) + err := finalizeRedisClusterPVC(context.TODO(), k8sClient, logger, tc.redisCluster) if tc.expectError { assert.Error(t, err) } else { @@ -886,7 +886,7 @@ func TestAddFinalizer(t *testing.T) { return nil }, } - err := AddFinalizer(tt.args.cr, tt.args.finalizer, mc) + err := AddFinalizer(context.TODO(), tt.args.cr, tt.args.finalizer, mc) if (err != nil) != tt.wantErr { t.Errorf("AddFinalizer() error = %v, wantErr %v", err, tt.wantErr) } diff --git a/pkg/k8sutils/poddisruption.go b/pkg/k8sutils/poddisruption.go index 4561b8cc9..18ea93e43 100644 --- a/pkg/k8sutils/poddisruption.go +++ b/pkg/k8sutils/poddisruption.go @@ -16,14 +16,14 @@ import ( ) // CreateRedisLeaderPodDisruptionBudget check and create a PodDisruptionBudget for Leaders -func ReconcileRedisPodDisruptionBudget(cr *redisv1beta2.RedisCluster, role string, pdbParams *commonapi.RedisPodDisruptionBudget, cl kubernetes.Interface) error { +func ReconcileRedisPodDisruptionBudget(ctx context.Context, cr *redisv1beta2.RedisCluster, role string, pdbParams *commonapi.RedisPodDisruptionBudget, cl kubernetes.Interface) error { pdbName := cr.ObjectMeta.Name + "-" + role logger := 
pdbLogger(cr.Namespace, pdbName) if pdbParams != nil && pdbParams.Enabled { labels := getRedisLabels(cr.ObjectMeta.Name, cluster, role, cr.ObjectMeta.GetLabels()) annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations) pdbMeta := generateObjectMetaInformation(pdbName, cr.Namespace, labels, annotations) - pdbDef := generatePodDisruptionBudgetDef(cr, role, pdbMeta, cr.Spec.RedisLeader.PodDisruptionBudget) + pdbDef := generatePodDisruptionBudgetDef(ctx, cr, role, pdbMeta, cr.Spec.RedisLeader.PodDisruptionBudget) return CreateOrUpdatePodDisruptionBudget(pdbDef, cl) } else { // Check if one exists, and delete it. @@ -39,14 +39,14 @@ func ReconcileRedisPodDisruptionBudget(cr *redisv1beta2.RedisCluster, role strin } } -func ReconcileSentinelPodDisruptionBudget(cr *redisv1beta2.RedisSentinel, pdbParams *commonapi.RedisPodDisruptionBudget, cl kubernetes.Interface) error { +func ReconcileSentinelPodDisruptionBudget(ctx context.Context, cr *redisv1beta2.RedisSentinel, pdbParams *commonapi.RedisPodDisruptionBudget, cl kubernetes.Interface) error { pdbName := cr.ObjectMeta.Name + "-sentinel" logger := pdbLogger(cr.Namespace, pdbName) if pdbParams != nil && pdbParams.Enabled { labels := getRedisLabels(cr.ObjectMeta.Name, sentinel, "sentinel", cr.ObjectMeta.GetLabels()) annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations) pdbMeta := generateObjectMetaInformation(pdbName, cr.Namespace, labels, annotations) - pdbDef := generateSentinelPodDisruptionBudgetDef(cr, "sentinel", pdbMeta, pdbParams) + pdbDef := generateSentinelPodDisruptionBudgetDef(ctx, cr, "sentinel", pdbMeta, pdbParams) return CreateOrUpdatePodDisruptionBudget(pdbDef, cl) } else { // Check if one exists, and delete it. 
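// --- Illustrative sketch (not part of the patch): the enable/disable pattern behind
// ReconcileRedisPodDisruptionBudget above. When the CR enables a PDB it is created or updated;
// when disabled, any previously created PDB is deleted so no stale budget is left behind.
// Self-contained sketch with assumed names; the operator's real helpers differ in detail.
package pdbdemo

import (
	"context"

	policyv1 "k8s.io/api/policy/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func reconcilePDB(ctx context.Context, cl kubernetes.Interface, namespace string, enabled bool, desired *policyv1.PodDisruptionBudget) error {
	pdbs := cl.PolicyV1().PodDisruptionBudgets(namespace)
	existing, err := pdbs.Get(ctx, desired.Name, metav1.GetOptions{})
	if !enabled {
		if k8serrors.IsNotFound(err) {
			return nil // disabled and nothing to clean up
		}
		if err != nil {
			return err
		}
		return pdbs.Delete(ctx, existing.Name, metav1.DeleteOptions{})
	}
	if k8serrors.IsNotFound(err) {
		_, err = pdbs.Create(ctx, desired, metav1.CreateOptions{})
		return err
	}
	if err != nil {
		return err
	}
	desired.ResourceVersion = existing.ResourceVersion // required for a no-conflict update
	_, err = pdbs.Update(ctx, desired, metav1.UpdateOptions{})
	return err
}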
@@ -63,7 +63,7 @@ func ReconcileSentinelPodDisruptionBudget(cr *redisv1beta2.RedisSentinel, pdbPar } // generatePodDisruptionBudgetDef will create a PodDisruptionBudget definition -func generatePodDisruptionBudgetDef(cr *redisv1beta2.RedisCluster, role string, pdbMeta metav1.ObjectMeta, pdbParams *commonapi.RedisPodDisruptionBudget) *policyv1.PodDisruptionBudget { +func generatePodDisruptionBudgetDef(ctx context.Context, cr *redisv1beta2.RedisCluster, role string, pdbMeta metav1.ObjectMeta, pdbParams *commonapi.RedisPodDisruptionBudget) *policyv1.PodDisruptionBudget { lblSelector := LabelSelectors(map[string]string{ "app": fmt.Sprintf("%s-%s", cr.ObjectMeta.Name, role), "role": role, @@ -90,7 +90,7 @@ func generatePodDisruptionBudgetDef(cr *redisv1beta2.RedisCluster, role string, } // generatePodDisruptionBudgetDef will create a PodDisruptionBudget definition -func generateSentinelPodDisruptionBudgetDef(cr *redisv1beta2.RedisSentinel, role string, pdbMeta metav1.ObjectMeta, pdbParams *commonapi.RedisPodDisruptionBudget) *policyv1.PodDisruptionBudget { +func generateSentinelPodDisruptionBudgetDef(ctx context.Context, cr *redisv1beta2.RedisSentinel, role string, pdbMeta metav1.ObjectMeta, pdbParams *commonapi.RedisPodDisruptionBudget) *policyv1.PodDisruptionBudget { lblSelector := LabelSelectors(map[string]string{ "app": fmt.Sprintf("%s-%s", cr.ObjectMeta.Name, role), "role": role, diff --git a/pkg/k8sutils/redis-cluster.go b/pkg/k8sutils/redis-cluster.go index 7fe9515dd..bebc97738 100644 --- a/pkg/k8sutils/redis-cluster.go +++ b/pkg/k8sutils/redis-cluster.go @@ -1,6 +1,7 @@ package k8sutils import ( + "context" "strconv" "strings" @@ -32,7 +33,7 @@ type RedisClusterService struct { } // generateRedisClusterParams generates Redis cluster information -func generateRedisClusterParams(cr *redisv1beta2.RedisCluster, replicas int32, externalConfig *string, params RedisClusterSTS) statefulSetParameters { +func generateRedisClusterParams(ctx context.Context, cr *redisv1beta2.RedisCluster, replicas int32, externalConfig *string, params RedisClusterSTS) statefulSetParameters { var minreadyseconds int32 = 0 if cr.Spec.KubernetesConfig.MinReadySeconds != nil { minreadyseconds = *cr.Spec.KubernetesConfig.MinReadySeconds @@ -105,7 +106,7 @@ func generateRedisClusterInitContainerParams(cr *redisv1beta2.RedisCluster) init } // generateRedisClusterContainerParams generates Redis container information -func generateRedisClusterContainerParams(cl kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, securityContext *corev1.SecurityContext, readinessProbeDef *corev1.Probe, livenessProbeDef *corev1.Probe, role string) containerParameters { +func generateRedisClusterContainerParams(ctx context.Context, cl kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, securityContext *corev1.SecurityContext, readinessProbeDef *corev1.Probe, livenessProbeDef *corev1.Probe, role string) containerParameters { trueProperty := true falseProperty := false containerProp := containerParameters{ @@ -211,7 +212,7 @@ func generateRedisClusterContainerParams(cl kubernetes.Interface, logger logr.Lo } // CreateRedisLeader will create a leader redis setup -func CreateRedisLeader(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { +func CreateRedisLeader(ctx context.Context, cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { prop := RedisClusterSTS{ RedisStateFulType: "leader", SecurityContext: cr.Spec.RedisLeader.SecurityContext, @@ -225,11 +226,11 @@ func 
CreateRedisLeader(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) e if cr.Spec.RedisLeader.RedisConfig != nil { prop.ExternalConfig = cr.Spec.RedisLeader.RedisConfig.AdditionalRedisConfig } - return prop.CreateRedisClusterSetup(cr, cl) + return prop.CreateRedisClusterSetup(ctx, cr, cl) } // CreateRedisFollower will create a follower redis setup -func CreateRedisFollower(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { +func CreateRedisFollower(ctx context.Context, cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { prop := RedisClusterSTS{ RedisStateFulType: "follower", SecurityContext: cr.Spec.RedisFollower.SecurityContext, @@ -243,23 +244,23 @@ func CreateRedisFollower(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) if cr.Spec.RedisFollower.RedisConfig != nil { prop.ExternalConfig = cr.Spec.RedisFollower.RedisConfig.AdditionalRedisConfig } - return prop.CreateRedisClusterSetup(cr, cl) + return prop.CreateRedisClusterSetup(ctx, cr, cl) } // CreateRedisLeaderService method will create service for Redis Leader -func CreateRedisLeaderService(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { +func CreateRedisLeaderService(ctx context.Context, cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { prop := RedisClusterService{ RedisServiceRole: "leader", } - return prop.CreateRedisClusterService(cr, cl) + return prop.CreateRedisClusterService(ctx, cr, cl) } // CreateRedisFollowerService method will create service for Redis Follower -func CreateRedisFollowerService(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { +func CreateRedisFollowerService(ctx context.Context, cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { prop := RedisClusterService{ RedisServiceRole: "follower", } - return prop.CreateRedisClusterService(cr, cl) + return prop.CreateRedisClusterService(ctx, cr, cl) } func (service RedisClusterSTS) getReplicaCount(cr *redisv1beta2.RedisCluster) int32 { @@ -267,21 +268,22 @@ func (service RedisClusterSTS) getReplicaCount(cr *redisv1beta2.RedisCluster) in } // CreateRedisClusterSetup will create Redis Setup for leader and follower -func (service RedisClusterSTS) CreateRedisClusterSetup(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { +func (service RedisClusterSTS) CreateRedisClusterSetup(ctx context.Context, cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { stateFulName := cr.ObjectMeta.Name + "-" + service.RedisStateFulType logger := statefulSetLogger(cr.Namespace, stateFulName) labels := getRedisLabels(stateFulName, cluster, service.RedisStateFulType, cr.ObjectMeta.Labels) annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations) objectMetaInfo := generateObjectMetaInformation(stateFulName, cr.Namespace, labels, annotations) err := CreateOrUpdateStateFul( + ctx, cl, logger, cr.GetNamespace(), objectMetaInfo, - generateRedisClusterParams(cr, service.getReplicaCount(cr), service.ExternalConfig, service), + generateRedisClusterParams(ctx, cr, service.getReplicaCount(cr), service.ExternalConfig, service), redisClusterAsOwner(cr), generateRedisClusterInitContainerParams(cr), - generateRedisClusterContainerParams(cl, logger, cr, service.SecurityContext, service.ReadinessProbe, service.LivenessProbe, service.RedisStateFulType), + generateRedisClusterContainerParams(ctx, cl, logger, cr, service.SecurityContext, service.ReadinessProbe, service.LivenessProbe, service.RedisStateFulType), cr.Spec.Sidecars, ) if err != nil { @@ -292,7 
+294,7 @@ func (service RedisClusterSTS) CreateRedisClusterSetup(cr *redisv1beta2.RedisClu } // CreateRedisClusterService method will create service for Redis -func (service RedisClusterService) CreateRedisClusterService(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { +func (service RedisClusterService) CreateRedisClusterService(ctx context.Context, cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { serviceName := cr.ObjectMeta.Name + "-" + service.RedisServiceRole logger := serviceLogger(cr.Namespace, serviceName) labels := getRedisLabels(serviceName, cluster, service.RedisServiceRole, cr.ObjectMeta.Labels) @@ -323,7 +325,7 @@ func (service RedisClusterService) CreateRedisClusterService(cr *redisv1beta2.Re if additionalServiceType == "NodePort" { // If NodePort is enabled, we need to create a service for every redis pod. // Then use --cluster-announce-ip --cluster-announce-port --cluster-announce-bus-port to make cluster. - err = service.createOrUpdateClusterNodePortService(cr, cl) + err = service.createOrUpdateClusterNodePortService(ctx, cr, cl) if err != nil { logger.Error(err, "Cannot create nodeport service for Redis", "Setup.Type", service.RedisServiceRole) return err @@ -337,7 +339,7 @@ func (service RedisClusterService) CreateRedisClusterService(cr *redisv1beta2.Re return nil } -func (service RedisClusterService) createOrUpdateClusterNodePortService(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { +func (service RedisClusterService) createOrUpdateClusterNodePortService(ctx context.Context, cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { replicas := cr.Spec.GetReplicaCounts(service.RedisServiceRole) for i := 0; i < int(replicas); i++ { diff --git a/pkg/k8sutils/redis-cluster_test.go b/pkg/k8sutils/redis-cluster_test.go index 65c99bc4a..84415b0b6 100644 --- a/pkg/k8sutils/redis-cluster_test.go +++ b/pkg/k8sutils/redis-cluster_test.go @@ -1,6 +1,7 @@ package k8sutils import ( + "context" "os" "path/filepath" "testing" @@ -165,7 +166,7 @@ func Test_generateRedisClusterParams(t *testing.T) { t.Fatalf("Failed to unmarshal file %s: %v", path, err) } - actualLeaderSTS := generateRedisClusterParams(input, *input.Spec.Size, input.Spec.RedisLeader.RedisConfig.AdditionalRedisConfig, RedisClusterSTS{ + actualLeaderSTS := generateRedisClusterParams(context.TODO(), input, *input.Spec.Size, input.Spec.RedisLeader.RedisConfig.AdditionalRedisConfig, RedisClusterSTS{ RedisStateFulType: "leader", ExternalConfig: input.Spec.RedisLeader.RedisConfig.AdditionalRedisConfig, SecurityContext: input.Spec.RedisLeader.SecurityContext, @@ -178,7 +179,7 @@ func Test_generateRedisClusterParams(t *testing.T) { }) assert.EqualValues(t, expectedLeaderSTS, actualLeaderSTS, "Expected %+v, got %+v", expectedLeaderSTS, actualLeaderSTS) - actualFollowerSTS := generateRedisClusterParams(input, *input.Spec.Size, input.Spec.RedisFollower.RedisConfig.AdditionalRedisConfig, RedisClusterSTS{ + actualFollowerSTS := generateRedisClusterParams(context.TODO(), input, *input.Spec.Size, input.Spec.RedisFollower.RedisConfig.AdditionalRedisConfig, RedisClusterSTS{ RedisStateFulType: "follower", ExternalConfig: input.Spec.RedisFollower.RedisConfig.AdditionalRedisConfig, SecurityContext: input.Spec.RedisFollower.SecurityContext, @@ -432,10 +433,10 @@ func Test_generateRedisClusterContainerParams(t *testing.T) { } logger := testr.New(t) - actualLeaderContainer := generateRedisClusterContainerParams(fake.NewSimpleClientset(), logger, input, input.Spec.RedisLeader.SecurityContext, 
input.Spec.RedisLeader.ReadinessProbe, input.Spec.RedisLeader.LivenessProbe, "leader") + actualLeaderContainer := generateRedisClusterContainerParams(context.TODO(), fake.NewSimpleClientset(), logger, input, input.Spec.RedisLeader.SecurityContext, input.Spec.RedisLeader.ReadinessProbe, input.Spec.RedisLeader.LivenessProbe, "leader") assert.EqualValues(t, expectedLeaderContainer, actualLeaderContainer, "Expected %+v, got %+v", expectedLeaderContainer, actualLeaderContainer) - actualFollowerContainer := generateRedisClusterContainerParams(fake.NewSimpleClientset(), logger, input, input.Spec.RedisFollower.SecurityContext, input.Spec.RedisFollower.ReadinessProbe, input.Spec.RedisFollower.LivenessProbe, "follower") + actualFollowerContainer := generateRedisClusterContainerParams(context.TODO(), fake.NewSimpleClientset(), logger, input, input.Spec.RedisFollower.SecurityContext, input.Spec.RedisFollower.ReadinessProbe, input.Spec.RedisFollower.LivenessProbe, "follower") assert.EqualValues(t, expectedFollowerContainer, actualFollowerContainer, "Expected %+v, got %+v", expectedFollowerContainer, actualFollowerContainer) } diff --git a/pkg/k8sutils/redis-replication.go b/pkg/k8sutils/redis-replication.go index d1287e4f6..14a2363c5 100644 --- a/pkg/k8sutils/redis-replication.go +++ b/pkg/k8sutils/redis-replication.go @@ -12,7 +12,7 @@ import ( ) // CreateReplicationService method will create replication service for Redis -func CreateReplicationService(cr *redisv1beta2.RedisReplication, cl kubernetes.Interface) error { +func CreateReplicationService(ctx context.Context, cr *redisv1beta2.RedisReplication, cl kubernetes.Interface) error { logger := serviceLogger(cr.Namespace, cr.ObjectMeta.Name) labels := getRedisLabels(cr.ObjectMeta.Name, replication, "replication", cr.ObjectMeta.Labels) @@ -62,7 +62,7 @@ func CreateReplicationService(cr *redisv1beta2.RedisReplication, cl kubernetes.I } // CreateReplicationRedis will create a replication redis setup -func CreateReplicationRedis(cr *redisv1beta2.RedisReplication, cl kubernetes.Interface) error { +func CreateReplicationRedis(ctx context.Context, cr *redisv1beta2.RedisReplication, cl kubernetes.Interface) error { stateFulName := cr.ObjectMeta.Name logger := statefulSetLogger(cr.Namespace, cr.ObjectMeta.Name) labels := getRedisLabels(cr.ObjectMeta.Name, replication, "replication", cr.ObjectMeta.Labels) @@ -70,6 +70,7 @@ func CreateReplicationRedis(cr *redisv1beta2.RedisReplication, cl kubernetes.Int objectMetaInfo := generateObjectMetaInformation(stateFulName, cr.Namespace, labels, annotations) err := CreateOrUpdateStateFul( + ctx, cl, logger, cr.GetNamespace(), @@ -218,7 +219,7 @@ func generateRedisReplicationInitContainerParams(cr *redisv1beta2.RedisReplicati func IsRedisReplicationReady(ctx context.Context, logger logr.Logger, client kubernetes.Interface, dClient dynamic.Interface, rs *redisv1beta2.RedisSentinel) bool { // statefulset name the same as the redis replication name - sts, err := GetStatefulSet(client, logger, rs.GetNamespace(), rs.Spec.RedisSentinelConfig.RedisReplicationName) + sts, err := GetStatefulSet(ctx, client, logger, rs.GetNamespace(), rs.Spec.RedisSentinelConfig.RedisReplicationName) if err != nil { return false } diff --git a/pkg/k8sutils/redis-sentinel.go b/pkg/k8sutils/redis-sentinel.go index 3d2f4ba6b..12257d29a 100644 --- a/pkg/k8sutils/redis-sentinel.go +++ b/pkg/k8sutils/redis-sentinel.go @@ -53,11 +53,11 @@ func CreateRedisSentinel(ctx context.Context, client kubernetes.Interface, logge } // Create RedisSentinel Service 
-func CreateRedisSentinelService(cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface) error { +func CreateRedisSentinelService(ctx context.Context, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface) error { prop := RedisSentinelService{ RedisServiceRole: "sentinel", } - return prop.CreateRedisSentinelService(cr, cl) + return prop.CreateRedisSentinelService(ctx, cr, cl) } // Create Redis Sentinel Cluster Setup @@ -67,11 +67,12 @@ func (service RedisSentinelSTS) CreateRedisSentinelSetup(ctx context.Context, cl annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations) objectMetaInfo := generateObjectMetaInformation(stateFulName, cr.Namespace, labels, annotations) err := CreateOrUpdateStateFul( + ctx, cl, logger, cr.GetNamespace(), objectMetaInfo, - generateRedisSentinelParams(cr, service.getSentinelCount(cr), service.ExternalConfig, service.Affinity), + generateRedisSentinelParams(ctx, cr, service.getSentinelCount(cr), service.ExternalConfig, service.Affinity), redisSentinelAsOwner(cr), generateRedisSentinelInitContainerParams(cr), generateRedisSentinelContainerParams(ctx, client, logger, cr, service.ReadinessProbe, service.LivenessProbe, dcl), @@ -85,7 +86,7 @@ func (service RedisSentinelSTS) CreateRedisSentinelSetup(ctx context.Context, cl } // Create Redis Sentile Params for the statefulset -func generateRedisSentinelParams(cr *redisv1beta2.RedisSentinel, replicas int32, externalConfig *string, affinity *corev1.Affinity) statefulSetParameters { +func generateRedisSentinelParams(ctx context.Context, cr *redisv1beta2.RedisSentinel, replicas int32, externalConfig *string, affinity *corev1.Affinity) statefulSetParameters { var minreadyseconds int32 = 0 if cr.Spec.KubernetesConfig.MinReadySeconds != nil { minreadyseconds = *cr.Spec.KubernetesConfig.MinReadySeconds @@ -206,7 +207,7 @@ func (service RedisSentinelSTS) getSentinelCount(cr *redisv1beta2.RedisSentinel) } // Create the Service for redis sentinel -func (service RedisSentinelService) CreateRedisSentinelService(cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface) error { +func (service RedisSentinelService) CreateRedisSentinelService(ctx context.Context, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface) error { serviceName := cr.ObjectMeta.Name + "-" + service.RedisServiceRole logger := serviceLogger(cr.Namespace, serviceName) labels := getRedisLabels(serviceName, sentinel, service.RedisServiceRole, cr.ObjectMeta.Labels) @@ -337,7 +338,7 @@ func getRedisReplicationMasterIP(ctx context.Context, client kubernetes.Interfac return "" } for _, podName := range masterPods { - redisClient := configureRedisReplicationClient(client, logger, &replicationInstance, podName) + redisClient := configureRedisReplicationClient(ctx, client, logger, &replicationInstance, podName) defer redisClient.Close() if checkAttachedSlave(ctx, redisClient, logger, podName) > 0 { @@ -354,5 +355,5 @@ func getRedisReplicationMasterIP(ctx context.Context, client kubernetes.Interfac PodName: realMasterPod, Namespace: replicationNamespace, } - return getRedisServerIP(client, logger, realMasterInfo) + return getRedisServerIP(ctx, client, logger, realMasterInfo) } diff --git a/pkg/k8sutils/redis-sentinel_test.go b/pkg/k8sutils/redis-sentinel_test.go index 76b467c4c..05e57f1c5 100644 --- a/pkg/k8sutils/redis-sentinel_test.go +++ b/pkg/k8sutils/redis-sentinel_test.go @@ -83,7 +83,7 @@ func Test_generateRedisSentinelParams(t *testing.T) { t.Fatalf("Failed to unmarshal file %s: %v", path, err) } - actual := 
generateRedisSentinelParams(input, *input.Spec.Size, nil, input.Spec.Affinity) + actual := generateRedisSentinelParams(context.TODO(), input, *input.Spec.Size, nil, input.Spec.Affinity) assert.EqualValues(t, expected, actual, "Expected %+v, got %+v", expected, actual) } diff --git a/pkg/k8sutils/redis-standalone.go b/pkg/k8sutils/redis-standalone.go index 05a911760..d9a48f226 100644 --- a/pkg/k8sutils/redis-standalone.go +++ b/pkg/k8sutils/redis-standalone.go @@ -1,6 +1,7 @@ package k8sutils import ( + "context" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/util" "k8s.io/client-go/kubernetes" @@ -8,7 +9,7 @@ import ( ) // CreateStandaloneService method will create standalone service for Redis -func CreateStandaloneService(cr *redisv1beta2.Redis, cl kubernetes.Interface) error { +func CreateStandaloneService(ctx context.Context, cr *redisv1beta2.Redis, cl kubernetes.Interface) error { logger := serviceLogger(cr.Namespace, cr.ObjectMeta.Name) labels := getRedisLabels(cr.ObjectMeta.Name, standalone, "standalone", cr.ObjectMeta.Labels) var epp exporterPortProvider @@ -52,12 +53,13 @@ func CreateStandaloneService(cr *redisv1beta2.Redis, cl kubernetes.Interface) er } // CreateStandaloneRedis will create a standalone redis setup -func CreateStandaloneRedis(cr *redisv1beta2.Redis, cl kubernetes.Interface) error { +func CreateStandaloneRedis(ctx context.Context, cr *redisv1beta2.Redis, cl kubernetes.Interface) error { logger := statefulSetLogger(cr.Namespace, cr.ObjectMeta.Name) labels := getRedisLabels(cr.ObjectMeta.Name, standalone, "standalone", cr.ObjectMeta.Labels) annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations) objectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name, cr.Namespace, labels, annotations) err := CreateOrUpdateStateFul( + ctx, cl, logger, cr.GetNamespace(), diff --git a/pkg/k8sutils/redis.go b/pkg/k8sutils/redis.go index 5f65e5db3..1dd7ee843 100644 --- a/pkg/k8sutils/redis.go +++ b/pkg/k8sutils/redis.go @@ -26,7 +26,7 @@ type RedisDetails struct { } // getRedisServerIP will return the IP of redis service -func getRedisServerIP(client kubernetes.Interface, logger logr.Logger, redisInfo RedisDetails) string { +func getRedisServerIP(ctx context.Context, client kubernetes.Interface, logger logr.Logger, redisInfo RedisDetails) string { logger.V(1).Info("Fetching Redis pod", "namespace", redisInfo.Namespace, "podName", redisInfo.PodName) redisPod, err := client.CoreV1().Pods(redisInfo.Namespace).Get(context.TODO(), redisInfo.PodName, metav1.GetOptions{}) @@ -53,8 +53,8 @@ func getRedisServerIP(client kubernetes.Interface, logger logr.Logger, redisInfo return redisIP } -func getRedisServerAddress(client kubernetes.Interface, logger logr.Logger, rd RedisDetails, port int) string { - ip := getRedisServerIP(client, logger, rd) +func getRedisServerAddress(ctx context.Context, client kubernetes.Interface, logger logr.Logger, rd RedisDetails, port int) string { + ip := getRedisServerIP(ctx, client, logger, rd) format := "%s:%d" // if ip is IPv6, wrap it in brackets @@ -88,7 +88,7 @@ func CreateSingleLeaderRedisCommand(logger logr.Logger, cr *redisv1beta2.RedisCl // RepairDisconnectedMasters attempts to repair disconnected/failed masters by issuing // a CLUSTER MEET with the updated address of the host func RepairDisconnectedMasters(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) error { - redisClient := 
configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") + redisClient := configureRedisClient(ctx, client, logger, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() return repairDisconnectedMasters(ctx, client, logger, cr, redisClient) } @@ -111,7 +111,7 @@ func repairDisconnectedMasters(ctx context.Context, client kubernetes.Interface, if err != nil { return err } - ip := getRedisServerIP(client, logger, RedisDetails{ + ip := getRedisServerIP(ctx, client, logger, RedisDetails{ PodName: podName, Namespace: cr.Namespace, }) @@ -133,7 +133,7 @@ func getMasterHostFromClusterNode(node clusterNodesResponse) (string, error) { } // CreateMultipleLeaderRedisCommand will create command for single leader cluster creation -func CreateMultipleLeaderRedisCommand(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) []string { +func CreateMultipleLeaderRedisCommand(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) []string { cmd := []string{"redis-cli", "--cluster", "create"} replicas := cr.Spec.GetReplicaCounts("leader") @@ -143,7 +143,7 @@ func CreateMultipleLeaderRedisCommand(client kubernetes.Interface, logger logr.L if cr.Spec.ClusterVersion != nil && *cr.Spec.ClusterVersion == "v7" { address = getRedisHostname(RedisDetails{PodName: podName, Namespace: cr.Namespace}, cr, "leader") + fmt.Sprintf(":%d", *cr.Spec.Port) } else { - address = getRedisServerAddress(client, logger, RedisDetails{PodName: podName, Namespace: cr.Namespace}, *cr.Spec.Port) + address = getRedisServerAddress(ctx, client, logger, RedisDetails{PodName: podName, Namespace: cr.Namespace}, *cr.Spec.Port) } cmd = append(cmd, address) } @@ -165,11 +165,11 @@ func ExecuteRedisClusterCommand(ctx context.Context, client kubernetes.Interface } cmd = CreateSingleLeaderRedisCommand(logger, cr) default: - cmd = CreateMultipleLeaderRedisCommand(client, logger, cr) + cmd = CreateMultipleLeaderRedisCommand(ctx, client, logger, cr) } if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { logger.Error(err, "Error in getting redis password") } @@ -178,7 +178,7 @@ func ExecuteRedisClusterCommand(ctx context.Context, client kubernetes.Interface } cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) 
logger.V(1).Info("Redis cluster creation command is", "Command", cmd) - executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + executeCommand(ctx, client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") } func getRedisTLSArgs(tlsConfig *redisv1beta2.TLSConfig, clientHost string) []string { @@ -194,7 +194,7 @@ func getRedisTLSArgs(tlsConfig *redisv1beta2.TLSConfig, clientHost string) []str } // createRedisReplicationCommand will create redis replication creation command -func createRedisReplicationCommand(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, leaderPod RedisDetails, followerPod RedisDetails) []string { +func createRedisReplicationCommand(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, leaderPod RedisDetails, followerPod RedisDetails) []string { cmd := []string{"redis-cli", "--cluster", "add-node"} var followerAddress, leaderAddress string @@ -202,14 +202,14 @@ func createRedisReplicationCommand(client kubernetes.Interface, logger logr.Logg followerAddress = getRedisHostname(followerPod, cr, "follower") + fmt.Sprintf(":%d", *cr.Spec.Port) leaderAddress = getRedisHostname(leaderPod, cr, "leader") + fmt.Sprintf(":%d", *cr.Spec.Port) } else { - followerAddress = getRedisServerAddress(client, logger, followerPod, *cr.Spec.Port) - leaderAddress = getRedisServerAddress(client, logger, leaderPod, *cr.Spec.Port) + followerAddress = getRedisServerAddress(ctx, client, logger, followerPod, *cr.Spec.Port) + leaderAddress = getRedisServerAddress(ctx, client, logger, leaderPod, *cr.Spec.Port) } cmd = append(cmd, followerAddress, leaderAddress, "--cluster-slave") if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { logger.Error(err, "Failed to retrieve Redis password", "Secret", *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name) } else { @@ -233,7 +233,7 @@ func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Inter leaderCounts := cr.Spec.GetReplicaCounts("leader") followerPerLeader := followerCounts / leaderCounts - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") + redisClient := configureRedisClient(ctx, client, logger, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() nodes, err := clusterNodes(ctx, redisClient, logger) @@ -250,11 +250,11 @@ func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Inter PodName: cr.ObjectMeta.Name + "-leader-" + strconv.Itoa((followerIdx)%int(leaderCounts)), Namespace: cr.Namespace, } - podIP = getRedisServerIP(client, logger, followerPod) - if !checkRedisNodePresence(cr, nodes, podIP) { + podIP = getRedisServerIP(ctx, client, logger, followerPod) + if !checkRedisNodePresence(ctx, cr, nodes, podIP) { logger.V(1).Info("Adding node to cluster.", "Node.IP", podIP, "Follower.Pod", followerPod) - cmd := createRedisReplicationCommand(client, logger, cr, leaderPod, followerPod) - redisClient := configureRedisClient(client, logger, cr, followerPod.PodName) + cmd := createRedisReplicationCommand(ctx, client, logger, cr, leaderPod, followerPod) + redisClient := configureRedisClient(ctx, client, logger, cr, 
followerPod.PodName) pong, err := redisClient.Ping(ctx).Result() redisClient.Close() if err != nil { @@ -262,7 +262,7 @@ func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Inter continue } if pong == "PONG" { - executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + executeCommand(ctx, client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") } else { logger.V(1).Info("Skipping execution of command due to failed Redis ping", "Follower.Pod", followerPod) } @@ -319,7 +319,7 @@ func executeFailoverCommand(ctx context.Context, client kubernetes.Interface, lo podName := fmt.Sprintf("%s-%s-", cr.ObjectMeta.Name, role) for podCount := 0; podCount <= int(replicas)-1; podCount++ { logger.V(1).Info("Executing redis failover operations", "Redis Node", podName+strconv.Itoa(podCount)) - client := configureRedisClient(client, logger, cr, podName+strconv.Itoa(podCount)) + client := configureRedisClient(ctx, client, logger, cr, podName+strconv.Itoa(podCount)) defer client.Close() cmd := redis.NewStringCmd(ctx, "cluster", "reset") err := client.Process(ctx, cmd) @@ -349,7 +349,7 @@ func executeFailoverCommand(ctx context.Context, client kubernetes.Interface, lo // CheckRedisNodeCount will check the count of redis nodes func CheckRedisNodeCount(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, nodeType string) int32 { - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") + redisClient := configureRedisClient(ctx, client, logger, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() var redisNodeType string clusterNodes, err := clusterNodes(ctx, redisClient, logger) @@ -382,12 +382,12 @@ func CheckRedisNodeCount(ctx context.Context, client kubernetes.Interface, logge // RedisClusterStatusHealth use `redis-cli --cluster check 127.0.0.1:6379` func RedisClusterStatusHealth(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) bool { - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") + redisClient := configureRedisClient(ctx, client, logger, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() cmd := []string{"redis-cli", "--cluster", "check", "127.0.0.1:6379"} if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { logger.Error(err, "Error in getting redis password") } @@ -395,7 +395,7 @@ func RedisClusterStatusHealth(ctx context.Context, client kubernetes.Interface, cmd = append(cmd, pass) } cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) 
- out, err := executeCommand1(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + out, err := executeCommand1(ctx, client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") if err != nil { return false } @@ -410,7 +410,7 @@ func RedisClusterStatusHealth(ctx context.Context, client kubernetes.Interface, // UnhealthyNodesInCluster returns the number of unhealthy nodes in the cluster cr func UnhealthyNodesInCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) (int, error) { - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") + redisClient := configureRedisClient(ctx, client, logger, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() clusterNodes, err := clusterNodes(ctx, redisClient, logger) if err != nil { @@ -435,7 +435,7 @@ func nodeFailedOrDisconnected(node clusterNodesResponse) bool { } // configureRedisClient will configure the Redis Client -func configureRedisClient(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, podName string) *redis.Client { +func configureRedisClient(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, podName string) *redis.Client { redisInfo := RedisDetails{ PodName: podName, Namespace: cr.Namespace, @@ -443,25 +443,25 @@ func configureRedisClient(client kubernetes.Interface, logger logr.Logger, cr *r var err error var pass string if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err = getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err = getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { logger.Error(err, "Error in getting redis password") } } opts := &redis.Options{ - Addr: getRedisServerAddress(client, logger, redisInfo, *cr.Spec.Port), + Addr: getRedisServerAddress(ctx, client, logger, redisInfo, *cr.Spec.Port), Password: pass, DB: 0, } if cr.Spec.TLS != nil { - opts.TLSConfig = getRedisTLSConfig(client, logger, cr.Namespace, cr.Spec.TLS.Secret.SecretName, redisInfo.PodName) + opts.TLSConfig = getRedisTLSConfig(ctx, client, logger, cr.Namespace, cr.Spec.TLS.Secret.SecretName, redisInfo.PodName) } return redis.NewClient(opts) } // executeCommand will execute the commands in pod -func executeCommand(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, cmd []string, podName string) { - execOut, execErr := executeCommand1(client, logger, cr, cmd, podName) +func executeCommand(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, cmd []string, podName string) { + execOut, execErr := executeCommand1(ctx, client, logger, cr, cmd, podName) if execErr != nil { logger.Error(execErr, "Could not execute command", "Command", cmd, "Output", execOut) return @@ -469,7 +469,7 @@ func executeCommand(client kubernetes.Interface, logger logr.Logger, cr *redisv1 logger.V(1).Info("Successfully executed the command", "Command", cmd, "Output", execOut) } -func executeCommand1(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, cmd []string, podName string) (stdout string, stderr error) { +func executeCommand1(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, cmd []string, podName string) 
(stdout string, stderr error) { var ( execOut bytes.Buffer execErr bytes.Buffer @@ -479,7 +479,7 @@ func executeCommand1(client kubernetes.Interface, logger logr.Logger, cr *redisv logger.Error(err, "Could not find pod to execute") return "", err } - targetContainer, pod := getContainerID(client, logger, cr, podName) + targetContainer, pod := getContainerID(ctx, client, logger, cr, podName) if targetContainer < 0 { logger.Error(err, "Could not find pod to execute") return "", err @@ -510,7 +510,7 @@ func executeCommand1(client kubernetes.Interface, logger logr.Logger, cr *redisv } // getContainerID will return the id of container from pod -func getContainerID(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, podName string) (int, *corev1.Pod) { +func getContainerID(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, podName string) (int, *corev1.Pod) { pod, err := client.CoreV1().Pods(cr.Namespace).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { logger.Error(err, "Could not get pod info", "Pod Name", podName, "Namespace", cr.Namespace) @@ -538,7 +538,7 @@ func getContainerID(client kubernetes.Interface, logger logr.Logger, cr *redisv1 } // checkRedisNodePresence will check if the redis node exist in cluster or not -func checkRedisNodePresence(cr *redisv1beta2.RedisCluster, nodeList []clusterNodesResponse, nodeName string) bool { +func checkRedisNodePresence(ctx context.Context, cr *redisv1beta2.RedisCluster, nodeList []clusterNodesResponse, nodeName string) bool { logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name) logger.V(1).Info("Checking if Node is in cluster", "Node", nodeName) for _, node := range nodeList { @@ -557,7 +557,7 @@ func generateRedisManagerLogger(namespace, name string) logr.Logger { } // configureRedisClient will configure the Redis Client -func configureRedisReplicationClient(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, podName string) *redis.Client { +func configureRedisReplicationClient(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, podName string) *redis.Client { redisInfo := RedisDetails{ PodName: podName, Namespace: cr.Namespace, @@ -565,25 +565,25 @@ func configureRedisReplicationClient(client kubernetes.Interface, logger logr.Lo var err error var pass string if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err = getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err = getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { logger.Error(err, "Error in getting redis password") } } opts := &redis.Options{ - Addr: getRedisServerAddress(client, logger, redisInfo, 6379), + Addr: getRedisServerAddress(ctx, client, logger, redisInfo, 6379), Password: pass, DB: 0, } if cr.Spec.TLS != nil { - opts.TLSConfig = getRedisTLSConfig(client, logger, cr.Namespace, cr.Spec.TLS.Secret.SecretName, podName) + opts.TLSConfig = getRedisTLSConfig(ctx, client, logger, cr.Namespace, cr.Spec.TLS.Secret.SecretName, podName) } return redis.NewClient(opts) } // Get Redis nodes by it's role i.e. 
master, slave and sentinel func GetRedisNodesByRole(ctx context.Context, cl kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, redisRole string) []string { - statefulset, err := GetStatefulSet(cl, logger, cr.GetNamespace(), cr.GetName()) + statefulset, err := GetStatefulSet(ctx, cl, logger, cr.GetNamespace(), cr.GetName()) if err != nil { logger.Error(err, "Failed to Get the Statefulset of the", "custom resource", cr.Name, "in namespace", cr.Namespace) } @@ -593,7 +593,7 @@ func GetRedisNodesByRole(ctx context.Context, cl kubernetes.Interface, logger lo for i := 0; i < int(replicas); i++ { podName := statefulset.Name + "-" + strconv.Itoa(i) - redisClient := configureRedisReplicationClient(cl, logger, cr, podName) + redisClient := configureRedisReplicationClient(ctx, cl, logger, cr, podName) defer redisClient.Close() podRole := checkRedisServerRole(ctx, redisClient, logger, podName) if podRole == redisRole { @@ -656,11 +656,11 @@ func CreateMasterSlaveReplication(ctx context.Context, client kubernetes.Interfa Namespace: cr.Namespace, } - realMasterPodIP := getRedisServerIP(client, logger, realMasterInfo) + realMasterPodIP := getRedisServerIP(ctx, client, logger, realMasterInfo) for i := 0; i < len(masterPods); i++ { if masterPods[i] != realMasterPod { - redisClient := configureRedisReplicationClient(client, logger, cr, masterPods[i]) + redisClient := configureRedisReplicationClient(ctx, client, logger, cr, masterPods[i]) defer redisClient.Close() logger.V(1).Info("Setting the", "pod", masterPods[i], "to slave of", realMasterPod) err := redisClient.SlaveOf(ctx, realMasterPodIP, "6379").Err() @@ -676,7 +676,7 @@ func CreateMasterSlaveReplication(ctx context.Context, client kubernetes.Interfa func GetRedisReplicationRealMaster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, masterPods []string) string { for _, podName := range masterPods { - redisClient := configureRedisReplicationClient(client, logger, cr, podName) + redisClient := configureRedisReplicationClient(ctx, client, logger, cr, podName) defer redisClient.Close() if checkAttachedSlave(ctx, redisClient, logger, podName) > 0 { diff --git a/pkg/k8sutils/redis_test.go b/pkg/k8sutils/redis_test.go index 79314e02d..922e80df4 100644 --- a/pkg/k8sutils/redis_test.go +++ b/pkg/k8sutils/redis_test.go @@ -49,7 +49,7 @@ func TestCheckRedisNodePresence(t *testing.T) { for _, tt := range tests { testname := fmt.Sprintf("%s,%s", tt.nodes, tt.ip) t.Run(testname, func(t *testing.T) { - ans := checkRedisNodePresence(cr, tt.nodes, tt.ip) + ans := checkRedisNodePresence(context.TODO(), cr, tt.nodes, tt.ip) if ans != tt.want { t.Errorf("got %t, want %t", ans, tt.want) } @@ -176,7 +176,7 @@ func TestGetRedisServerIP(t *testing.T) { t.Run(tt.name, func(t *testing.T) { client := tt.setup() logger := testr.New(t) - redisIP := getRedisServerIP(client, logger, tt.redisInfo) + redisIP := getRedisServerIP(context.TODO(), client, logger, tt.redisInfo) if tt.expectEmpty { assert.Empty(t, redisIP, "Expected an empty IP address") @@ -241,7 +241,7 @@ func TestGetRedisServerAddress(t *testing.T) { t.Run(tt.name, func(t *testing.T) { client := tt.setup() logger := testr.New(t) - redisIP := getRedisServerAddress(client, logger, tt.redisInfo, 6379) + redisIP := getRedisServerAddress(context.TODO(), client, logger, tt.redisInfo, 6379) if tt.expectEmpty { assert.Empty(t, redisIP, "Expected an empty address") @@ -355,7 +355,7 @@ func TestCreateMultipleLeaderRedisCommand(t *testing.T) { client 
:= mock_utils.CreateFakeClientWithPodIPs_LeaderPods(tt.redisCluster) logger := testr.New(t) - cmd := CreateMultipleLeaderRedisCommand(client, logger, tt.redisCluster) + cmd := CreateMultipleLeaderRedisCommand(context.TODO(), client, logger, tt.redisCluster) assert.Equal(t, tt.expectedCommands, cmd) }) } @@ -530,7 +530,7 @@ func TestCreateRedisReplicationCommand(t *testing.T) { objects = append(objects, secret...) client := fake.NewSimpleClientset(objects...) - cmd := createRedisReplicationCommand(client, logger, tt.redisCluster, tt.leaderPod, tt.followerPod) + cmd := createRedisReplicationCommand(context.TODO(), client, logger, tt.redisCluster, tt.leaderPod, tt.followerPod) // Assert the command is as expected using testify assert.Equal(t, tt.expectedCommand, cmd) @@ -615,7 +615,7 @@ func TestGetContainerID(t *testing.T) { t.Run(test.name, func(t *testing.T) { client := k8sClientFake.NewSimpleClientset(test.setupPod) logger := testr.New(t) - id, pod := getContainerID(client, logger, test.redisCluster, test.setupPod.Name) + id, pod := getContainerID(context.TODO(), client, logger, test.redisCluster, test.setupPod.Name) if test.expectError { assert.Nil(t, pod, "Expected no pod but got one") assert.Equal(t, test.expectedID, id, "Expected ID does not match") diff --git a/pkg/k8sutils/secrets.go b/pkg/k8sutils/secrets.go index 91cc852c2..3f67a522c 100644 --- a/pkg/k8sutils/secrets.go +++ b/pkg/k8sutils/secrets.go @@ -16,7 +16,7 @@ import ( var log = logf.Log.WithName("controller_redis") // getRedisPassword method will return the redis password from the secret -func getRedisPassword(client kubernetes.Interface, logger logr.Logger, namespace, name, secretKey string) (string, error) { +func getRedisPassword(ctx context.Context, client kubernetes.Interface, logger logr.Logger, namespace, name, secretKey string) (string, error) { secretName, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { logger.Error(err, "Failed in getting existing secret for redis") @@ -32,7 +32,7 @@ func getRedisPassword(client kubernetes.Interface, logger logr.Logger, namespace return "", nil } -func getRedisTLSConfig(client kubernetes.Interface, logger logr.Logger, namespace, tlsSecretName, podName string) *tls.Config { +func getRedisTLSConfig(ctx context.Context, client kubernetes.Interface, logger logr.Logger, namespace, tlsSecretName, podName string) *tls.Config { secret, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), tlsSecretName, metav1.GetOptions{}) if err != nil { logger.V(1).Error(err, "Failed in getting TLS secret", "secretName", tlsSecretName, "namespace", namespace) diff --git a/pkg/k8sutils/secrets_test.go b/pkg/k8sutils/secrets_test.go index cb1734f6f..f9e8241bd 100644 --- a/pkg/k8sutils/secrets_test.go +++ b/pkg/k8sutils/secrets_test.go @@ -1,6 +1,7 @@ package k8sutils import ( + "context" "os" "path/filepath" "testing" @@ -85,7 +86,7 @@ func Test_getRedisPassword(t *testing.T) { t.Run(tt.name, func(t *testing.T) { client := tt.setup() logger := testr.New(t) - got, err := getRedisPassword(client, logger, tt.namespace, tt.secretName, tt.secretKey) + got, err := getRedisPassword(context.TODO(), client, logger, tt.namespace, tt.secretName, tt.secretKey) if tt.expectedErr { require.Error(t, err, "Expected an error but didn't get one") @@ -222,7 +223,7 @@ func Test_getRedisTLSConfig(t *testing.T) { t.Run(tt.name, func(t *testing.T) { client := tt.setup() logger := testr.New(t) - tlsConfig := getRedisTLSConfig(client, logger, 
tt.redisCluster.Namespace, tt.redisCluster.Spec.TLS.Secret.SecretName, tt.redisInfo.PodName) + tlsConfig := getRedisTLSConfig(context.TODO(), client, logger, tt.redisCluster.Namespace, tt.redisCluster.Spec.TLS.Secret.SecretName, tt.redisInfo.PodName) if tt.expectTLS { require.NotNil(t, tlsConfig, "Expected TLS configuration but got nil") diff --git a/pkg/k8sutils/statefulset.go b/pkg/k8sutils/statefulset.go index bc31d4975..862b435ca 100644 --- a/pkg/k8sutils/statefulset.go +++ b/pkg/k8sutils/statefulset.go @@ -154,8 +154,8 @@ type initContainerParameters struct { } // CreateOrUpdateStateFul method will create or update Redis service -func CreateOrUpdateStateFul(cl kubernetes.Interface, logger logr.Logger, namespace string, stsMeta metav1.ObjectMeta, params statefulSetParameters, ownerDef metav1.OwnerReference, initcontainerParams initContainerParameters, containerParams containerParameters, sidecars *[]redisv1beta2.Sidecar) error { - storedStateful, err := GetStatefulSet(cl, logger, namespace, stsMeta.Name) +func CreateOrUpdateStateFul(ctx context.Context, cl kubernetes.Interface, logger logr.Logger, namespace string, stsMeta metav1.ObjectMeta, params statefulSetParameters, ownerDef metav1.OwnerReference, initcontainerParams initContainerParameters, containerParams containerParameters, sidecars *[]redisv1beta2.Sidecar) error { + storedStateful, err := GetStatefulSet(ctx, cl, logger, namespace, stsMeta.Name) statefulSetDef := generateStatefulSetsDef(stsMeta, params, ownerDef, initcontainerParams, containerParams, getSidecars(sidecars)) if err != nil { if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(statefulSetDef); err != nil { //nolint @@ -163,15 +163,15 @@ func CreateOrUpdateStateFul(cl kubernetes.Interface, logger logr.Logger, namespa return err } if apierrors.IsNotFound(err) { - return createStatefulSet(cl, logger, namespace, statefulSetDef) + return createStatefulSet(ctx, cl, logger, namespace, statefulSetDef) } return err } - return patchStatefulSet(storedStateful, statefulSetDef, namespace, params.RecreateStatefulSet, cl) + return patchStatefulSet(ctx, storedStateful, statefulSetDef, namespace, params.RecreateStatefulSet, cl) } // patchStateFulSet will patch Redis Kubernetes StateFulSet -func patchStatefulSet(storedStateful *appsv1.StatefulSet, newStateful *appsv1.StatefulSet, namespace string, recreateStateFulSet bool, cl kubernetes.Interface) error { +func patchStatefulSet(ctx context.Context, storedStateful *appsv1.StatefulSet, newStateful *appsv1.StatefulSet, namespace string, recreateStateFulSet bool, cl kubernetes.Interface) error { logger := statefulSetLogger(namespace, storedStateful.Name) // We want to try and keep this atomic as possible. 
newStateful.ResourceVersion = storedStateful.ResourceVersion @@ -268,7 +268,7 @@ func patchStatefulSet(storedStateful *appsv1.StatefulSet, newStateful *appsv1.St logger.Error(err, "Unable to patch redis statefulset with comparison object") return err } - return updateStatefulSet(cl, logger, namespace, newStateful, recreateStateFulSet) + return updateStatefulSet(ctx, cl, logger, namespace, newStateful, recreateStateFulSet) } logger.V(1).Info("Reconciliation Complete, no Changes required.") return nil @@ -767,7 +767,7 @@ func getEnvironmentVariables(role string, enabledPassword *bool, secretName *str } // createStatefulSet is a method to create statefulset in Kubernetes -func createStatefulSet(cl kubernetes.Interface, logger logr.Logger, namespace string, stateful *appsv1.StatefulSet) error { +func createStatefulSet(ctx context.Context, cl kubernetes.Interface, logger logr.Logger, namespace string, stateful *appsv1.StatefulSet) error { _, err := cl.AppsV1().StatefulSets(namespace).Create(context.TODO(), stateful, metav1.CreateOptions{}) if err != nil { logger.Error(err, "Redis stateful creation failed") @@ -778,7 +778,7 @@ func createStatefulSet(cl kubernetes.Interface, logger logr.Logger, namespace st } // updateStatefulSet is a method to update statefulset in Kubernetes -func updateStatefulSet(cl kubernetes.Interface, logger logr.Logger, namespace string, stateful *appsv1.StatefulSet, recreateStateFulSet bool) error { +func updateStatefulSet(ctx context.Context, cl kubernetes.Interface, logger logr.Logger, namespace string, stateful *appsv1.StatefulSet, recreateStateFulSet bool) error { _, err := cl.AppsV1().StatefulSets(namespace).Update(context.TODO(), stateful, metav1.UpdateOptions{}) if recreateStateFulSet { sErr, ok := err.(*apierrors.StatusError) @@ -803,7 +803,7 @@ func updateStatefulSet(cl kubernetes.Interface, logger logr.Logger, namespace st } // GetStateFulSet is a method to get statefulset in Kubernetes -func GetStatefulSet(cl kubernetes.Interface, logger logr.Logger, namespace string, name string) (*appsv1.StatefulSet, error) { +func GetStatefulSet(ctx context.Context, cl kubernetes.Interface, logger logr.Logger, namespace string, name string) (*appsv1.StatefulSet, error) { getOpts := metav1.GetOptions{ TypeMeta: generateMetaInformation("StatefulSet", "apps/v1"), } diff --git a/pkg/k8sutils/statefulset_test.go b/pkg/k8sutils/statefulset_test.go index f7d8ea7ae..181e58276 100644 --- a/pkg/k8sutils/statefulset_test.go +++ b/pkg/k8sutils/statefulset_test.go @@ -229,7 +229,7 @@ func Test_GetStatefulSet(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { client := k8sClientFake.NewSimpleClientset(test.sts.DeepCopy()) - _, err := GetStatefulSet(client, logger, test.stsNamespace, test.stsName) + _, err := GetStatefulSet(context.TODO(), client, logger, test.stsNamespace, test.stsName) if test.present { assert.Nil(t, err) } else { @@ -279,7 +279,7 @@ func Test_createStatefulSet(t *testing.T) { } else { client = k8sClientFake.NewSimpleClientset() } - err := createStatefulSet(client, logger, test.sts.GetNamespace(), &test.sts) + err := createStatefulSet(context.TODO(), client, logger, test.sts.GetNamespace(), &test.sts) if test.present { assert.NotNil(t, err) } else { @@ -395,7 +395,7 @@ func TestUpdateStatefulSet(t *testing.T) { } else { client = k8sClientFake.NewSimpleClientset() } - err := updateStatefulSet(client, logger, updatedSts.GetNamespace(), &updatedSts, test.recreateSts) + err := updateStatefulSet(context.TODO(), client, logger, 
updatedSts.GetNamespace(), &updatedSts, test.recreateSts)
 			if test.expectErr != nil {
 				assert.Error(err, "Expected Error while updating Statefulset")
 				assert.Equal(test.expectErr, err)
@@ -554,7 +554,7 @@ func TestCreateOrUpdateStateFul(t *testing.T) {
 			} else {
 				client = k8sClientFake.NewSimpleClientset()
 			}
-			err := CreateOrUpdateStateFul(client, logger, updatedSts.GetNamespace(), updatedSts.ObjectMeta, test.stsParams, test.stsOwnerDef, test.initContainerParams, test.containerParams, test.sidecar)
+			err := CreateOrUpdateStateFul(context.TODO(), client, logger, updatedSts.GetNamespace(), updatedSts.ObjectMeta, test.stsParams, test.stsOwnerDef, test.initContainerParams, test.containerParams, test.sidecar)
 			if test.expectErr != nil {
 				assert.Error(err, "Expected Error while updating Statefulset")
 				assert.Equal(test.expectErr, err)
@@ -577,7 +577,7 @@ func TestCreateOrUpdateStateFul(t *testing.T) {
 			client = k8sClientFake.NewSimpleClientset()
-			err := CreateOrUpdateStateFul(client, logger, updatedSts.GetNamespace(), updatedSts.ObjectMeta, test.stsParams, test.stsOwnerDef, test.initContainerParams, test.containerParams, test.sidecar)
+			err := CreateOrUpdateStateFul(context.TODO(), client, logger, updatedSts.GetNamespace(), updatedSts.ObjectMeta, test.stsParams, test.stsOwnerDef, test.initContainerParams, test.containerParams, test.sidecar)
 			assert.Nil(err)
 		}
 	})
diff --git a/pkg/k8sutils/status.go b/pkg/k8sutils/status.go
index aa19ea150..16d952e19 100644
--- a/pkg/k8sutils/status.go
+++ b/pkg/k8sutils/status.go
@@ -21,7 +21,7 @@ func statusLogger(namespace string, name string) logr.Logger {
 }
 
 // UpdateRedisClusterStatus will update the status of the RedisCluster
-func UpdateRedisClusterStatus(cr *redisv1beta2.RedisCluster, state status.RedisClusterState, reason string, readyLeaderReplicas, readyFollowerReplicas int32, dcl dynamic.Interface) error {
+func UpdateRedisClusterStatus(ctx context.Context, cr *redisv1beta2.RedisCluster, state status.RedisClusterState, reason string, readyLeaderReplicas, readyFollowerReplicas int32, dcl dynamic.Interface) error {
 	logger := statusLogger(cr.Namespace, cr.Name)
 	newStatus := redisv1beta2.RedisClusterStatus{
 		State: state,

From 95b5c5786427e5c2c8b7d4b452a7d94d8e59a6ec Mon Sep 17 00:00:00 2001
From: drivebyer
Date: Fri, 15 Nov 2024 16:03:39 +0800
Subject: [PATCH 3/8] remove logger

Signed-off-by: drivebyer
---
 pkg/controllers/redis/redis_controller.go     |  16 +-
 .../rediscluster/rediscluster_controller.go   | 112 ++++----
 .../redisreplication_controller.go            |  32 +--
 .../redissentinel/redissentinel_controller.go |  22 +-
 pkg/controllerutil/controller_common.go       |  15 +-
 pkg/k8sutils/cluster-scaling.go               | 174 ++++++------
 pkg/k8sutils/cluster-scaling_test.go          |  13 +-
 pkg/k8sutils/finalizer.go                     |  38 +--
 pkg/k8sutils/finalizer_test.go                |  29 +-
 pkg/k8sutils/poddisruption.go                 |  76 +++--
 pkg/k8sutils/redis-cluster.go                 |  34 +--
 pkg/k8sutils/redis-cluster_test.go            |   6 +-
 pkg/k8sutils/redis-replication.go             |  33 +--
 pkg/k8sutils/redis-sentinel.go                |  55 ++--
 pkg/k8sutils/redis-sentinel_test.go           |   4 +-
 pkg/k8sutils/redis-standalone.go              |  17 +-
 pkg/k8sutils/redis.go                         | 259 +++++++++---------
 pkg/k8sutils/redis_test.go                    |  37 +--
 pkg/k8sutils/secrets.go                       |  21 +-
 pkg/k8sutils/secrets_test.go                  |   9 +-
 pkg/k8sutils/services.go                      |  51 ++--
 pkg/k8sutils/services_test.go                 |  13 +-
 pkg/k8sutils/statefulset.go                   |  64 ++---
 pkg/k8sutils/statefulset_test.go              |  17 +-
 pkg/k8sutils/status.go                        |  13 +-
 25 files changed, 541 insertions(+), 619 deletions(-)

diff --git a/pkg/controllers/redis/redis_controller.go
b/pkg/controllers/redis/redis_controller.go
index 390171354..531261164 100644
--- a/pkg/controllers/redis/redis_controller.go
+++ b/pkg/controllers/redis/redis_controller.go
@@ -47,29 +47,29 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
 	err := r.Client.Get(context.TODO(), req.NamespacedName, instance)
 	if err != nil {
-		return intctrlutil.RequeueWithErrorChecking(err, reqLogger, "failed to get redis instance")
+		return intctrlutil.RequeueWithErrorChecking(ctx, err, "failed to get redis instance")
 	}
 	if instance.ObjectMeta.GetDeletionTimestamp() != nil {
-		if err = k8sutils.HandleRedisFinalizer(ctx, r.Client, r.K8sClient, r.Log, instance); err != nil {
-			return intctrlutil.RequeueWithError(err, reqLogger, "failed to handle redis finalizer")
+		if err = k8sutils.HandleRedisFinalizer(ctx, r.Client, r.K8sClient, instance); err != nil {
+			return intctrlutil.RequeueWithError(ctx, err, "failed to handle redis finalizer")
 		}
 		return intctrlutil.Reconciled()
 	}
 	if _, found := instance.ObjectMeta.GetAnnotations()["redis.opstreelabs.in/skip-reconcile"]; found {
-		return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "found skip reconcile annotation")
+		return intctrlutil.RequeueAfter(ctx, time.Second*10, "found skip reconcile annotation")
 	}
 	if err = k8sutils.AddFinalizer(ctx, instance, k8sutils.RedisFinalizer, r.Client); err != nil {
-		return intctrlutil.RequeueWithError(err, reqLogger, "failed to add finalizer")
+		return intctrlutil.RequeueWithError(ctx, err, "failed to add finalizer")
 	}
 	err = k8sutils.CreateStandaloneRedis(ctx, instance, r.K8sClient)
 	if err != nil {
-		return intctrlutil.RequeueWithError(err, reqLogger, "failed to create redis")
+		return intctrlutil.RequeueWithError(ctx, err, "failed to create redis")
 	}
 	err = k8sutils.CreateStandaloneService(ctx, instance, r.K8sClient)
 	if err != nil {
-		return intctrlutil.RequeueWithError(err, reqLogger, "failed to create service")
+		return intctrlutil.RequeueWithError(ctx, err, "failed to create service")
 	}
-	return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "requeue after 10 seconds")
+	return intctrlutil.RequeueAfter(ctx, time.Second*10, "requeue after 10 seconds")
 }
 
 // SetupWithManager sets up the controller with the Manager.
diff --git a/pkg/controllers/rediscluster/rediscluster_controller.go b/pkg/controllers/rediscluster/rediscluster_controller.go index 895e5f918..ce81f0a9b 100644 --- a/pkg/controllers/rediscluster/rediscluster_controller.go +++ b/pkg/controllers/rediscluster/rediscluster_controller.go @@ -19,6 +19,7 @@ package rediscluster import ( "context" "fmt" + "sigs.k8s.io/controller-runtime/pkg/log" "time" "github.com/OT-CONTAINER-KIT/redis-operator/api/status" @@ -46,22 +47,21 @@ type Reconciler struct { } func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - reqLogger := r.Log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name) - reqLogger.V(1).Info("Reconciling opstree redis Cluster controller") + logger := log.FromContext(ctx) instance := &redisv1beta2.RedisCluster{} err := r.Client.Get(context.TODO(), req.NamespacedName, instance) if err != nil { - return intctrlutil.RequeueWithErrorChecking(err, reqLogger, "failed to get redis cluster instance") + return intctrlutil.RequeueWithErrorChecking(ctx, err, "failed to get redis cluster instance") } if instance.ObjectMeta.GetDeletionTimestamp() != nil { - if err = k8sutils.HandleRedisClusterFinalizer(ctx, r.Client, r.K8sClient, r.Log, instance); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "failed to handle redis cluster finalizer") + if err = k8sutils.HandleRedisClusterFinalizer(ctx, r.Client, r.K8sClient, instance); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "failed to handle redis cluster finalizer") } return intctrlutil.Reconciled() } if _, found := instance.ObjectMeta.GetAnnotations()["rediscluster.opstreelabs.in/skip-reconcile"]; found { - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "found skip reconcile annotation") + return intctrlutil.RequeueAfter(ctx, time.Second*10, "found skip reconcile annotation") } instance.SetDefault() @@ -70,33 +70,33 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu totalReplicas := leaderReplicas + followerReplicas if err = k8sutils.AddFinalizer(ctx, instance, k8sutils.RedisClusterFinalizer, r.Client); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "failed to add finalizer") + return intctrlutil.RequeueWithError(ctx, err, "failed to add finalizer") } // Check if the cluster is downscaled - if leaderCount := k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, r.Log, instance, "leader"); leaderReplicas < leaderCount { - reqLogger.Info("Redis cluster is downscaling...", "Current.LeaderReplicas", leaderCount, "Desired.LeaderReplicas", leaderReplicas) + if leaderCount := k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, instance, "leader"); leaderReplicas < leaderCount { + logger.Info("Redis cluster is downscaling...", "Current.LeaderReplicas", leaderCount, "Desired.LeaderReplicas", leaderReplicas) for shardIdx := leaderCount - 1; shardIdx >= leaderReplicas; shardIdx-- { - reqLogger.Info("Remove the shard", "Shard.Index", shardIdx) + logger.Info("Remove the shard", "Shard.Index", shardIdx) // Imp if the last index of leader sts is not leader make it then // check whether the redis is leader or not ? 
// if not true then make it leader pod - if !(k8sutils.VerifyLeaderPod(ctx, r.K8sClient, r.Log, instance)) { + if !(k8sutils.VerifyLeaderPod(ctx, r.K8sClient, instance)) { // lastLeaderPod is slaving right now Make it the master Pod // We have to bring a manual failover here to make it a leaderPod // clusterFailover should also include the clusterReplicate since we have to map the followers to new leader - k8sutils.ClusterFailover(ctx, r.K8sClient, r.Log, instance) + k8sutils.ClusterFailover(ctx, r.K8sClient, instance) } // Step 1 Remove the Follower Node - k8sutils.RemoveRedisFollowerNodesFromCluster(ctx, r.K8sClient, r.Log, instance) + k8sutils.RemoveRedisFollowerNodesFromCluster(ctx, r.K8sClient, instance) // Step 2 Reshard the Cluster - k8sutils.ReshardRedisCluster(ctx, r.K8sClient, r.Log, instance, true) + k8sutils.ReshardRedisCluster(ctx, r.K8sClient, instance, true) } - reqLogger.Info("Redis cluster is downscaled... Rebalancing the cluster") + logger.Info("Redis cluster is downscaled... Rebalancing the cluster") // Step 3 Rebalance the cluster - k8sutils.RebalanceRedisCluster(ctx, r.K8sClient, r.Log, instance) - reqLogger.Info("Redis cluster is downscaled... Rebalancing the cluster is done") - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "") + k8sutils.RebalanceRedisCluster(ctx, r.K8sClient, instance) + logger.Info("Redis cluster is downscaled... Rebalancing the cluster is done") + return intctrlutil.RequeueAfter(ctx, time.Second*10, "") } // Mark the cluster status as initializing if there are no leader or follower nodes @@ -104,23 +104,23 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu instance.Status.ReadyLeaderReplicas != leaderReplicas { err = k8sutils.UpdateRedisClusterStatus(ctx, instance, status.RedisClusterInitializing, status.InitializingClusterLeaderReason, instance.Status.ReadyLeaderReplicas, instance.Status.ReadyFollowerReplicas, r.Dk8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } } if leaderReplicas != 0 { err = k8sutils.CreateRedisLeaderService(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } } err = k8sutils.CreateRedisLeader(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } err = k8sutils.ReconcileRedisPodDisruptionBudget(ctx, instance, "leader", instance.Spec.RedisLeader.PodDisruptionBudget, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } if r.IsStatefulSetReady(ctx, instance.Namespace, instance.Name+"-leader") { @@ -129,23 +129,23 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu instance.Status.ReadyFollowerReplicas != followerReplicas { err = k8sutils.UpdateRedisClusterStatus(ctx, instance, status.RedisClusterInitializing, status.InitializingClusterFollowerReason, leaderReplicas, instance.Status.ReadyFollowerReplicas, r.Dk8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } } // if we have followers create their service. 
if followerReplicas != 0 { err = k8sutils.CreateRedisFollowerService(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } } err = k8sutils.CreateRedisFollower(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } err = k8sutils.ReconcileRedisPodDisruptionBudget(ctx, instance, "follower", instance.Spec.RedisFollower.PodDisruptionBudget, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } } @@ -157,55 +157,55 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu if !(instance.Status.ReadyLeaderReplicas == leaderReplicas && instance.Status.ReadyFollowerReplicas == followerReplicas) { err = k8sutils.UpdateRedisClusterStatus(ctx, instance, status.RedisClusterBootstrap, status.BootstrapClusterReason, leaderReplicas, followerReplicas, r.Dk8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } } - if nc := k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, r.Log, instance, ""); nc != totalReplicas { - reqLogger.Info("Creating redis cluster by executing cluster creation commands") - leaderCount := k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, r.Log, instance, "leader") + if nc := k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, instance, ""); nc != totalReplicas { + logger.Info("Creating redis cluster by executing cluster creation commands") + leaderCount := k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, instance, "leader") if leaderCount != leaderReplicas { - reqLogger.Info("Not all leader are part of the cluster...", "Leaders.Count", leaderCount, "Instance.Size", leaderReplicas) + logger.Info("Not all leader are part of the cluster...", "Leaders.Count", leaderCount, "Instance.Size", leaderReplicas) if leaderCount <= 2 { - k8sutils.ExecuteRedisClusterCommand(ctx, r.K8sClient, r.Log, instance) + k8sutils.ExecuteRedisClusterCommand(ctx, r.K8sClient, instance) } else { if leaderCount < leaderReplicas { // Scale up the cluster // Step 2 : Add Redis Node - k8sutils.AddRedisNodeToCluster(ctx, r.K8sClient, r.Log, instance) + k8sutils.AddRedisNodeToCluster(ctx, r.K8sClient, instance) // Step 3 Rebalance the cluster using the empty masters - k8sutils.RebalanceRedisClusterEmptyMasters(ctx, r.K8sClient, r.Log, instance) + k8sutils.RebalanceRedisClusterEmptyMasters(ctx, r.K8sClient, instance) } } } else { if followerReplicas > 0 { - reqLogger.Info("All leader are part of the cluster, adding follower/replicas", "Leaders.Count", leaderCount, "Instance.Size", leaderReplicas, "Follower.Replicas", followerReplicas) - k8sutils.ExecuteRedisReplicationCommand(ctx, r.K8sClient, r.Log, instance) + logger.Info("All leader are part of the cluster, adding follower/replicas", "Leaders.Count", leaderCount, "Instance.Size", leaderReplicas, "Follower.Replicas", followerReplicas) + k8sutils.ExecuteRedisReplicationCommand(ctx, r.K8sClient, instance) } else { - reqLogger.Info("no follower/replicas configured, skipping replication configuration", "Leaders.Count", leaderCount, "Leader.Size", leaderReplicas, "Follower.Replicas", followerReplicas) + logger.Info("no follower/replicas configured, skipping replication configuration", "Leaders.Count", leaderCount, "Leader.Size", leaderReplicas, "Follower.Replicas", followerReplicas) } } - 
return intctrlutil.RequeueAfter(reqLogger, time.Second*60, "Redis cluster count is not desired", "Current.Count", nc, "Desired.Count", totalReplicas) + return intctrlutil.RequeueAfter(ctx, time.Second*60, "Redis cluster count is not desired", "Current.Count", nc, "Desired.Count", totalReplicas) } - reqLogger.V(1).Info("Number of Redis nodes match desired") - unhealthyNodeCount, err := k8sutils.UnhealthyNodesInCluster(ctx, r.K8sClient, r.Log, instance) + logger.Info("Number of Redis nodes match desired") + unhealthyNodeCount, err := k8sutils.UnhealthyNodesInCluster(ctx, r.K8sClient, instance) if err != nil { - reqLogger.Error(err, "failed to determine unhealthy node count in cluster") + logger.Error(err, "failed to determine unhealthy node count in cluster") } if int(totalReplicas) > 1 && unhealthyNodeCount >= int(totalReplicas)-1 { err = k8sutils.UpdateRedisClusterStatus(ctx, instance, status.RedisClusterFailed, "RedisCluster has too many unhealthy nodes", leaderReplicas, followerReplicas, r.Dk8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } - reqLogger.Info("healthy leader count does not match desired; attempting to repair disconnected masters") - if err = k8sutils.RepairDisconnectedMasters(ctx, r.K8sClient, r.Log, instance); err != nil { - reqLogger.Error(err, "failed to repair disconnected masters") + logger.Info("healthy leader count does not match desired; attempting to repair disconnected masters") + if err = k8sutils.RepairDisconnectedMasters(ctx, r.K8sClient, instance); err != nil { + logger.Error(err, "failed to repair disconnected masters") } err = retry.Do(func() error { - nc, nErr := k8sutils.UnhealthyNodesInCluster(ctx, r.K8sClient, r.Log, instance) + nc, nErr := k8sutils.UnhealthyNodesInCluster(ctx, r.K8sClient, instance) if nErr != nil { return nErr } @@ -216,30 +216,30 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu }, retry.Attempts(3), retry.Delay(time.Second*5)) if err == nil { - reqLogger.Info("repairing unhealthy masters successful, no unhealthy masters left") - return intctrlutil.RequeueAfter(reqLogger, time.Second*30, "no unhealthy nodes found after repairing disconnected masters") + logger.Info("repairing unhealthy masters successful, no unhealthy masters left") + return intctrlutil.RequeueAfter(ctx, time.Second*30, "no unhealthy nodes found after repairing disconnected masters") } - reqLogger.Info("unhealthy nodes exist after attempting to repair disconnected masters; starting failover") - if err = k8sutils.ExecuteFailoverOperation(ctx, r.K8sClient, r.Log, instance); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + logger.Info("unhealthy nodes exist after attempting to repair disconnected masters; starting failover") + if err = k8sutils.ExecuteFailoverOperation(ctx, r.K8sClient, instance); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "") } } // Check If there is No Empty Master Node - if k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, r.Log, instance, "") == totalReplicas { - k8sutils.CheckIfEmptyMasters(ctx, r.K8sClient, r.Log, instance) + if k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, instance, "") == totalReplicas { + k8sutils.CheckIfEmptyMasters(ctx, r.K8sClient, instance) } // Mark the cluster status as ready if all the leader and follower nodes are ready if instance.Status.ReadyLeaderReplicas == leaderReplicas && instance.Status.ReadyFollowerReplicas == followerReplicas { - if 
k8sutils.RedisClusterStatusHealth(ctx, r.K8sClient, r.Log, instance) { + if k8sutils.RedisClusterStatusHealth(ctx, r.K8sClient, instance) { err = k8sutils.UpdateRedisClusterStatus(ctx, instance, status.RedisClusterReady, status.ReadyClusterReason, leaderReplicas, followerReplicas, r.Dk8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } } } - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "") + return intctrlutil.RequeueAfter(ctx, time.Second*10, "") } // SetupWithManager sets up the controller with the Manager. diff --git a/pkg/controllers/redisreplication/redisreplication_controller.go b/pkg/controllers/redisreplication/redisreplication_controller.go index 3436ee8ba..4410f021a 100644 --- a/pkg/controllers/redisreplication/redisreplication_controller.go +++ b/pkg/controllers/redisreplication/redisreplication_controller.go @@ -35,53 +35,53 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu err := r.Client.Get(context.TODO(), req.NamespacedName, instance) if err != nil { - return intctrlutil.RequeueWithErrorChecking(err, reqLogger, "") + return intctrlutil.RequeueWithErrorChecking(ctx, err, "") } if instance.ObjectMeta.GetDeletionTimestamp() != nil { - if err = k8sutils.HandleRedisReplicationFinalizer(ctx, r.Client, r.K8sClient, r.Log, instance); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + if err = k8sutils.HandleRedisReplicationFinalizer(ctx, r.Client, r.K8sClient, instance); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "") } return intctrlutil.Reconciled() } if _, found := instance.ObjectMeta.GetAnnotations()["redisreplication.opstreelabs.in/skip-reconcile"]; found { - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "found skip reconcile annotation") + return intctrlutil.RequeueAfter(ctx, time.Second*10, "found skip reconcile annotation") } if err = k8sutils.AddFinalizer(ctx, instance, k8sutils.RedisReplicationFinalizer, r.Client); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } err = k8sutils.CreateReplicationRedis(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } err = k8sutils.CreateReplicationService(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } if !r.IsStatefulSetReady(ctx, instance.Namespace, instance.Name) { return intctrlutil.Reconciled() } var realMaster string - masterNodes := k8sutils.GetRedisNodesByRole(ctx, r.K8sClient, r.Log, instance, "master") + masterNodes := k8sutils.GetRedisNodesByRole(ctx, r.K8sClient, instance, "master") if len(masterNodes) > 1 { reqLogger.Info("Creating redis replication by executing replication creation commands") - slaveNodes := k8sutils.GetRedisNodesByRole(ctx, r.K8sClient, r.Log, instance, "slave") - realMaster = k8sutils.GetRedisReplicationRealMaster(ctx, r.K8sClient, r.Log, instance, masterNodes) + slaveNodes := k8sutils.GetRedisNodesByRole(ctx, r.K8sClient, instance, "slave") + realMaster = k8sutils.GetRedisReplicationRealMaster(ctx, r.K8sClient, instance, masterNodes) if len(slaveNodes) == 0 { realMaster = masterNodes[0] } - if err = k8sutils.CreateMasterSlaveReplication(ctx, r.K8sClient, r.Log, instance, masterNodes, realMaster); err != nil { - return 
intctrlutil.RequeueAfter(reqLogger, time.Second*60, "") + if err = k8sutils.CreateMasterSlaveReplication(ctx, r.K8sClient, instance, masterNodes, realMaster); err != nil { + return intctrlutil.RequeueAfter(ctx, time.Second*60, "") } } - realMaster = k8sutils.GetRedisReplicationRealMaster(ctx, r.K8sClient, r.Log, instance, masterNodes) + realMaster = k8sutils.GetRedisReplicationRealMaster(ctx, r.K8sClient, instance, masterNodes) if err = r.UpdateRedisReplicationMaster(ctx, instance, realMaster); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } if err = r.UpdateRedisPodRoleLabel(ctx, instance, realMaster); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "") + return intctrlutil.RequeueAfter(ctx, time.Second*10, "") } func (r *Reconciler) UpdateRedisReplicationMaster(ctx context.Context, instance *redisv1beta2.RedisReplication, masterNode string) error { diff --git a/pkg/controllers/redissentinel/redissentinel_controller.go b/pkg/controllers/redissentinel/redissentinel_controller.go index 9b2ca19a1..e33b5647d 100644 --- a/pkg/controllers/redissentinel/redissentinel_controller.go +++ b/pkg/controllers/redissentinel/redissentinel_controller.go @@ -34,28 +34,28 @@ func (r *RedisSentinelReconciler) Reconcile(ctx context.Context, req ctrl.Reques err := r.Client.Get(context.TODO(), req.NamespacedName, instance) if err != nil { - return intctrlutil.RequeueWithErrorChecking(err, reqLogger, "") + return intctrlutil.RequeueWithErrorChecking(ctx, err, "") } if instance.ObjectMeta.GetDeletionTimestamp() != nil { - if err = k8sutils.HandleRedisSentinelFinalizer(ctx, r.Client, r.Log, instance); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + if err = k8sutils.HandleRedisSentinelFinalizer(ctx, r.Client, instance); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "") } return intctrlutil.Reconciled() } if _, found := instance.ObjectMeta.GetAnnotations()["redissentinel.opstreelabs.in/skip-reconcile"]; found { - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "found skip reconcile annotation") + return intctrlutil.RequeueAfter(ctx, time.Second*10, "found skip reconcile annotation") } // Get total Sentinel Replicas // sentinelReplicas := instance.Spec.GetSentinelCounts("sentinel") if err = k8sutils.AddFinalizer(ctx, instance, k8sutils.RedisSentinelFinalizer, r.Client); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } - if instance.Spec.RedisSentinelConfig != nil && !k8sutils.IsRedisReplicationReady(ctx, reqLogger, r.K8sClient, r.Dk8sClient, instance) { - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "Redis Replication is specified but not ready") + if instance.Spec.RedisSentinelConfig != nil && !k8sutils.IsRedisReplicationReady(ctx, r.K8sClient, r.Dk8sClient, instance) { + return intctrlutil.RequeueAfter(ctx, time.Second*10, "Redis Replication is specified but not ready") } if instance.Spec.RedisSentinelConfig != nil { @@ -70,20 +70,20 @@ func (r *RedisSentinelReconciler) Reconcile(ctx context.Context, req ctrl.Reques } // Create Redis Sentinel - err = k8sutils.CreateRedisSentinel(ctx, r.K8sClient, r.Log, instance, r.K8sClient, r.Dk8sClient) + err = k8sutils.CreateRedisSentinel(ctx, r.K8sClient, instance, r.K8sClient, r.Dk8sClient) if err != nil { - return 
intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } err = k8sutils.ReconcileSentinelPodDisruptionBudget(ctx, instance, instance.Spec.PodDisruptionBudget, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } // Create the Service for Redis Sentinel err = k8sutils.CreateRedisSentinelService(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } return intctrlutil.Reconciled() } diff --git a/pkg/controllerutil/controller_common.go b/pkg/controllerutil/controller_common.go index ba063060b..1b5c7f4d2 100644 --- a/pkg/controllerutil/controller_common.go +++ b/pkg/controllerutil/controller_common.go @@ -1,9 +1,10 @@ package controllerutil import ( + "context" + "sigs.k8s.io/controller-runtime/pkg/log" "time" - "github.com/go-logr/logr" apierrors "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -12,29 +13,29 @@ func Reconciled() (reconcile.Result, error) { return reconcile.Result{}, nil } -func RequeueAfter(logger logr.Logger, duration time.Duration, msg string, keysAndValues ...interface{}) (reconcile.Result, error) { +func RequeueAfter(ctx context.Context, duration time.Duration, msg string, keysAndValues ...interface{}) (reconcile.Result, error) { keysAndValues = append(keysAndValues, "duration", duration.String()) if msg == "" { msg = "requeue-after" } - logger.V(1).Info(msg, keysAndValues...) + log.FromContext(ctx).Info(msg, keysAndValues...) return reconcile.Result{ Requeue: true, RequeueAfter: duration, }, nil } -func RequeueWithError(err error, logger logr.Logger, msg string, keysAndValues ...interface{}) (reconcile.Result, error) { +func RequeueWithError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) (reconcile.Result, error) { if msg == "" { msg = "requeue with error" } - logger.Error(err, msg, keysAndValues...) + log.FromContext(ctx).Error(err, msg, keysAndValues...) return reconcile.Result{}, err } -func RequeueWithErrorChecking(err error, logger logr.Logger, msg string, keysAndValues ...interface{}) (reconcile.Result, error) { +func RequeueWithErrorChecking(ctx context.Context, err error, msg string, keysAndValues ...interface{}) (reconcile.Result, error) { if apierrors.IsNotFound(err) { return Reconciled() } - return RequeueWithError(err, logger, msg, keysAndValues...) + return RequeueWithError(ctx, err, msg, keysAndValues...) } diff --git a/pkg/k8sutils/cluster-scaling.go b/pkg/k8sutils/cluster-scaling.go index cae21621b..3b7044e0a 100644 --- a/pkg/k8sutils/cluster-scaling.go +++ b/pkg/k8sutils/cluster-scaling.go @@ -3,11 +3,11 @@ package k8sutils import ( "context" "fmt" + "sigs.k8s.io/controller-runtime/pkg/log" "strconv" "strings" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" - "github.com/go-logr/logr" redis "github.com/redis/go-redis/v9" "k8s.io/client-go/kubernetes" ) @@ -15,12 +15,12 @@ import ( // ReshardRedisCluster transfer the slots from the last node to the first node. // // NOTE: when all slot been transferred, the node become slave of the first master node. 
-func ReshardRedisCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, remove bool) { - redisClient := configureRedisClient(ctx, client, logger, cr, cr.ObjectMeta.Name+"-leader-0") +func ReshardRedisCluster(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, remove bool) { + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() var cmd []string - currentRedisCount := CheckRedisNodeCount(ctx, client, logger, cr, "leader") + currentRedisCount := CheckRedisNodeCount(ctx, client, cr, "leader") // Transfer Pod details transferPOD := RedisDetails{ @@ -37,13 +37,13 @@ func ReshardRedisCluster(ctx context.Context, client kubernetes.Interface, logge if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(transferPOD, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(ctx, client, logger, transferPOD, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, transferPOD, *cr.Spec.Port)) } if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) @@ -54,41 +54,41 @@ func ReshardRedisCluster(ctx context.Context, client kubernetes.Interface, logge //--cluster-from --cluster-to --cluster-slots --cluster-yes // Remove Node - removeNodeID := getRedisNodeID(ctx, client, logger, cr, removePOD) + removeNodeID := getRedisNodeID(ctx, client, cr, removePOD) cmd = append(cmd, "--cluster-from") cmd = append(cmd, removeNodeID) // Transfer Node - transferNodeID := getRedisNodeID(ctx, client, logger, cr, transferPOD) + transferNodeID := getRedisNodeID(ctx, client, cr, transferPOD) cmd = append(cmd, "--cluster-to") cmd = append(cmd, transferNodeID) // Cluster Slots - slot := getRedisClusterSlots(ctx, redisClient, logger, removeNodeID) + slot := getRedisClusterSlots(ctx, redisClient, removeNodeID) cmd = append(cmd, "--cluster-slots") cmd = append(cmd, slot) cmd = append(cmd, "--cluster-yes") - logger.V(1).Info("Redis cluster reshard command is", "Command", cmd) + log.FromContext(ctx).Info("Redis cluster reshard command is", "Command", cmd) if slot == "0" { - logger.V(1).Info("Skipped the execution of", "Cmd", cmd) + log.FromContext(ctx).Info("Skipped the execution of", "Cmd", cmd) return } - executeCommand(ctx, client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") if remove { - RemoveRedisNodeFromCluster(ctx, client, logger, cr, removePOD) + RemoveRedisNodeFromCluster(ctx, client, cr, removePOD) } } -func getRedisClusterSlots(ctx context.Context, redisClient *redis.Client, logger logr.Logger, nodeID string) string { +func getRedisClusterSlots(ctx context.Context, redisClient *redis.Client, nodeID string) string { totalSlots := 0 redisSlots, err := redisClient.ClusterSlots(ctx).Result() if err != nil { - logger.Error(err, "Failed to Get Cluster Slots") + log.FromContext(ctx).Error(err, "Failed to Get 
Cluster Slots") return "" } for _, slot := range redisSlots { @@ -101,39 +101,39 @@ func getRedisClusterSlots(ctx context.Context, redisClient *redis.Client, logger } } - logger.V(1).Info("Total cluster slots to be transferred from", "node", nodeID, "is", totalSlots) + log.FromContext(ctx).Info("Total cluster slots to be transferred from", "node", nodeID, "is", totalSlots) return strconv.Itoa(totalSlots) } // getRedisNodeID would return nodeID of a redis node by passing pod -func getRedisNodeID(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, pod RedisDetails) string { - redisClient := configureRedisClient(ctx, client, logger, cr, pod.PodName) +func getRedisNodeID(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, pod RedisDetails) string { + redisClient := configureRedisClient(ctx, client, cr, pod.PodName) defer redisClient.Close() pong, err := redisClient.Ping(ctx).Result() if err != nil || pong != "PONG" { - logger.Error(err, "Failed to ping Redis server") + log.FromContext(ctx).Error(err, "Failed to ping Redis server") return "" } cmd := redis.NewStringCmd(ctx, "cluster", "myid") err = redisClient.Process(ctx, cmd) if err != nil { - logger.Error(err, "Redis command failed with this error") + log.FromContext(ctx).Error(err, "Redis command failed with this error") return "" } output, err := cmd.Result() if err != nil { - logger.Error(err, "Redis command failed with this error") + log.FromContext(ctx).Error(err, "Redis command failed with this error") return "" } - logger.V(1).Info("Redis node ID ", "is", output) + log.FromContext(ctx).Info("Redis node ID ", "is", output) return output } // Rebalance the Redis CLuster using the Empty Master Nodes -func RebalanceRedisClusterEmptyMasters(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { +func RebalanceRedisClusterEmptyMasters(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { // cmd = redis-cli --cluster rebalance : --cluster-use-empty-masters -a var cmd []string pod := RedisDetails{ @@ -145,15 +145,15 @@ func RebalanceRedisClusterEmptyMasters(ctx context.Context, client kubernetes.In if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(pod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(ctx, client, logger, pod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, pod, *cr.Spec.Port)) } cmd = append(cmd, "--cluster-use-empty-masters") if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) @@ -161,13 +161,13 @@ func RebalanceRedisClusterEmptyMasters(ctx context.Context, client kubernetes.In cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) 
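The same convention is applied across pkg/k8sutils and the intctrlutil helpers shown earlier: functions stop accepting a logr.Logger argument and recover the logger from the context through sigs.k8s.io/controller-runtime/pkg/log, which works because controller-runtime attaches a request-scoped logger to the context it hands to Reconcile. A minimal sketch of the pattern, using an illustrative helper name that is not part of this patch:

    package k8sutils

    import (
        "context"

        "sigs.k8s.io/controller-runtime/pkg/log"
    )

    // doClusterWork is an illustrative helper (not part of this patch) showing the
    // convention the diff applies everywhere: no logr.Logger parameter, the logger
    // is recovered from the context instead.
    func doClusterWork(ctx context.Context, nodeID string) {
        logger := log.FromContext(ctx) // request-scoped logger, or the package-level fallback if none was attached
        logger.Info("starting cluster work", "nodeID", nodeID)
    }

Callers now pass only ctx; when nothing has attached a logger, log.FromContext falls back to the package-level logger, so the calls remain safe in tests and one-off tooling.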
- logger.V(1).Info("Redis cluster rebalance command is", "Command", cmd) - executeCommand(ctx, client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-1") + log.FromContext(ctx).Info("Redis cluster rebalance command is", "Command", cmd) + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-1") } -func CheckIfEmptyMasters(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { - totalRedisLeaderNodes := CheckRedisNodeCount(ctx, client, logger, cr, "leader") - redisClient := configureRedisClient(ctx, client, logger, cr, cr.ObjectMeta.Name+"-leader-0") +func CheckIfEmptyMasters(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { + totalRedisLeaderNodes := CheckRedisNodeCount(ctx, client, cr, "leader") + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() for i := 0; i < int(totalRedisLeaderNodes); i++ { @@ -175,19 +175,19 @@ func CheckIfEmptyMasters(ctx context.Context, client kubernetes.Interface, logge PodName: cr.ObjectMeta.Name + "-leader-" + strconv.Itoa(i), Namespace: cr.Namespace, } - podNodeID := getRedisNodeID(ctx, client, logger, cr, pod) - podSlots := getRedisClusterSlots(ctx, redisClient, logger, podNodeID) + podNodeID := getRedisNodeID(ctx, client, cr, pod) + podSlots := getRedisClusterSlots(ctx, redisClient, podNodeID) if podSlots == "0" || podSlots == "" { - logger.V(1).Info("Found Empty Redis Leader Node", "pod", pod) - RebalanceRedisClusterEmptyMasters(ctx, client, logger, cr) + log.FromContext(ctx).Info("Found Empty Redis Leader Node", "pod", pod) + RebalanceRedisClusterEmptyMasters(ctx, client, cr) break } } } // Rebalance Redis Cluster Would Rebalance the Redis Cluster without using the empty masters -func RebalanceRedisCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { +func RebalanceRedisCluster(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { // cmd = redis-cli --cluster rebalance : -a var cmd []string pod := RedisDetails{ @@ -199,13 +199,13 @@ func RebalanceRedisCluster(ctx context.Context, client kubernetes.Interface, log if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(pod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(ctx, client, logger, pod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, pod, *cr.Spec.Port)) } if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) @@ -213,14 +213,14 @@ func RebalanceRedisCluster(ctx context.Context, client kubernetes.Interface, log cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) 
- logger.V(1).Info("Redis cluster rebalance command is", "Command", cmd) - executeCommand(ctx, client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-1") + log.FromContext(ctx).Info("Redis cluster rebalance command is", "Command", cmd) + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-1") } // Add redis cluster node would add a node to the existing redis cluster using redis-cli -func AddRedisNodeToCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { +func AddRedisNodeToCluster(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { var cmd []string - activeRedisNode := CheckRedisNodeCount(ctx, client, logger, cr, "leader") + activeRedisNode := CheckRedisNodeCount(ctx, client, cr, "leader") newPod := RedisDetails{ PodName: cr.ObjectMeta.Name + "-leader-" + strconv.Itoa(int(activeRedisNode)), @@ -237,14 +237,14 @@ func AddRedisNodeToCluster(ctx context.Context, client kubernetes.Interface, log cmd = append(cmd, getRedisHostname(newPod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) cmd = append(cmd, getRedisHostname(existingPod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(ctx, client, logger, newPod, *cr.Spec.Port)) - cmd = append(cmd, getRedisServerAddress(ctx, client, logger, existingPod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, newPod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, existingPod, *cr.Spec.Port)) } if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) @@ -252,16 +252,16 @@ func AddRedisNodeToCluster(ctx context.Context, client kubernetes.Interface, log cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) 
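For reference, the commands these helpers assemble are standard redis-cli cluster operations: roughly redis-cli --cluster rebalance <leader-0 address>:<port> for the rebalance path and redis-cli --cluster add-node <new leader address>:<port> <leader-0 address>:<port> for AddRedisNodeToCluster, with -a <password> appended only when an existing password secret is configured. The bracketed values are placeholders built from the CR at runtime; addresses switch to hostnames when ClusterVersion is "v7", and the leading --cluster subcommand flags sit in context lines not visible in this hunk.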
- logger.V(1).Info("Redis cluster add-node command is", "Command", cmd) - executeCommand(ctx, client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + log.FromContext(ctx).Info("Redis cluster add-node command is", "Command", cmd) + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") } // getAttachedFollowerNodeIDs would return a slice of redis followers attached to a redis leader -func getAttachedFollowerNodeIDs(ctx context.Context, redisClient *redis.Client, logger logr.Logger, masterNodeID string) []string { +func getAttachedFollowerNodeIDs(ctx context.Context, redisClient *redis.Client, masterNodeID string) []string { // 3acb029fead40752f432c84f9bed2e639119a573 192.168.84.239:6379@16379,redis-cluster-v1beta2-follower-5 slave e3299968586dd457a8dba04fc6c747cecd38510f 0 1713595736542 6 connected slaveNodes, err := redisClient.ClusterSlaves(ctx, masterNodeID).Result() if err != nil { - logger.Error(err, "Failed to get attached follower node IDs", "masterNodeID", masterNodeID) + log.FromContext(ctx).Error(err, "Failed to get attached follower node IDs", "masterNodeID", masterNodeID) return nil } slaveIDs := make([]string, 0, len(slaveNodes)) @@ -269,16 +269,16 @@ func getAttachedFollowerNodeIDs(ctx context.Context, redisClient *redis.Client, stringSlice := strings.Split(slave, " ") slaveIDs = append(slaveIDs, stringSlice[0]) } - logger.V(1).Info("Slaves Nodes attached to", "node", masterNodeID, "are", slaveIDs) + log.FromContext(ctx).Info("Slaves Nodes attached to", "node", masterNodeID, "are", slaveIDs) return slaveIDs } // Remove redis follower node would remove all follower nodes of last leader node using redis-cli -func RemoveRedisFollowerNodesFromCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { +func RemoveRedisFollowerNodesFromCluster(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { var cmd []string - redisClient := configureRedisClient(ctx, client, logger, cr, cr.ObjectMeta.Name+"-leader-0") + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() - currentRedisCount := CheckRedisNodeCount(ctx, client, logger, cr, "leader") + currentRedisCount := CheckRedisNodeCount(ctx, client, cr, "leader") existingPod := RedisDetails{ PodName: cr.ObjectMeta.Name + "-leader-0", @@ -292,39 +292,39 @@ func RemoveRedisFollowerNodesFromCluster(ctx context.Context, client kubernetes. cmd = []string{"redis-cli"} if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) } cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) 
- lastLeaderPodNodeID := getRedisNodeID(ctx, client, logger, cr, lastLeaderPod) - followerNodeIDs := getAttachedFollowerNodeIDs(ctx, redisClient, logger, lastLeaderPodNodeID) + lastLeaderPodNodeID := getRedisNodeID(ctx, client, cr, lastLeaderPod) + followerNodeIDs := getAttachedFollowerNodeIDs(ctx, redisClient, lastLeaderPodNodeID) cmd = append(cmd, "--cluster", "del-node") if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(existingPod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(ctx, client, logger, existingPod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, existingPod, *cr.Spec.Port)) } for _, followerNodeID := range followerNodeIDs { cmd = append(cmd, followerNodeID) - logger.V(1).Info("Redis cluster follower remove command is", "Command", cmd) - executeCommand(ctx, client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + log.FromContext(ctx).Info("Redis cluster follower remove command is", "Command", cmd) + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") cmd = cmd[:len(cmd)-1] } } // Remove redis cluster node would remove last node to the existing redis cluster using redis-cli -func RemoveRedisNodeFromCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, removePod RedisDetails) { +func RemoveRedisNodeFromCluster(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, removePod RedisDetails) { var cmd []string - redisClient := configureRedisClient(ctx, client, logger, cr, cr.ObjectMeta.Name+"-leader-0") + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() - // currentRedisCount := CheckRedisNodeCount(ctx, client, logger, cr, "leader") + // currentRedisCount := CheckRedisNodeCount(ctx, client, cr, "leader") existingPod := RedisDetails{ PodName: cr.ObjectMeta.Name + "-leader-0", @@ -340,16 +340,16 @@ func RemoveRedisNodeFromCluster(ctx context.Context, client kubernetes.Interface if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(existingPod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(ctx, client, logger, existingPod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, existingPod, *cr.Spec.Port)) } - removePodNodeID := getRedisNodeID(ctx, client, logger, cr, removePod) + removePodNodeID := getRedisNodeID(ctx, client, cr, removePod) cmd = append(cmd, removePodNodeID) if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) @@ -357,26 +357,26 @@ func RemoveRedisNodeFromCluster(ctx context.Context, client kubernetes.Interface cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) 
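The removal loop above reuses a single base del-node command: it appends one follower ID, runs the command, then slices that ID back off before the next pass. A stripped-down model of the idiom, with made-up addresses and IDs:

    package main

    import "fmt"

    // One base del-node command is reused per follower: append an ID, "execute",
    // then truncate so the next iteration starts from the same base.
    func main() {
        cmd := []string{"redis-cli", "--cluster", "del-node", "10.0.0.1:6379"}
        followers := []string{"follower-id-a", "follower-id-b"}
        for _, id := range followers {
            cmd = append(cmd, id)
            fmt.Println(cmd)       // stands in for executeCommand(ctx, client, cr, cmd, ...)
            cmd = cmd[:len(cmd)-1] // drop the ID so the base command is reused
        }
    }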
- logger.V(1).Info("Redis cluster leader remove command is", "Command", cmd) - if getRedisClusterSlots(ctx, redisClient, logger, removePodNodeID) != "0" { - logger.V(1).Info("Skipping execution remove leader not empty", "cmd", cmd) + log.FromContext(ctx).Info("Redis cluster leader remove command is", "Command", cmd) + if getRedisClusterSlots(ctx, redisClient, removePodNodeID) != "0" { + log.FromContext(ctx).Info("Skipping execution remove leader not empty", "cmd", cmd) } - executeCommand(ctx, client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") } // verifyLeaderPod return true if the pod is leader/master -func VerifyLeaderPod(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) bool { - podName := cr.Name + "-leader-" + strconv.Itoa(int(CheckRedisNodeCount(ctx, client, logger, cr, "leader"))-1) +func VerifyLeaderPod(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) bool { + podName := cr.Name + "-leader-" + strconv.Itoa(int(CheckRedisNodeCount(ctx, client, cr, "leader"))-1) - redisClient := configureRedisClient(ctx, client, logger, cr, podName) + redisClient := configureRedisClient(ctx, client, cr, podName) defer redisClient.Close() - return verifyLeaderPodInfo(ctx, redisClient, logger, podName) + return verifyLeaderPodInfo(ctx, redisClient, podName) } -func verifyLeaderPodInfo(ctx context.Context, redisClient *redis.Client, logger logr.Logger, podName string) bool { +func verifyLeaderPodInfo(ctx context.Context, redisClient *redis.Client, podName string) bool { info, err := redisClient.Info(ctx, "replication").Result() if err != nil { - logger.Error(err, "Failed to Get the role Info of the", "redis pod", podName) + log.FromContext(ctx).Error(err, "Failed to Get the role Info of the", "redis pod", podName) return false } @@ -391,8 +391,8 @@ func verifyLeaderPodInfo(ctx context.Context, redisClient *redis.Client, logger return false } -func ClusterFailover(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { - slavePodName := cr.Name + "-leader-" + strconv.Itoa(int(CheckRedisNodeCount(ctx, client, logger, cr, "leader"))-1) +func ClusterFailover(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { + slavePodName := cr.Name + "-leader-" + strconv.Itoa(int(CheckRedisNodeCount(ctx, client, cr, "leader"))-1) // cmd = redis-cli cluster failover -a var cmd []string pod := RedisDetails{ @@ -405,13 +405,13 @@ func ClusterFailover(ctx context.Context, client kubernetes.Interface, logger lo if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(pod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(ctx, client, logger, pod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, pod, *cr.Spec.Port)) } if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) @@ 
-419,6 +419,6 @@ func ClusterFailover(ctx context.Context, client kubernetes.Interface, logger lo cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, slavePodName)...) - logger.V(1).Info("Redis cluster failover command is", "Command", cmd) - executeCommand(ctx, client, logger, cr, cmd, slavePodName) + log.FromContext(ctx).Info("Redis cluster failover command is", "Command", cmd) + executeCommand(ctx, client, cr, cmd, slavePodName) } diff --git a/pkg/k8sutils/cluster-scaling_test.go b/pkg/k8sutils/cluster-scaling_test.go index fe1bd3bd0..69fa1fba0 100644 --- a/pkg/k8sutils/cluster-scaling_test.go +++ b/pkg/k8sutils/cluster-scaling_test.go @@ -5,15 +5,12 @@ import ( "fmt" "testing" - "github.com/go-logr/logr" "github.com/go-redis/redismock/v9" redis "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" ) func Test_verifyLeaderPodInfo(t *testing.T) { - logger := logr.Discard() - tests := []struct { name string section string @@ -52,7 +49,7 @@ func Test_verifyLeaderPodInfo(t *testing.T) { mock.ExpectInfo(tt.section).SetVal(tt.response) } - result := verifyLeaderPodInfo(ctx, client, logger, "test-pod") + result := verifyLeaderPodInfo(ctx, client, "test-pod") assert.Equal(t, tt.expectedBool, result, "Test case: "+tt.name) @@ -64,8 +61,6 @@ func Test_verifyLeaderPodInfo(t *testing.T) { } func Test_getRedisClusterSlots(t *testing.T) { - logger := logr.Discard() - tests := []struct { name string nodeID string @@ -138,7 +133,7 @@ func Test_getRedisClusterSlots(t *testing.T) { mock.ExpectClusterSlots().SetVal(tt.clusterSlots) } - result := getRedisClusterSlots(ctx, client, logger, tt.nodeID) + result := getRedisClusterSlots(ctx, client, tt.nodeID) assert.Equal(t, tt.expectedResult, result, "Test case: "+tt.name) @@ -150,8 +145,6 @@ func Test_getRedisClusterSlots(t *testing.T) { } func Test_getAttachedFollowerNodeIDs(t *testing.T) { - logger := logr.Discard() - tests := []struct { name string masterNodeID string @@ -209,7 +202,7 @@ func Test_getAttachedFollowerNodeIDs(t *testing.T) { mock.ExpectClusterSlaves(tt.masterNodeID).SetVal(tt.slaveNodeIDs) } - result := getAttachedFollowerNodeIDs(ctx, client, logger, tt.masterNodeID) + result := getAttachedFollowerNodeIDs(ctx, client, tt.masterNodeID) assert.ElementsMatch(t, tt.expectedslaveNodeIDs, result, "Test case: "+tt.name) diff --git a/pkg/k8sutils/finalizer.go b/pkg/k8sutils/finalizer.go index e16522d14..6331fd319 100644 --- a/pkg/k8sutils/finalizer.go +++ b/pkg/k8sutils/finalizer.go @@ -5,13 +5,13 @@ import ( "fmt" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" - "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/utils/env" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" ) const ( @@ -22,17 +22,17 @@ const ( ) // HandleRedisFinalizer finalize resource if instance is marked to be deleted -func HandleRedisFinalizer(ctx context.Context, ctrlclient client.Client, k8sClient kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.Redis) error { +func HandleRedisFinalizer(ctx context.Context, ctrlclient client.Client, k8sClient kubernetes.Interface, cr *redisv1beta2.Redis) error { if cr.GetDeletionTimestamp() != nil { if controllerutil.ContainsFinalizer(cr, RedisFinalizer) { if cr.Spec.Storage != nil && !cr.Spec.Storage.KeepAfterDelete { - if err := finalizeRedisPVC(ctx, k8sClient, logger, cr); err != nil { + if err := 
finalizeRedisPVC(ctx, k8sClient, cr); err != nil { return err } } controllerutil.RemoveFinalizer(cr, RedisFinalizer) if err := ctrlclient.Update(context.TODO(), cr); err != nil { - logger.Error(err, "Could not remove finalizer", "finalizer", RedisFinalizer) + log.FromContext(ctx).Error(err, "Could not remove finalizer", "finalizer", RedisFinalizer) return err } } @@ -41,17 +41,17 @@ func HandleRedisFinalizer(ctx context.Context, ctrlclient client.Client, k8sClie } // HandleRedisClusterFinalizer finalize resource if instance is marked to be deleted -func HandleRedisClusterFinalizer(ctx context.Context, ctrlclient client.Client, k8sClient kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) error { +func HandleRedisClusterFinalizer(ctx context.Context, ctrlclient client.Client, k8sClient kubernetes.Interface, cr *redisv1beta2.RedisCluster) error { if cr.GetDeletionTimestamp() != nil { if controllerutil.ContainsFinalizer(cr, RedisClusterFinalizer) { if cr.Spec.Storage != nil && !cr.Spec.Storage.KeepAfterDelete { - if err := finalizeRedisClusterPVC(ctx, k8sClient, logger, cr); err != nil { + if err := finalizeRedisClusterPVC(ctx, k8sClient, cr); err != nil { return err } } controllerutil.RemoveFinalizer(cr, RedisClusterFinalizer) if err := ctrlclient.Update(context.TODO(), cr); err != nil { - logger.Error(err, "Could not remove finalizer "+RedisClusterFinalizer) + log.FromContext(ctx).Error(err, "Could not remove finalizer "+RedisClusterFinalizer) return err } } @@ -60,17 +60,17 @@ func HandleRedisClusterFinalizer(ctx context.Context, ctrlclient client.Client, } // Handle RedisReplicationFinalizer finalize resource if instance is marked to be deleted -func HandleRedisReplicationFinalizer(ctx context.Context, ctrlclient client.Client, k8sClient kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication) error { +func HandleRedisReplicationFinalizer(ctx context.Context, ctrlclient client.Client, k8sClient kubernetes.Interface, cr *redisv1beta2.RedisReplication) error { if cr.GetDeletionTimestamp() != nil { if controllerutil.ContainsFinalizer(cr, RedisReplicationFinalizer) { if cr.Spec.Storage != nil && !cr.Spec.Storage.KeepAfterDelete { - if err := finalizeRedisReplicationPVC(ctx, k8sClient, logger, cr); err != nil { + if err := finalizeRedisReplicationPVC(ctx, k8sClient, cr); err != nil { return err } } controllerutil.RemoveFinalizer(cr, RedisReplicationFinalizer) if err := ctrlclient.Update(context.TODO(), cr); err != nil { - logger.Error(err, "Could not remove finalizer "+RedisReplicationFinalizer) + log.FromContext(ctx).Error(err, "Could not remove finalizer "+RedisReplicationFinalizer) return err } } @@ -79,12 +79,12 @@ func HandleRedisReplicationFinalizer(ctx context.Context, ctrlclient client.Clie } // HandleRedisSentinelFinalizer finalize resource if instance is marked to be deleted -func HandleRedisSentinelFinalizer(ctx context.Context, ctrlclient client.Client, logger logr.Logger, cr *redisv1beta2.RedisSentinel) error { +func HandleRedisSentinelFinalizer(ctx context.Context, ctrlclient client.Client, cr *redisv1beta2.RedisSentinel) error { if cr.GetDeletionTimestamp() != nil { if controllerutil.ContainsFinalizer(cr, RedisSentinelFinalizer) { controllerutil.RemoveFinalizer(cr, RedisSentinelFinalizer) if err := ctrlclient.Update(context.TODO(), cr); err != nil { - logger.Error(err, "Could not remove finalizer "+RedisSentinelFinalizer) + log.FromContext(ctx).Error(err, "Could not remove finalizer "+RedisSentinelFinalizer) return err } } @@ -102,26 
+102,26 @@ func AddFinalizer(ctx context.Context, cr client.Object, finalizer string, cl cl } // finalizeRedisPVC delete PVC -func finalizeRedisPVC(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.Redis) error { +func finalizeRedisPVC(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.Redis) error { pvcTemplateName := env.GetString(EnvOperatorSTSPVCTemplateName, cr.Name) PVCName := fmt.Sprintf("%s-%s-0", pvcTemplateName, cr.Name) err := client.CoreV1().PersistentVolumeClaims(cr.Namespace).Delete(context.TODO(), PVCName, metav1.DeleteOptions{}) if err != nil && !errors.IsNotFound(err) { - logger.Error(err, "Could not delete Persistent Volume Claim", "PVCName", PVCName) + log.FromContext(ctx).Error(err, "Could not delete Persistent Volume Claim", "PVCName", PVCName) return err } return nil } // finalizeRedisClusterPVC delete PVCs -func finalizeRedisClusterPVC(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) error { +func finalizeRedisClusterPVC(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) error { for _, role := range []string{"leader", "follower"} { for i := 0; i < int(cr.Spec.GetReplicaCounts(role)); i++ { pvcTemplateName := env.GetString(EnvOperatorSTSPVCTemplateName, cr.Name+"-"+role) PVCName := fmt.Sprintf("%s-%s-%s-%d", pvcTemplateName, cr.Name, role, i) err := client.CoreV1().PersistentVolumeClaims(cr.Namespace).Delete(context.TODO(), PVCName, metav1.DeleteOptions{}) if err != nil && !errors.IsNotFound(err) { - logger.Error(err, "Could not delete Persistent Volume Claim "+PVCName) + log.FromContext(ctx).Error(err, "Could not delete Persistent Volume Claim "+PVCName) return err } } @@ -130,7 +130,7 @@ func finalizeRedisClusterPVC(ctx context.Context, client kubernetes.Interface, l PVCName := fmt.Sprintf("%s-%s-%s-%d", "node-conf", cr.Name, role, i) err := client.CoreV1().PersistentVolumeClaims(cr.Namespace).Delete(context.TODO(), PVCName, metav1.DeleteOptions{}) if err != nil && !errors.IsNotFound(err) { - logger.Error(err, "Could not delete Persistent Volume Claim "+PVCName) + log.FromContext(ctx).Error(err, "Could not delete Persistent Volume Claim "+PVCName) return err } } @@ -140,13 +140,13 @@ func finalizeRedisClusterPVC(ctx context.Context, client kubernetes.Interface, l } // finalizeRedisReplicationPVC delete PVCs -func finalizeRedisReplicationPVC(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication) error { +func finalizeRedisReplicationPVC(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisReplication) error { for i := 0; i < int(cr.Spec.GetReplicationCounts("replication")); i++ { pvcTemplateName := env.GetString(EnvOperatorSTSPVCTemplateName, cr.Name) PVCName := fmt.Sprintf("%s-%s-%d", pvcTemplateName, cr.Name, i) err := client.CoreV1().PersistentVolumeClaims(cr.Namespace).Delete(context.TODO(), PVCName, metav1.DeleteOptions{}) if err != nil && !errors.IsNotFound(err) { - logger.Error(err, "Could not delete Persistent Volume Claim "+PVCName) + log.FromContext(ctx).Error(err, "Could not delete Persistent Volume Claim "+PVCName) return err } } diff --git a/pkg/k8sutils/finalizer_test.go b/pkg/k8sutils/finalizer_test.go index 8e2e67816..3f6e7cfb2 100644 --- a/pkg/k8sutils/finalizer_test.go +++ b/pkg/k8sutils/finalizer_test.go @@ -9,7 +9,6 @@ import ( "github.com/OT-CONTAINER-KIT/redis-operator/api" "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" mockClient 
"github.com/OT-CONTAINER-KIT/redis-operator/mocks/client" - "github.com/go-logr/logr/testr" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -132,7 +131,7 @@ func TestHandleRedisFinalizer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - logger := testr.New(t) + var k8sClient *k8sClientFake.Clientset if tc.existingPVC != nil { k8sClient = k8sClientFake.NewSimpleClientset(tc.existingPVC.DeepCopyObject()) @@ -147,7 +146,7 @@ func TestHandleRedisFinalizer(t *testing.T) { assert.NoError(t, err) } - err := HandleRedisFinalizer(context.TODO(), tc.mockClient, k8sClient, logger, tc.cr) + err := HandleRedisFinalizer(context.TODO(), tc.mockClient, k8sClient, tc.cr) if tc.expectError { assert.Error(t, err) } else { @@ -262,7 +261,7 @@ func TestHandleRedisClusterFinalizer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - logger := testr.New(t) + var k8sClient *k8sClientFake.Clientset if tc.existingPVC != nil { k8sClient = k8sClientFake.NewSimpleClientset(helperToRuntimeObjects(tc.existingPVC)...) @@ -279,7 +278,7 @@ func TestHandleRedisClusterFinalizer(t *testing.T) { } } - err := HandleRedisClusterFinalizer(context.TODO(), tc.mockClient, k8sClient, logger, tc.cr) + err := HandleRedisClusterFinalizer(context.TODO(), tc.mockClient, k8sClient, tc.cr) if tc.expectError { assert.Error(t, err) } else { @@ -461,7 +460,7 @@ func TestHandleRedisReplicationFinalizer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - logger := testr.New(t) + var k8sClient *k8sClientFake.Clientset if tc.existingPVC != nil { k8sClient = k8sClientFake.NewSimpleClientset(helperToRuntimeObjects(tc.existingPVC)...) @@ -478,7 +477,7 @@ func TestHandleRedisReplicationFinalizer(t *testing.T) { } } - err := HandleRedisReplicationFinalizer(context.TODO(), tc.mockClient, k8sClient, logger, tc.cr) + err := HandleRedisReplicationFinalizer(context.TODO(), tc.mockClient, k8sClient, tc.cr) if tc.expectError { assert.Error(t, err) } else { @@ -557,8 +556,8 @@ func TestHandleRedisSentinelFinalizer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - logger := testr.New(t) - err := HandleRedisSentinelFinalizer(context.TODO(), tc.mockClient, logger, tc.cr) + + err := HandleRedisSentinelFinalizer(context.TODO(), tc.mockClient, tc.cr) if tc.expectError { assert.Error(t, err) } else { @@ -597,7 +596,7 @@ func TestFinalizeRedisPVC(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - logger := testr.New(t) + cr := &v1beta2.Redis{ ObjectMeta: metav1.ObjectMeta{ Name: "test-redis", @@ -618,7 +617,7 @@ func TestFinalizeRedisPVC(t *testing.T) { assert.NoError(t, err) } - err := finalizeRedisPVC(context.TODO(), k8sClient, logger, cr) + err := finalizeRedisPVC(context.TODO(), k8sClient, cr) if tc.expectError { assert.Error(t, err) assert.Equal(t, tc.errorExpected, err) @@ -694,7 +693,7 @@ func TestFinalizeRedisReplicationPVC(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - logger := testr.New(t) + var k8sClient *k8sClientFake.Clientset if tc.existingPVCs != nil { k8sClient = k8sClientFake.NewSimpleClientset(helperToRuntimeObjects(tc.existingPVCs)...) 
@@ -702,7 +701,7 @@ func TestFinalizeRedisReplicationPVC(t *testing.T) { k8sClient = k8sClientFake.NewSimpleClientset() } - err := finalizeRedisReplicationPVC(context.TODO(), k8sClient, logger, tc.redisReplication) + err := finalizeRedisReplicationPVC(context.TODO(), k8sClient, tc.redisReplication) if tc.expectError { assert.Error(t, err) } else { @@ -765,7 +764,7 @@ func TestFinalizeRedisClusterPVC(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - logger := testr.New(t) + var k8sClient *k8sClientFake.Clientset if tc.existingPVCs != nil { k8sClient = k8sClientFake.NewSimpleClientset(helperToRuntimeObjects(tc.existingPVCs)...) @@ -773,7 +772,7 @@ func TestFinalizeRedisClusterPVC(t *testing.T) { k8sClient = k8sClientFake.NewSimpleClientset() } - err := finalizeRedisClusterPVC(context.TODO(), k8sClient, logger, tc.redisCluster) + err := finalizeRedisClusterPVC(context.TODO(), k8sClient, tc.redisCluster) if tc.expectError { assert.Error(t, err) } else { diff --git a/pkg/k8sutils/poddisruption.go b/pkg/k8sutils/poddisruption.go index 18ea93e43..395d2eaab 100644 --- a/pkg/k8sutils/poddisruption.go +++ b/pkg/k8sutils/poddisruption.go @@ -3,11 +3,11 @@ package k8sutils import ( "context" "fmt" + "sigs.k8s.io/controller-runtime/pkg/log" commonapi "github.com/OT-CONTAINER-KIT/redis-operator/api" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" "github.com/banzaicloud/k8s-objectmatcher/patch" - "github.com/go-logr/logr" policyv1 "k8s.io/api/policy/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -18,20 +18,19 @@ import ( // CreateRedisLeaderPodDisruptionBudget check and create a PodDisruptionBudget for Leaders func ReconcileRedisPodDisruptionBudget(ctx context.Context, cr *redisv1beta2.RedisCluster, role string, pdbParams *commonapi.RedisPodDisruptionBudget, cl kubernetes.Interface) error { pdbName := cr.ObjectMeta.Name + "-" + role - logger := pdbLogger(cr.Namespace, pdbName) if pdbParams != nil && pdbParams.Enabled { labels := getRedisLabels(cr.ObjectMeta.Name, cluster, role, cr.ObjectMeta.GetLabels()) annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations) pdbMeta := generateObjectMetaInformation(pdbName, cr.Namespace, labels, annotations) pdbDef := generatePodDisruptionBudgetDef(ctx, cr, role, pdbMeta, cr.Spec.RedisLeader.PodDisruptionBudget) - return CreateOrUpdatePodDisruptionBudget(pdbDef, cl) + return CreateOrUpdatePodDisruptionBudget(ctx, pdbDef, cl) } else { // Check if one exists, and delete it. 
- _, err := GetPodDisruptionBudget(cr.Namespace, pdbName, cl) + _, err := GetPodDisruptionBudget(ctx, cr.Namespace, pdbName, cl) if err == nil { - return deletePodDisruptionBudget(cr.Namespace, pdbName, cl) + return deletePodDisruptionBudget(ctx, cr.Namespace, pdbName, cl) } else if err != nil && errors.IsNotFound(err) { - logger.V(1).Info("Reconciliation Successful, no PodDisruptionBudget Found.") + log.FromContext(ctx).Info("Reconciliation Successful, no PodDisruptionBudget Found.") // Its ok if its not found, as we're deleting anyway return nil } @@ -41,20 +40,19 @@ func ReconcileRedisPodDisruptionBudget(ctx context.Context, cr *redisv1beta2.Red func ReconcileSentinelPodDisruptionBudget(ctx context.Context, cr *redisv1beta2.RedisSentinel, pdbParams *commonapi.RedisPodDisruptionBudget, cl kubernetes.Interface) error { pdbName := cr.ObjectMeta.Name + "-sentinel" - logger := pdbLogger(cr.Namespace, pdbName) if pdbParams != nil && pdbParams.Enabled { labels := getRedisLabels(cr.ObjectMeta.Name, sentinel, "sentinel", cr.ObjectMeta.GetLabels()) annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations) pdbMeta := generateObjectMetaInformation(pdbName, cr.Namespace, labels, annotations) pdbDef := generateSentinelPodDisruptionBudgetDef(ctx, cr, "sentinel", pdbMeta, pdbParams) - return CreateOrUpdatePodDisruptionBudget(pdbDef, cl) + return CreateOrUpdatePodDisruptionBudget(ctx, pdbDef, cl) } else { // Check if one exists, and delete it. - _, err := GetPodDisruptionBudget(cr.Namespace, pdbName, cl) + _, err := GetPodDisruptionBudget(ctx, cr.Namespace, pdbName, cl) if err == nil { - return deletePodDisruptionBudget(cr.Namespace, pdbName, cl) + return deletePodDisruptionBudget(ctx, cr.Namespace, pdbName, cl) } else if err != nil && errors.IsNotFound(err) { - logger.V(1).Info("Reconciliation Successful, no PodDisruptionBudget Found.") + log.FromContext(ctx).Info("Reconciliation Successful, no PodDisruptionBudget Found.") // Its ok if its not found, as we're deleting anyway return nil } @@ -117,25 +115,23 @@ func generateSentinelPodDisruptionBudgetDef(ctx context.Context, cr *redisv1beta } // CreateOrUpdateService method will create or update Redis service -func CreateOrUpdatePodDisruptionBudget(pdbDef *policyv1.PodDisruptionBudget, cl kubernetes.Interface) error { - logger := pdbLogger(pdbDef.Namespace, pdbDef.Name) - storedPDB, err := GetPodDisruptionBudget(pdbDef.Namespace, pdbDef.Name, cl) +func CreateOrUpdatePodDisruptionBudget(ctx context.Context, pdbDef *policyv1.PodDisruptionBudget, cl kubernetes.Interface) error { + storedPDB, err := GetPodDisruptionBudget(ctx, pdbDef.Namespace, pdbDef.Name, cl) if err != nil { if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(pdbDef); err != nil { //nolint - logger.Error(err, "Unable to patch redis PodDisruptionBudget with comparison object") + log.FromContext(ctx).Error(err, "Unable to patch redis PodDisruptionBudget with comparison object") return err } if errors.IsNotFound(err) { - return createPodDisruptionBudget(pdbDef.Namespace, pdbDef, cl) + return createPodDisruptionBudget(ctx, pdbDef.Namespace, pdbDef, cl) } return err } - return patchPodDisruptionBudget(storedPDB, pdbDef, pdbDef.Namespace, cl) + return patchPodDisruptionBudget(ctx, storedPDB, pdbDef, pdbDef.Namespace, cl) } // patchPodDisruptionBudget will patch Redis Kubernetes PodDisruptionBudgets -func patchPodDisruptionBudget(storedPdb *policyv1.PodDisruptionBudget, newPdb *policyv1.PodDisruptionBudget, namespace string, cl kubernetes.Interface) 
error { - logger := pdbLogger(namespace, storedPdb.Name) +func patchPodDisruptionBudget(ctx context.Context, storedPdb *policyv1.PodDisruptionBudget, newPdb *policyv1.PodDisruptionBudget, namespace string, cl kubernetes.Interface) error { // We want to try and keep this atomic as possible. newPdb.ResourceVersion = storedPdb.ResourceVersion newPdb.CreationTimestamp = storedPdb.CreationTimestamp @@ -151,11 +147,11 @@ func patchPodDisruptionBudget(storedPdb *policyv1.PodDisruptionBudget, newPdb *p patch.IgnoreStatusFields(), ) if err != nil { - logger.Error(err, "Unable to patch redis PodDisruption with comparison object") + log.FromContext(ctx).Error(err, "Unable to patch redis PodDisruption with comparison object") return err } if !patchResult.IsEmpty() { - logger.V(1).Info("Changes in PodDisruptionBudget Detected, Updating...", + log.FromContext(ctx).Info("Changes in PodDisruptionBudget Detected, Updating...", "patch", string(patchResult.Patch), "Current", string(patchResult.Current), "Original", string(patchResult.Original), @@ -167,67 +163,57 @@ func patchPodDisruptionBudget(storedPdb *policyv1.PodDisruptionBudget, newPdb *p } } if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(newPdb); err != nil { - logger.Error(err, "Unable to patch redis PodDisruptionBudget with comparison object") + log.FromContext(ctx).Error(err, "Unable to patch redis PodDisruptionBudget with comparison object") return err } - return updatePodDisruptionBudget(namespace, newPdb, cl) + return updatePodDisruptionBudget(ctx, namespace, newPdb, cl) } return nil } // createPodDisruptionBudget is a method to create PodDisruptionBudgets in Kubernetes -func createPodDisruptionBudget(namespace string, pdb *policyv1.PodDisruptionBudget, cl kubernetes.Interface) error { - logger := pdbLogger(namespace, pdb.Name) +func createPodDisruptionBudget(ctx context.Context, namespace string, pdb *policyv1.PodDisruptionBudget, cl kubernetes.Interface) error { _, err := cl.PolicyV1().PodDisruptionBudgets(namespace).Create(context.TODO(), pdb, metav1.CreateOptions{}) if err != nil { - logger.Error(err, "Redis PodDisruptionBudget creation failed") + log.FromContext(ctx).Error(err, "Redis PodDisruptionBudget creation failed") return err } - logger.V(1).Info("Redis PodDisruptionBudget creation was successful") + log.FromContext(ctx).Info("Redis PodDisruptionBudget creation was successful") return nil } // updatePodDisruptionBudget is a method to update PodDisruptionBudgets in Kubernetes -func updatePodDisruptionBudget(namespace string, pdb *policyv1.PodDisruptionBudget, cl kubernetes.Interface) error { - logger := pdbLogger(namespace, pdb.Name) +func updatePodDisruptionBudget(ctx context.Context, namespace string, pdb *policyv1.PodDisruptionBudget, cl kubernetes.Interface) error { _, err := cl.PolicyV1().PodDisruptionBudgets(namespace).Update(context.TODO(), pdb, metav1.UpdateOptions{}) if err != nil { - logger.Error(err, "Redis PodDisruptionBudget update failed") + log.FromContext(ctx).Error(err, "Redis PodDisruptionBudget update failed") return err } - logger.V(1).Info("Redis PodDisruptionBudget update was successful", "PDB.Spec", pdb.Spec) + log.FromContext(ctx).Info("Redis PodDisruptionBudget update was successful", "PDB.Spec", pdb.Spec) return nil } // deletePodDisruptionBudget is a method to delete PodDisruptionBudgets in Kubernetes -func deletePodDisruptionBudget(namespace string, pdbName string, cl kubernetes.Interface) error { - logger := pdbLogger(namespace, pdbName) +func deletePodDisruptionBudget(ctx context.Context, 
namespace string, pdbName string, cl kubernetes.Interface) error { err := cl.PolicyV1().PodDisruptionBudgets(namespace).Delete(context.TODO(), pdbName, metav1.DeleteOptions{}) if err != nil { - logger.Error(err, "Redis PodDisruption deletion failed") + log.FromContext(ctx).Error(err, "Redis PodDisruption deletion failed") return err } - logger.V(1).Info("Redis PodDisruption delete was successful") + log.FromContext(ctx).Info("Redis PodDisruption delete was successful") return nil } // GetPodDisruptionBudget is a method to get PodDisruptionBudgets in Kubernetes -func GetPodDisruptionBudget(namespace string, pdb string, cl kubernetes.Interface) (*policyv1.PodDisruptionBudget, error) { - logger := pdbLogger(namespace, pdb) +func GetPodDisruptionBudget(ctx context.Context, namespace string, pdb string, cl kubernetes.Interface) (*policyv1.PodDisruptionBudget, error) { getOpts := metav1.GetOptions{ TypeMeta: generateMetaInformation("PodDisruptionBudget", "policy/v1"), } pdbInfo, err := cl.PolicyV1().PodDisruptionBudgets(namespace).Get(context.TODO(), pdb, getOpts) if err != nil { - logger.V(1).Info("Redis PodDisruptionBudget get action failed") + log.FromContext(ctx).Info("Redis PodDisruptionBudget get action failed") return nil, err } - logger.V(1).Info("Redis PodDisruptionBudget get action was successful") + log.FromContext(ctx).Info("Redis PodDisruptionBudget get action was successful") return pdbInfo, err } - -// pdbLogger will generate logging interface for PodDisruptionBudgets -func pdbLogger(namespace string, name string) logr.Logger { - reqLogger := log.WithValues("Request.PodDisruptionBudget.Namespace", namespace, "Request.PodDisruptionBudget.Name", name) - return reqLogger -} diff --git a/pkg/k8sutils/redis-cluster.go b/pkg/k8sutils/redis-cluster.go index bebc97738..1b2c73219 100644 --- a/pkg/k8sutils/redis-cluster.go +++ b/pkg/k8sutils/redis-cluster.go @@ -2,12 +2,12 @@ package k8sutils import ( "context" + "sigs.k8s.io/controller-runtime/pkg/log" "strconv" "strings" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/util" - "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes" @@ -106,7 +106,7 @@ func generateRedisClusterInitContainerParams(cr *redisv1beta2.RedisCluster) init } // generateRedisClusterContainerParams generates Redis container information -func generateRedisClusterContainerParams(ctx context.Context, cl kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, securityContext *corev1.SecurityContext, readinessProbeDef *corev1.Probe, livenessProbeDef *corev1.Probe, role string) containerParameters { +func generateRedisClusterContainerParams(ctx context.Context, cl kubernetes.Interface, cr *redisv1beta2.RedisCluster, securityContext *corev1.SecurityContext, readinessProbeDef *corev1.Probe, livenessProbeDef *corev1.Probe, role string) containerParameters { trueProperty := true falseProperty := false containerProp := containerParameters{ @@ -142,9 +142,9 @@ func generateRedisClusterContainerParams(ctx context.Context, cl kubernetes.Inte nps := map[string]ports{} // pod name to ports replicas := cr.Spec.GetReplicaCounts(role) for i := 0; i < int(replicas); i++ { - svc, err := getService(cl, logger, cr.Namespace, cr.ObjectMeta.Name+"-"+role+"-"+strconv.Itoa(i)) + svc, err := getService(ctx, cl, cr.Namespace, cr.ObjectMeta.Name+"-"+role+"-"+strconv.Itoa(i)) if err != nil { - log.Error(err, "Cannot get service for Redis", 
"Setup.Type", role) + log.FromContext(ctx).Error(err, "Cannot get service for Redis", "Setup.Type", role) } else { nps[svc.Name] = ports{ announcePort: int(svc.Spec.Ports[0].NodePort), @@ -270,24 +270,22 @@ func (service RedisClusterSTS) getReplicaCount(cr *redisv1beta2.RedisCluster) in // CreateRedisClusterSetup will create Redis Setup for leader and follower func (service RedisClusterSTS) CreateRedisClusterSetup(ctx context.Context, cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { stateFulName := cr.ObjectMeta.Name + "-" + service.RedisStateFulType - logger := statefulSetLogger(cr.Namespace, stateFulName) labels := getRedisLabels(stateFulName, cluster, service.RedisStateFulType, cr.ObjectMeta.Labels) annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations) objectMetaInfo := generateObjectMetaInformation(stateFulName, cr.Namespace, labels, annotations) err := CreateOrUpdateStateFul( ctx, cl, - logger, cr.GetNamespace(), objectMetaInfo, generateRedisClusterParams(ctx, cr, service.getReplicaCount(cr), service.ExternalConfig, service), redisClusterAsOwner(cr), generateRedisClusterInitContainerParams(cr), - generateRedisClusterContainerParams(ctx, cl, logger, cr, service.SecurityContext, service.ReadinessProbe, service.LivenessProbe, service.RedisStateFulType), + generateRedisClusterContainerParams(ctx, cl, cr, service.SecurityContext, service.ReadinessProbe, service.LivenessProbe, service.RedisStateFulType), cr.Spec.Sidecars, ) if err != nil { - logger.Error(err, "Cannot create statefulset for Redis", "Setup.Type", service.RedisStateFulType) + log.FromContext(ctx).Error(err, "Cannot create statefulset for Redis", "Setup.Type", service.RedisStateFulType) return err } return nil @@ -296,7 +294,6 @@ func (service RedisClusterSTS) CreateRedisClusterSetup(ctx context.Context, cr * // CreateRedisClusterService method will create service for Redis func (service RedisClusterService) CreateRedisClusterService(ctx context.Context, cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { serviceName := cr.ObjectMeta.Name + "-" + service.RedisServiceRole - logger := serviceLogger(cr.Namespace, serviceName) labels := getRedisLabels(serviceName, cluster, service.RedisServiceRole, cr.ObjectMeta.Labels) var epp exporterPortProvider if cr.Spec.RedisExporter != nil { @@ -311,14 +308,14 @@ func (service RedisClusterService) CreateRedisClusterService(ctx context.Context objectMetaInfo := generateObjectMetaInformation(serviceName, cr.Namespace, labels, annotations) headlessObjectMetaInfo := generateObjectMetaInformation(serviceName+"-headless", cr.Namespace, labels, annotations) additionalObjectMetaInfo := generateObjectMetaInformation(serviceName+"-additional", cr.Namespace, labels, generateServiceAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.GetServiceAnnotations(), epp)) - err := CreateOrUpdateService(cr.Namespace, headlessObjectMetaInfo, redisClusterAsOwner(cr), disableMetrics, true, "ClusterIP", *cr.Spec.Port, cl) + err := CreateOrUpdateService(ctx, cr.Namespace, headlessObjectMetaInfo, redisClusterAsOwner(cr), disableMetrics, true, "ClusterIP", *cr.Spec.Port, cl) if err != nil { - logger.Error(err, "Cannot create headless service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create headless service for Redis", "Setup.Type", service.RedisServiceRole) return err } - err = CreateOrUpdateService(cr.Namespace, objectMetaInfo, redisClusterAsOwner(cr), epp, false, "ClusterIP", *cr.Spec.Port, cl) + err 
= CreateOrUpdateService(ctx, cr.Namespace, objectMetaInfo, redisClusterAsOwner(cr), epp, false, "ClusterIP", *cr.Spec.Port, cl) if err != nil { - logger.Error(err, "Cannot create service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create service for Redis", "Setup.Type", service.RedisServiceRole) return err } additionalServiceType := cr.Spec.KubernetesConfig.GetServiceType() @@ -327,13 +324,13 @@ func (service RedisClusterService) CreateRedisClusterService(ctx context.Context // Then use --cluster-announce-ip --cluster-announce-port --cluster-announce-bus-port to make cluster. err = service.createOrUpdateClusterNodePortService(ctx, cr, cl) if err != nil { - logger.Error(err, "Cannot create nodeport service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create nodeport service for Redis", "Setup.Type", service.RedisServiceRole) return err } } - err = CreateOrUpdateService(cr.Namespace, additionalObjectMetaInfo, redisClusterAsOwner(cr), disableMetrics, false, additionalServiceType, *cr.Spec.Port, cl) + err = CreateOrUpdateService(ctx, cr.Namespace, additionalObjectMetaInfo, redisClusterAsOwner(cr), disableMetrics, false, additionalServiceType, *cr.Spec.Port, cl) if err != nil { - logger.Error(err, "Cannot create additional service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create additional service for Redis", "Setup.Type", service.RedisServiceRole) return err } return nil @@ -344,7 +341,6 @@ func (service RedisClusterService) createOrUpdateClusterNodePortService(ctx cont for i := 0; i < int(replicas); i++ { serviceName := cr.ObjectMeta.Name + "-" + service.RedisServiceRole + "-" + strconv.Itoa(i) - logger := serviceLogger(cr.Namespace, serviceName) labels := getRedisLabels(cr.ObjectMeta.Name+"-"+service.RedisServiceRole, cluster, service.RedisServiceRole, map[string]string{ "statefulset.kubernetes.io/pod-name": serviceName, }) @@ -359,9 +355,9 @@ func (service RedisClusterService) createOrUpdateClusterNodePortService(ctx cont IntVal: int32(*cr.Spec.Port + 10000), }, } - err := CreateOrUpdateService(cr.Namespace, objectMetaInfo, redisClusterAsOwner(cr), disableMetrics, false, "NodePort", *cr.Spec.Port, cl, busPort) + err := CreateOrUpdateService(ctx, cr.Namespace, objectMetaInfo, redisClusterAsOwner(cr), disableMetrics, false, "NodePort", *cr.Spec.Port, cl, busPort) if err != nil { - logger.Error(err, "Cannot create nodeport service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create nodeport service for Redis", "Setup.Type", service.RedisServiceRole) return err } } diff --git a/pkg/k8sutils/redis-cluster_test.go b/pkg/k8sutils/redis-cluster_test.go index 84415b0b6..90993475d 100644 --- a/pkg/k8sutils/redis-cluster_test.go +++ b/pkg/k8sutils/redis-cluster_test.go @@ -8,7 +8,6 @@ import ( common "github.com/OT-CONTAINER-KIT/redis-operator/api" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" - "github.com/go-logr/logr/testr" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -431,12 +430,11 @@ func Test_generateRedisClusterContainerParams(t *testing.T) { if err != nil { t.Fatalf("Failed to unmarshal file %s: %v", path, err) } - logger := testr.New(t) - actualLeaderContainer := generateRedisClusterContainerParams(context.TODO(), fake.NewSimpleClientset(), logger, input, input.Spec.RedisLeader.SecurityContext, 
input.Spec.RedisLeader.ReadinessProbe, input.Spec.RedisLeader.LivenessProbe, "leader") + actualLeaderContainer := generateRedisClusterContainerParams(context.TODO(), fake.NewSimpleClientset(), input, input.Spec.RedisLeader.SecurityContext, input.Spec.RedisLeader.ReadinessProbe, input.Spec.RedisLeader.LivenessProbe, "leader") assert.EqualValues(t, expectedLeaderContainer, actualLeaderContainer, "Expected %+v, got %+v", expectedLeaderContainer, actualLeaderContainer) - actualFollowerContainer := generateRedisClusterContainerParams(context.TODO(), fake.NewSimpleClientset(), logger, input, input.Spec.RedisFollower.SecurityContext, input.Spec.RedisFollower.ReadinessProbe, input.Spec.RedisFollower.LivenessProbe, "follower") + actualFollowerContainer := generateRedisClusterContainerParams(context.TODO(), fake.NewSimpleClientset(), input, input.Spec.RedisFollower.SecurityContext, input.Spec.RedisFollower.ReadinessProbe, input.Spec.RedisFollower.LivenessProbe, "follower") assert.EqualValues(t, expectedFollowerContainer, actualFollowerContainer, "Expected %+v, got %+v", expectedFollowerContainer, actualFollowerContainer) } diff --git a/pkg/k8sutils/redis-replication.go b/pkg/k8sutils/redis-replication.go index 14a2363c5..e288b64f1 100644 --- a/pkg/k8sutils/redis-replication.go +++ b/pkg/k8sutils/redis-replication.go @@ -2,10 +2,10 @@ package k8sutils import ( "context" + "sigs.k8s.io/controller-runtime/pkg/log" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/util" - "github.com/go-logr/logr" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/utils/ptr" @@ -13,7 +13,6 @@ import ( // CreateReplicationService method will create replication service for Redis func CreateReplicationService(ctx context.Context, cr *redisv1beta2.RedisReplication, cl kubernetes.Interface) error { - logger := serviceLogger(cr.Namespace, cr.ObjectMeta.Name) labels := getRedisLabels(cr.ObjectMeta.Name, replication, "replication", cr.ObjectMeta.Labels) epp := disableMetrics @@ -37,24 +36,24 @@ func CreateReplicationService(ctx context.Context, cr *redisv1beta2.RedisReplica masterObjectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name+"-master", cr.Namespace, masterLabels, annotations) replicaObjectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name+"-replica", cr.Namespace, replicaLabels, annotations) - if err := CreateOrUpdateService(cr.Namespace, headlessObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, true, "ClusterIP", redisPort, cl); err != nil { - logger.Error(err, "Cannot create replication headless service for Redis") + if err := CreateOrUpdateService(ctx, cr.Namespace, headlessObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, true, "ClusterIP", redisPort, cl); err != nil { + log.FromContext(ctx).Error(err, "Cannot create replication headless service for Redis") return err } - if err := CreateOrUpdateService(cr.Namespace, objectMetaInfo, redisReplicationAsOwner(cr), epp, false, "ClusterIP", redisPort, cl); err != nil { - logger.Error(err, "Cannot create replication service for Redis") + if err := CreateOrUpdateService(ctx, cr.Namespace, objectMetaInfo, redisReplicationAsOwner(cr), epp, false, "ClusterIP", redisPort, cl); err != nil { + log.FromContext(ctx).Error(err, "Cannot create replication service for Redis") return err } - if err := CreateOrUpdateService(cr.Namespace, additionalObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, false, cr.Spec.KubernetesConfig.GetServiceType(), 
redisPort, cl); err != nil { - logger.Error(err, "Cannot create additional service for Redis Replication") + if err := CreateOrUpdateService(ctx, cr.Namespace, additionalObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, false, cr.Spec.KubernetesConfig.GetServiceType(), redisPort, cl); err != nil { + log.FromContext(ctx).Error(err, "Cannot create additional service for Redis Replication") return err } - if err := CreateOrUpdateService(cr.Namespace, masterObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, false, "ClusterIP", redisPort, cl); err != nil { - logger.Error(err, "Cannot create master service for Redis") + if err := CreateOrUpdateService(ctx, cr.Namespace, masterObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, false, "ClusterIP", redisPort, cl); err != nil { + log.FromContext(ctx).Error(err, "Cannot create master service for Redis") return err } - if err := CreateOrUpdateService(cr.Namespace, replicaObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, false, "ClusterIP", redisPort, cl); err != nil { - logger.Error(err, "Cannot create replica service for Redis") + if err := CreateOrUpdateService(ctx, cr.Namespace, replicaObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, false, "ClusterIP", redisPort, cl); err != nil { + log.FromContext(ctx).Error(err, "Cannot create replica service for Redis") return err } @@ -64,7 +63,6 @@ func CreateReplicationService(ctx context.Context, cr *redisv1beta2.RedisReplica // CreateReplicationRedis will create a replication redis setup func CreateReplicationRedis(ctx context.Context, cr *redisv1beta2.RedisReplication, cl kubernetes.Interface) error { stateFulName := cr.ObjectMeta.Name - logger := statefulSetLogger(cr.Namespace, cr.ObjectMeta.Name) labels := getRedisLabels(cr.ObjectMeta.Name, replication, "replication", cr.ObjectMeta.Labels) annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations) objectMetaInfo := generateObjectMetaInformation(stateFulName, cr.Namespace, labels, annotations) @@ -72,7 +70,6 @@ func CreateReplicationRedis(ctx context.Context, cr *redisv1beta2.RedisReplicati err := CreateOrUpdateStateFul( ctx, cl, - logger, cr.GetNamespace(), objectMetaInfo, generateRedisReplicationParams(cr), @@ -82,7 +79,7 @@ func CreateReplicationRedis(ctx context.Context, cr *redisv1beta2.RedisReplicati cr.Spec.Sidecars, ) if err != nil { - logger.Error(err, "Cannot create replication statefulset for Redis") + log.FromContext(ctx).Error(err, "Cannot create replication statefulset for Redis") return err } return nil @@ -217,9 +214,9 @@ func generateRedisReplicationInitContainerParams(cr *redisv1beta2.RedisReplicati return initcontainerProp } -func IsRedisReplicationReady(ctx context.Context, logger logr.Logger, client kubernetes.Interface, dClient dynamic.Interface, rs *redisv1beta2.RedisSentinel) bool { +func IsRedisReplicationReady(ctx context.Context, client kubernetes.Interface, dClient dynamic.Interface, rs *redisv1beta2.RedisSentinel) bool { // statefulset name the same as the redis replication name - sts, err := GetStatefulSet(ctx, client, logger, rs.GetNamespace(), rs.Spec.RedisSentinelConfig.RedisReplicationName) + sts, err := GetStatefulSet(ctx, client, rs.GetNamespace(), rs.Spec.RedisSentinelConfig.RedisReplicationName) if err != nil { return false } @@ -235,7 +232,7 @@ func IsRedisReplicationReady(ctx context.Context, logger logr.Logger, client kub // Enhanced check: When the pod is ready, it may not have been // created as part of a replication 
cluster, so we should verify // whether there is an actual master node. - if master := getRedisReplicationMasterIP(ctx, client, logger, rs, dClient); master == "" { + if master := getRedisReplicationMasterIP(ctx, client, rs, dClient); master == "" { return false } return true diff --git a/pkg/k8sutils/redis-sentinel.go b/pkg/k8sutils/redis-sentinel.go index 12257d29a..73d29b6d3 100644 --- a/pkg/k8sutils/redis-sentinel.go +++ b/pkg/k8sutils/redis-sentinel.go @@ -4,10 +4,10 @@ import ( "context" "encoding/json" "errors" + "sigs.k8s.io/controller-runtime/pkg/log" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/util" - "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -36,7 +36,7 @@ type RedisReplicationObject struct { } // Redis Sentinel Create the Redis Sentinel Setup -func CreateRedisSentinel(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface, dcl dynamic.Interface) error { +func CreateRedisSentinel(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface, dcl dynamic.Interface) error { prop := RedisSentinelSTS{ RedisStateFulType: "sentinel", Affinity: cr.Spec.Affinity, @@ -49,7 +49,7 @@ func CreateRedisSentinel(ctx context.Context, client kubernetes.Interface, logge prop.ExternalConfig = cr.Spec.RedisSentinelConfig.AdditionalSentinelConfig } - return prop.CreateRedisSentinelSetup(ctx, client, logger, cr, cl, dcl) + return prop.CreateRedisSentinelSetup(ctx, client, cr, cl, dcl) } // Create RedisSentinel Service @@ -61,7 +61,7 @@ func CreateRedisSentinelService(ctx context.Context, cr *redisv1beta2.RedisSenti } // Create Redis Sentinel Cluster Setup -func (service RedisSentinelSTS) CreateRedisSentinelSetup(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface, dcl dynamic.Interface) error { +func (service RedisSentinelSTS) CreateRedisSentinelSetup(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface, dcl dynamic.Interface) error { stateFulName := cr.ObjectMeta.Name + "-" + service.RedisStateFulType labels := getRedisLabels(stateFulName, sentinel, service.RedisStateFulType, cr.ObjectMeta.Labels) annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations) @@ -69,17 +69,16 @@ func (service RedisSentinelSTS) CreateRedisSentinelSetup(ctx context.Context, cl err := CreateOrUpdateStateFul( ctx, cl, - logger, cr.GetNamespace(), objectMetaInfo, generateRedisSentinelParams(ctx, cr, service.getSentinelCount(cr), service.ExternalConfig, service.Affinity), redisSentinelAsOwner(cr), generateRedisSentinelInitContainerParams(cr), - generateRedisSentinelContainerParams(ctx, client, logger, cr, service.ReadinessProbe, service.LivenessProbe, dcl), + generateRedisSentinelContainerParams(ctx, client, cr, service.ReadinessProbe, service.LivenessProbe, dcl), cr.Spec.Sidecars, ) if err != nil { - logger.Error(err, "Cannot create Sentinel statefulset for Redis") + log.FromContext(ctx).Error(err, "Cannot create Sentinel statefulset for Redis") return err } return nil @@ -149,7 +148,7 @@ func generateRedisSentinelInitContainerParams(cr *redisv1beta2.RedisSentinel) in } // Create Redis Sentinel Statefulset Container Params -func generateRedisSentinelContainerParams(ctx 
context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, readinessProbeDef *corev1.Probe, livenessProbeDef *corev1.Probe, dcl dynamic.Interface) containerParameters { +func generateRedisSentinelContainerParams(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisSentinel, readinessProbeDef *corev1.Probe, livenessProbeDef *corev1.Probe, dcl dynamic.Interface) containerParameters { trueProperty := true falseProperty := false containerProp := containerParameters{ @@ -159,7 +158,7 @@ func generateRedisSentinelContainerParams(ctx context.Context, client kubernetes Resources: cr.Spec.KubernetesConfig.Resources, SecurityContext: cr.Spec.SecurityContext, Port: ptr.To(sentinelPort), - AdditionalEnvVariable: getSentinelEnvVariable(ctx, client, logger, cr, dcl), + AdditionalEnvVariable: getSentinelEnvVariable(ctx, client, cr, dcl), } if cr.Spec.EnvVars != nil { containerProp.EnvVars = cr.Spec.EnvVars @@ -209,7 +208,6 @@ func (service RedisSentinelSTS) getSentinelCount(cr *redisv1beta2.RedisSentinel) // Create the Service for redis sentinel func (service RedisSentinelService) CreateRedisSentinelService(ctx context.Context, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface) error { serviceName := cr.ObjectMeta.Name + "-" + service.RedisServiceRole - logger := serviceLogger(cr.Namespace, serviceName) labels := getRedisLabels(serviceName, sentinel, service.RedisServiceRole, cr.ObjectMeta.Labels) var epp exporterPortProvider if cr.Spec.RedisExporter != nil { @@ -225,18 +223,19 @@ func (service RedisSentinelService) CreateRedisSentinelService(ctx context.Conte headlessObjectMetaInfo := generateObjectMetaInformation(serviceName+"-headless", cr.Namespace, labels, annotations) additionalObjectMetaInfo := generateObjectMetaInformation(serviceName+"-additional", cr.Namespace, labels, generateServiceAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.GetServiceAnnotations(), epp)) - err := CreateOrUpdateService(cr.Namespace, headlessObjectMetaInfo, redisSentinelAsOwner(cr), disableMetrics, true, "ClusterIP", sentinelPort, cl) + err := CreateOrUpdateService(ctx, cr.Namespace, headlessObjectMetaInfo, redisSentinelAsOwner(cr), disableMetrics, true, "ClusterIP", sentinelPort, cl) if err != nil { - logger.Error(err, "Cannot create headless service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create headless service for Redis", "Setup.Type", service.RedisServiceRole) return err } - err = CreateOrUpdateService(cr.Namespace, objectMetaInfo, redisSentinelAsOwner(cr), epp, false, "ClusterIP", sentinelPort, cl) + err = CreateOrUpdateService(ctx, cr.Namespace, objectMetaInfo, redisSentinelAsOwner(cr), epp, false, "ClusterIP", sentinelPort, cl) if err != nil { - logger.Error(err, "Cannot create service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create service for Redis", "Setup.Type", service.RedisServiceRole) return err } err = CreateOrUpdateService( + ctx, cr.Namespace, additionalObjectMetaInfo, redisSentinelAsOwner(cr), @@ -247,13 +246,13 @@ func (service RedisSentinelService) CreateRedisSentinelService(ctx context.Conte cl, ) if err != nil { - logger.Error(err, "Cannot create additional service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create additional service for Redis", "Setup.Type", service.RedisServiceRole) return err } return nil } -func getSentinelEnvVariable(ctx context.Context, client 
kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, dcl dynamic.Interface) *[]corev1.EnvVar { +func getSentinelEnvVariable(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisSentinel, dcl dynamic.Interface) *[]corev1.EnvVar { if cr.Spec.RedisSentinelConfig == nil { return &[]corev1.EnvVar{} } @@ -265,7 +264,7 @@ func getSentinelEnvVariable(ctx context.Context, client kubernetes.Interface, lo }, { Name: "IP", - Value: getRedisReplicationMasterIP(ctx, client, logger, cr, dcl), + Value: getRedisReplicationMasterIP(ctx, client, cr, dcl), }, { Name: "PORT", @@ -298,7 +297,7 @@ func getSentinelEnvVariable(ctx context.Context, client kubernetes.Interface, lo return envVar } -func getRedisReplicationMasterIP(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, dcl dynamic.Interface) string { +func getRedisReplicationMasterIP(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisSentinel, dcl dynamic.Interface) string { replicationName := cr.Spec.RedisSentinelConfig.RedisReplicationName replicationNamespace := cr.Namespace @@ -313,41 +312,41 @@ func getRedisReplicationMasterIP(ctx context.Context, client kubernetes.Interfac }).Namespace(replicationNamespace).Get(context.TODO(), replicationName, v1.GetOptions{}) if err != nil { - logger.Error(err, "Failed to Execute Get Request", "replication name", replicationName, "namespace", replicationNamespace) + log.FromContext(ctx).Error(err, "Failed to Execute Get Request", "replication name", replicationName, "namespace", replicationNamespace) return "" } else { - logger.V(1).Info("Successfully Execute the Get Request", "replication name", replicationName, "namespace", replicationNamespace) + log.FromContext(ctx).Info("Successfully Execute the Get Request", "replication name", replicationName, "namespace", replicationNamespace) } // Marshal CustomObject to JSON replicationJSON, err := customObject.MarshalJSON() if err != nil { - logger.Error(err, "Failed To Load JSON") + log.FromContext(ctx).Error(err, "Failed To Load JSON") return "" } // Unmarshal The JSON on Object if err := json.Unmarshal(replicationJSON, &replicationInstance); err != nil { - logger.Error(err, "Failed To Unmarshal JSON over the Object") + log.FromContext(ctx).Error(err, "Failed To Unmarshal JSON over the Object") return "" } - masterPods := GetRedisNodesByRole(ctx, client, logger, &replicationInstance, "master") + masterPods := GetRedisNodesByRole(ctx, client, &replicationInstance, "master") if len(masterPods) == 0 { - logger.Error(errors.New("no master pods found"), "") + log.FromContext(ctx).Error(errors.New("no master pods found"), "") return "" } for _, podName := range masterPods { - redisClient := configureRedisReplicationClient(ctx, client, logger, &replicationInstance, podName) + redisClient := configureRedisReplicationClient(ctx, client, &replicationInstance, podName) defer redisClient.Close() - if checkAttachedSlave(ctx, redisClient, logger, podName) > 0 { + if checkAttachedSlave(ctx, redisClient, podName) > 0 { realMasterPod = podName break } } if realMasterPod == "" { - logger.Error(errors.New("no real master pod found"), "") + log.FromContext(ctx).Error(errors.New("no real master pod found"), "") return "" } @@ -355,5 +354,5 @@ func getRedisReplicationMasterIP(ctx context.Context, client kubernetes.Interfac PodName: realMasterPod, Namespace: replicationNamespace, } - return getRedisServerIP(ctx, client, logger, realMasterInfo) + return getRedisServerIP(ctx, client, 
realMasterInfo) } diff --git a/pkg/k8sutils/redis-sentinel_test.go b/pkg/k8sutils/redis-sentinel_test.go index 05e57f1c5..56e997762 100644 --- a/pkg/k8sutils/redis-sentinel_test.go +++ b/pkg/k8sutils/redis-sentinel_test.go @@ -208,7 +208,7 @@ func Test_generateRedisSentinelContainerParams(t *testing.T) { t.Fatalf("Failed to unmarshal file %s: %v", path, err) } - actual := generateRedisSentinelContainerParams(context.TODO(), nil, logr.Logger{}, input, nil, nil, nil) + actual := generateRedisSentinelContainerParams(context.TODO(), nil, input, nil, nil, nil) assert.EqualValues(t, expected, actual, "Expected %+v, got %+v", expected, actual) } @@ -364,7 +364,7 @@ func Test_getSentinelEnvVariable(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctx := context.TODO() - if got := getSentinelEnvVariable(ctx, tt.args.client, tt.args.logger, tt.args.cr, fake.NewSimpleDynamicClient(&runtime.Scheme{})); !reflect.DeepEqual(got, tt.want) { + if got := getSentinelEnvVariable(ctx, tt.args.client, tt.args.cr, fake.NewSimpleDynamicClient(&runtime.Scheme{})); !reflect.DeepEqual(got, tt.want) { t.Errorf("getSentinelEnvVariable() = %v, want %v", got, tt.want) } }) diff --git a/pkg/k8sutils/redis-standalone.go b/pkg/k8sutils/redis-standalone.go index d9a48f226..af20aaef3 100644 --- a/pkg/k8sutils/redis-standalone.go +++ b/pkg/k8sutils/redis-standalone.go @@ -6,11 +6,11 @@ import ( "github.com/OT-CONTAINER-KIT/redis-operator/pkg/util" "k8s.io/client-go/kubernetes" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/log" ) // CreateStandaloneService method will create standalone service for Redis func CreateStandaloneService(ctx context.Context, cr *redisv1beta2.Redis, cl kubernetes.Interface) error { - logger := serviceLogger(cr.Namespace, cr.ObjectMeta.Name) labels := getRedisLabels(cr.ObjectMeta.Name, standalone, "standalone", cr.ObjectMeta.Labels) var epp exporterPortProvider if cr.Spec.RedisExporter != nil { @@ -25,17 +25,18 @@ func CreateStandaloneService(ctx context.Context, cr *redisv1beta2.Redis, cl kub objectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name, cr.Namespace, labels, annotations) headlessObjectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name+"-headless", cr.Namespace, labels, annotations) additionalObjectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name+"-additional", cr.Namespace, labels, generateServiceAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.GetServiceAnnotations(), epp)) - err := CreateOrUpdateService(cr.Namespace, headlessObjectMetaInfo, redisAsOwner(cr), disableMetrics, true, "ClusterIP", redisPort, cl) + err := CreateOrUpdateService(ctx, cr.Namespace, headlessObjectMetaInfo, redisAsOwner(cr), disableMetrics, true, "ClusterIP", redisPort, cl) if err != nil { - logger.Error(err, "Cannot create standalone headless service for Redis") + log.FromContext(ctx).Error(err, "Cannot create standalone headless service for Redis") return err } - err = CreateOrUpdateService(cr.Namespace, objectMetaInfo, redisAsOwner(cr), epp, false, "ClusterIP", redisPort, cl) + err = CreateOrUpdateService(ctx, cr.Namespace, objectMetaInfo, redisAsOwner(cr), epp, false, "ClusterIP", redisPort, cl) if err != nil { - logger.Error(err, "Cannot create standalone service for Redis") + log.FromContext(ctx).Error(err, "Cannot create standalone service for Redis") return err } err = CreateOrUpdateService( + ctx, cr.Namespace, additionalObjectMetaInfo, redisAsOwner(cr), @@ -46,7 +47,7 @@ func CreateStandaloneService(ctx context.Context, cr 
*redisv1beta2.Redis, cl kub cl, ) if err != nil { - logger.Error(err, "Cannot create additional service for Redis") + log.FromContext(ctx).Error(err, "Cannot create additional service for Redis") return err } return nil @@ -54,14 +55,12 @@ func CreateStandaloneService(ctx context.Context, cr *redisv1beta2.Redis, cl kub // CreateStandaloneRedis will create a standalone redis setup func CreateStandaloneRedis(ctx context.Context, cr *redisv1beta2.Redis, cl kubernetes.Interface) error { - logger := statefulSetLogger(cr.Namespace, cr.ObjectMeta.Name) labels := getRedisLabels(cr.ObjectMeta.Name, standalone, "standalone", cr.ObjectMeta.Labels) annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations) objectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name, cr.Namespace, labels, annotations) err := CreateOrUpdateStateFul( ctx, cl, - logger, cr.GetNamespace(), objectMetaInfo, generateRedisStandaloneParams(cr), @@ -71,7 +70,7 @@ func CreateStandaloneRedis(ctx context.Context, cr *redisv1beta2.Redis, cl kuber cr.Spec.Sidecars, ) if err != nil { - logger.Error(err, "Cannot create standalone statefulset for Redis") + log.FromContext(ctx).Error(err, "Cannot create standalone statefulset for Redis") return err } return nil diff --git a/pkg/k8sutils/redis.go b/pkg/k8sutils/redis.go index 1dd7ee843..c39fc7473 100644 --- a/pkg/k8sutils/redis.go +++ b/pkg/k8sutils/redis.go @@ -6,11 +6,11 @@ import ( "encoding/csv" "fmt" "net" + "sigs.k8s.io/controller-runtime/pkg/log" "strconv" "strings" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" - "github.com/go-logr/logr" redis "github.com/redis/go-redis/v9" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,35 +26,35 @@ type RedisDetails struct { } // getRedisServerIP will return the IP of redis service -func getRedisServerIP(ctx context.Context, client kubernetes.Interface, logger logr.Logger, redisInfo RedisDetails) string { - logger.V(1).Info("Fetching Redis pod", "namespace", redisInfo.Namespace, "podName", redisInfo.PodName) +func getRedisServerIP(ctx context.Context, client kubernetes.Interface, redisInfo RedisDetails) string { + log.FromContext(ctx).Info("Fetching Redis pod", "namespace", redisInfo.Namespace, "podName", redisInfo.PodName) redisPod, err := client.CoreV1().Pods(redisInfo.Namespace).Get(context.TODO(), redisInfo.PodName, metav1.GetOptions{}) if err != nil { - logger.Error(err, "Error in getting Redis pod IP", "namespace", redisInfo.Namespace, "podName", redisInfo.PodName) + log.FromContext(ctx).Error(err, "Error in getting Redis pod IP", "namespace", redisInfo.Namespace, "podName", redisInfo.PodName) return "" } redisIP := redisPod.Status.PodIP - logger.V(1).Info("Fetched Redis pod IP", "ip", redisIP) + log.FromContext(ctx).Info("Fetched Redis pod IP", "ip", redisIP) // Check if IP is empty if redisIP == "" { - logger.V(1).Info("Redis pod IP is empty", "namespace", redisInfo.Namespace, "podName", redisInfo.PodName) + log.FromContext(ctx).Info("Redis pod IP is empty", "namespace", redisInfo.Namespace, "podName", redisInfo.PodName) return "" } // If we're NOT IPv4, assume we're IPv6.. 
if net.ParseIP(redisIP).To4() == nil { - logger.V(1).Info("Redis is using IPv6", "ip", redisIP) + log.FromContext(ctx).Info("Redis is using IPv6", "ip", redisIP) } - logger.V(1).Info("Successfully got the IP for Redis", "ip", redisIP) + log.FromContext(ctx).Info("Successfully got the IP for Redis", "ip", redisIP) return redisIP } -func getRedisServerAddress(ctx context.Context, client kubernetes.Interface, logger logr.Logger, rd RedisDetails, port int) string { - ip := getRedisServerIP(ctx, client, logger, rd) +func getRedisServerAddress(ctx context.Context, client kubernetes.Interface, rd RedisDetails, port int) string { + ip := getRedisServerIP(ctx, client, rd) format := "%s:%d" // if ip is IPv6, wrap it in brackets @@ -72,12 +72,12 @@ func getRedisHostname(redisInfo RedisDetails, cr *redisv1beta2.RedisCluster, rol } // CreateSingleLeaderRedisCommand will create command for single leader cluster creation -func CreateSingleLeaderRedisCommand(logger logr.Logger, cr *redisv1beta2.RedisCluster) []string { +func CreateSingleLeaderRedisCommand(ctx context.Context, cr *redisv1beta2.RedisCluster) []string { cmd := []string{"redis-cli", "CLUSTER", "ADDSLOTS"} for i := 0; i < 16384; i++ { cmd = append(cmd, strconv.Itoa(i)) } - logger.V(1).Info("Generating Redis Add Slots command for single node cluster", + log.FromContext(ctx).Info("Generating Redis Add Slots command for single node cluster", "BaseCommand", cmd[:3], "SlotsRange", "0-16383", "TotalSlots", 16384) @@ -87,14 +87,14 @@ func CreateSingleLeaderRedisCommand(logger logr.Logger, cr *redisv1beta2.RedisCl // RepairDisconnectedMasters attempts to repair disconnected/failed masters by issuing // a CLUSTER MEET with the updated address of the host -func RepairDisconnectedMasters(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) error { - redisClient := configureRedisClient(ctx, client, logger, cr, cr.ObjectMeta.Name+"-leader-0") +func RepairDisconnectedMasters(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) error { + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() - return repairDisconnectedMasters(ctx, client, logger, cr, redisClient) + return repairDisconnectedMasters(ctx, client, cr, redisClient) } -func repairDisconnectedMasters(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, redisClient *redis.Client) error { - nodes, err := clusterNodes(ctx, redisClient, logger) +func repairDisconnectedMasters(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, redisClient *redis.Client) error { + nodes, err := clusterNodes(ctx, redisClient) if err != nil { return err } @@ -106,12 +106,11 @@ func repairDisconnectedMasters(ctx context.Context, client kubernetes.Interface, if !nodeFailedOrDisconnected(node) { continue } - log.V(1).Info("found disconnected master node", "node", node) podName, err := getMasterHostFromClusterNode(node) if err != nil { return err } - ip := getRedisServerIP(ctx, client, logger, RedisDetails{ + ip := getRedisServerIP(ctx, client, RedisDetails{ PodName: podName, Namespace: cr.Namespace, }) @@ -133,7 +132,7 @@ func getMasterHostFromClusterNode(node clusterNodesResponse) (string, error) { } // CreateMultipleLeaderRedisCommand will create command for single leader cluster creation -func CreateMultipleLeaderRedisCommand(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr 
*redisv1beta2.RedisCluster) []string { +func CreateMultipleLeaderRedisCommand(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) []string { cmd := []string{"redis-cli", "--cluster", "create"} replicas := cr.Spec.GetReplicaCounts("leader") @@ -143,42 +142,42 @@ func CreateMultipleLeaderRedisCommand(ctx context.Context, client kubernetes.Int if cr.Spec.ClusterVersion != nil && *cr.Spec.ClusterVersion == "v7" { address = getRedisHostname(RedisDetails{PodName: podName, Namespace: cr.Namespace}, cr, "leader") + fmt.Sprintf(":%d", *cr.Spec.Port) } else { - address = getRedisServerAddress(ctx, client, logger, RedisDetails{PodName: podName, Namespace: cr.Namespace}, *cr.Spec.Port) + address = getRedisServerAddress(ctx, client, RedisDetails{PodName: podName, Namespace: cr.Namespace}, *cr.Spec.Port) } cmd = append(cmd, address) } cmd = append(cmd, "--cluster-yes") - logger.V(1).Info("Redis cluster creation command", "CommandBase", cmd[:3], "Replicas", replicas) + log.FromContext(ctx).Info("Redis cluster creation command", "CommandBase", cmd[:3], "Replicas", replicas) return cmd } // ExecuteRedisClusterCommand will execute redis cluster creation command -func ExecuteRedisClusterCommand(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { +func ExecuteRedisClusterCommand(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { var cmd []string replicas := cr.Spec.GetReplicaCounts("leader") switch int(replicas) { case 1: - err := executeFailoverCommand(ctx, client, logger, cr, "leader") + err := executeFailoverCommand(ctx, client, cr, "leader") if err != nil { - logger.Error(err, "error executing failover command") + log.FromContext(ctx).Error(err, "error executing failover command") } - cmd = CreateSingleLeaderRedisCommand(logger, cr) + cmd = CreateSingleLeaderRedisCommand(ctx, cr) default: - cmd = CreateMultipleLeaderRedisCommand(ctx, client, logger, cr) + cmd = CreateMultipleLeaderRedisCommand(ctx, client, cr) } if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) } cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) 
- logger.V(1).Info("Redis cluster creation command is", "Command", cmd) - executeCommand(ctx, client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + log.FromContext(ctx).Info("Redis cluster creation command is", "Command", cmd) + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") } func getRedisTLSArgs(tlsConfig *redisv1beta2.TLSConfig, clientHost string) []string { @@ -194,7 +193,7 @@ func getRedisTLSArgs(tlsConfig *redisv1beta2.TLSConfig, clientHost string) []str } // createRedisReplicationCommand will create redis replication creation command -func createRedisReplicationCommand(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, leaderPod RedisDetails, followerPod RedisDetails) []string { +func createRedisReplicationCommand(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, leaderPod RedisDetails, followerPod RedisDetails) []string { cmd := []string{"redis-cli", "--cluster", "add-node"} var followerAddress, leaderAddress string @@ -202,16 +201,16 @@ func createRedisReplicationCommand(ctx context.Context, client kubernetes.Interf followerAddress = getRedisHostname(followerPod, cr, "follower") + fmt.Sprintf(":%d", *cr.Spec.Port) leaderAddress = getRedisHostname(leaderPod, cr, "leader") + fmt.Sprintf(":%d", *cr.Spec.Port) } else { - followerAddress = getRedisServerAddress(ctx, client, logger, followerPod, *cr.Spec.Port) - leaderAddress = getRedisServerAddress(ctx, client, logger, leaderPod, *cr.Spec.Port) + followerAddress = getRedisServerAddress(ctx, client, followerPod, *cr.Spec.Port) + leaderAddress = getRedisServerAddress(ctx, client, leaderPod, *cr.Spec.Port) } cmd = append(cmd, followerAddress, leaderAddress, "--cluster-slave") if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Failed to retrieve Redis password", "Secret", *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name) + log.FromContext(ctx).Error(err, "Failed to retrieve Redis password", "Secret", *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name) } else { cmd = append(cmd, "-a", pass) } @@ -219,7 +218,7 @@ func createRedisReplicationCommand(ctx context.Context, client kubernetes.Interf cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, leaderPod.PodName)...) 
- logger.V(1).Info("Generated Redis replication command", + log.FromContext(ctx).Info("Generated Redis replication command", "FollowerAddress", followerAddress, "LeaderAddress", leaderAddress, "Command", cmd) @@ -227,18 +226,18 @@ func createRedisReplicationCommand(ctx context.Context, client kubernetes.Interf } // ExecuteRedisReplicationCommand will execute the replication command -func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { +func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { var podIP string followerCounts := cr.Spec.GetReplicaCounts("follower") leaderCounts := cr.Spec.GetReplicaCounts("leader") followerPerLeader := followerCounts / leaderCounts - redisClient := configureRedisClient(ctx, client, logger, cr, cr.ObjectMeta.Name+"-leader-0") + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() - nodes, err := clusterNodes(ctx, redisClient, logger) + nodes, err := clusterNodes(ctx, redisClient) if err != nil { - logger.Error(err, "failed to get cluster nodes") + log.FromContext(ctx).Error(err, "failed to get cluster nodes") } for followerIdx := 0; followerIdx <= int(followerCounts)-1; { for i := 0; i < int(followerPerLeader) && followerIdx <= int(followerCounts)-1; i++ { @@ -250,24 +249,24 @@ func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Inter PodName: cr.ObjectMeta.Name + "-leader-" + strconv.Itoa((followerIdx)%int(leaderCounts)), Namespace: cr.Namespace, } - podIP = getRedisServerIP(ctx, client, logger, followerPod) + podIP = getRedisServerIP(ctx, client, followerPod) if !checkRedisNodePresence(ctx, cr, nodes, podIP) { - logger.V(1).Info("Adding node to cluster.", "Node.IP", podIP, "Follower.Pod", followerPod) - cmd := createRedisReplicationCommand(ctx, client, logger, cr, leaderPod, followerPod) - redisClient := configureRedisClient(ctx, client, logger, cr, followerPod.PodName) + log.FromContext(ctx).Info("Adding node to cluster.", "Node.IP", podIP, "Follower.Pod", followerPod) + cmd := createRedisReplicationCommand(ctx, client, cr, leaderPod, followerPod) + redisClient := configureRedisClient(ctx, client, cr, followerPod.PodName) pong, err := redisClient.Ping(ctx).Result() redisClient.Close() if err != nil { - logger.Error(err, "Failed to ping Redis server", "Follower.Pod", followerPod) + log.FromContext(ctx).Error(err, "Failed to ping Redis server", "Follower.Pod", followerPod) continue } if pong == "PONG" { - executeCommand(ctx, client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") } else { - logger.V(1).Info("Skipping execution of command due to failed Redis ping", "Follower.Pod", followerPod) + log.FromContext(ctx).Info("Skipping execution of command due to failed Redis ping", "Follower.Pod", followerPod) } } else { - logger.V(1).Info("Skipping Adding node to cluster, already present.", "Follower.Pod", followerPod) + log.FromContext(ctx).Info("Skipping Adding node to cluster, already present.", "Follower.Pod", followerPod) } followerIdx++ @@ -278,7 +277,7 @@ func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Inter type clusterNodesResponse []string // clusterNodes will returns the response of CLUSTER NODES -func clusterNodes(ctx context.Context, redisClient *redis.Client, logger logr.Logger) ([]clusterNodesResponse, error) { +func 
clusterNodes(ctx context.Context, redisClient *redis.Client) ([]clusterNodesResponse, error) { output, err := redisClient.ClusterNodes(ctx).Result() if err != nil { return nil, err @@ -299,62 +298,61 @@ func clusterNodes(ctx context.Context, redisClient *redis.Client, logger logr.Lo } // ExecuteFailoverOperation will execute redis failover operations -func ExecuteFailoverOperation(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) error { - err := executeFailoverCommand(ctx, client, logger, cr, "leader") +func ExecuteFailoverOperation(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) error { + err := executeFailoverCommand(ctx, client, cr, "leader") if err != nil { - logger.Error(err, "Redis command failed for leader nodes") return err } - err = executeFailoverCommand(ctx, client, logger, cr, "follower") + err = executeFailoverCommand(ctx, client, cr, "follower") if err != nil { - logger.Error(err, "Redis command failed for follower nodes") return err } return nil } // executeFailoverCommand will execute failover command -func executeFailoverCommand(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, role string) error { +func executeFailoverCommand(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, role string) error { replicas := cr.Spec.GetReplicaCounts(role) podName := fmt.Sprintf("%s-%s-", cr.ObjectMeta.Name, role) for podCount := 0; podCount <= int(replicas)-1; podCount++ { - logger.V(1).Info("Executing redis failover operations", "Redis Node", podName+strconv.Itoa(podCount)) - client := configureRedisClient(ctx, client, logger, cr, podName+strconv.Itoa(podCount)) + log.FromContext(ctx).Info("Executing redis failover operations", "Redis Node", podName+strconv.Itoa(podCount)) + client := configureRedisClient(ctx, client, cr, podName+strconv.Itoa(podCount)) defer client.Close() cmd := redis.NewStringCmd(ctx, "cluster", "reset") err := client.Process(ctx, cmd) if err != nil { - logger.Error(err, "Redis command failed with this error") + + log.FromContext(ctx).Error(err, "Redis command failed with this error") flushcommand := redis.NewStringCmd(ctx, "flushall") err = client.Process(ctx, flushcommand) if err != nil { - logger.Error(err, "Redis flush command failed with this error") + log.FromContext(ctx).Error(err, "Redis flush command failed with this error") return err } } err = client.Process(ctx, cmd) if err != nil { - logger.Error(err, "Redis command failed with this error") + log.FromContext(ctx).Error(err, "Redis command failed with this error") return err } output, err := cmd.Result() if err != nil { - logger.Error(err, "Redis command failed with this error") + log.FromContext(ctx).Error(err, "Redis command failed with this error") return err } - logger.V(1).Info("Redis cluster failover executed", "Output", output) + log.FromContext(ctx).Info("Redis cluster failover executed", "Output", output) } return nil } // CheckRedisNodeCount will check the count of redis nodes -func CheckRedisNodeCount(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, nodeType string) int32 { - redisClient := configureRedisClient(ctx, client, logger, cr, cr.ObjectMeta.Name+"-leader-0") +func CheckRedisNodeCount(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, nodeType string) int32 { + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") 
defer redisClient.Close() var redisNodeType string - clusterNodes, err := clusterNodes(ctx, redisClient, logger) + clusterNodes, err := clusterNodes(ctx, redisClient) if err != nil { - logger.Error(err, "failed to get cluster nodes") + log.FromContext(ctx).Error(err, "failed to get cluster nodes") } count := len(clusterNodes) @@ -373,29 +371,29 @@ func CheckRedisNodeCount(ctx context.Context, client kubernetes.Interface, logge count++ } } - logger.V(1).Info("Number of redis nodes are", "Nodes", strconv.Itoa(count), "Type", nodeType) + log.FromContext(ctx).Info("Number of redis nodes are", "Nodes", strconv.Itoa(count), "Type", nodeType) } else { - logger.V(1).Info("Total number of redis nodes are", "Nodes", strconv.Itoa(count)) + log.FromContext(ctx).Info("Total number of redis nodes are", "Nodes", strconv.Itoa(count)) } return int32(count) } // RedisClusterStatusHealth use `redis-cli --cluster check 127.0.0.1:6379` -func RedisClusterStatusHealth(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) bool { - redisClient := configureRedisClient(ctx, client, logger, cr, cr.ObjectMeta.Name+"-leader-0") +func RedisClusterStatusHealth(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) bool { + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() cmd := []string{"redis-cli", "--cluster", "check", "127.0.0.1:6379"} if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) } cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) 
- out, err := executeCommand1(ctx, client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + out, err := executeCommand1(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") if err != nil { return false } @@ -409,10 +407,10 @@ func RedisClusterStatusHealth(ctx context.Context, client kubernetes.Interface, } // UnhealthyNodesInCluster returns the number of unhealthy nodes in the cluster cr -func UnhealthyNodesInCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) (int, error) { - redisClient := configureRedisClient(ctx, client, logger, cr, cr.ObjectMeta.Name+"-leader-0") +func UnhealthyNodesInCluster(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) (int, error) { + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() - clusterNodes, err := clusterNodes(ctx, redisClient, logger) + clusterNodes, err := clusterNodes(ctx, redisClient) if err != nil { return 0, err } @@ -422,7 +420,7 @@ func UnhealthyNodesInCluster(ctx context.Context, client kubernetes.Interface, l count++ } } - logger.V(1).Info("Number of failed nodes in cluster", "Failed Node Count", count) + log.FromContext(ctx).Info("Number of failed nodes in cluster", "Failed Node Count", count) return count, nil } @@ -435,7 +433,7 @@ func nodeFailedOrDisconnected(node clusterNodesResponse) bool { } // configureRedisClient will configure the Redis Client -func configureRedisClient(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, podName string) *redis.Client { +func configureRedisClient(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, podName string) *redis.Client { redisInfo := RedisDetails{ PodName: podName, Namespace: cr.Namespace, @@ -443,45 +441,45 @@ func configureRedisClient(ctx context.Context, client kubernetes.Interface, logg var err error var pass string if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err = getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err = getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } } opts := &redis.Options{ - Addr: getRedisServerAddress(ctx, client, logger, redisInfo, *cr.Spec.Port), + Addr: getRedisServerAddress(ctx, client, redisInfo, *cr.Spec.Port), Password: pass, DB: 0, } if cr.Spec.TLS != nil { - opts.TLSConfig = getRedisTLSConfig(ctx, client, logger, cr.Namespace, cr.Spec.TLS.Secret.SecretName, redisInfo.PodName) + opts.TLSConfig = getRedisTLSConfig(ctx, client, cr.Namespace, cr.Spec.TLS.Secret.SecretName, redisInfo.PodName) } return redis.NewClient(opts) } // executeCommand will execute the commands in pod -func executeCommand(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, cmd []string, podName string) { - execOut, execErr := executeCommand1(ctx, client, logger, cr, cmd, podName) +func executeCommand(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, cmd []string, podName string) { + execOut, execErr := executeCommand1(ctx, client, cr, cmd, podName) if execErr != nil { - logger.Error(execErr, "Could not 
execute command", "Command", cmd, "Output", execOut) + log.FromContext(ctx).Error(execErr, "Could not execute command", "Command", cmd, "Output", execOut) return } - logger.V(1).Info("Successfully executed the command", "Command", cmd, "Output", execOut) + log.FromContext(ctx).Info("Successfully executed the command", "Command", cmd, "Output", execOut) } -func executeCommand1(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, cmd []string, podName string) (stdout string, stderr error) { +func executeCommand1(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, cmd []string, podName string) (stdout string, stderr error) { var ( execOut bytes.Buffer execErr bytes.Buffer ) config, err := GenerateK8sConfig()() if err != nil { - logger.Error(err, "Could not find pod to execute") + log.FromContext(ctx).Error(err, "Could not find pod to execute") return "", err } - targetContainer, pod := getContainerID(ctx, client, logger, cr, podName) + targetContainer, pod := getContainerID(ctx, client, cr, podName) if targetContainer < 0 { - logger.Error(err, "Could not find pod to execute") + log.FromContext(ctx).Error(err, "Could not find pod to execute") return "", err } @@ -494,7 +492,7 @@ func executeCommand1(ctx context.Context, client kubernetes.Interface, logger lo }, scheme.ParameterCodec) exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL()) if err != nil { - logger.Error(err, "Failed to init executor") + log.FromContext(ctx).Error(err, "Failed to init executor") return "", err } @@ -510,27 +508,27 @@ func executeCommand1(ctx context.Context, client kubernetes.Interface, logger lo } // getContainerID will return the id of container from pod -func getContainerID(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, podName string) (int, *corev1.Pod) { +func getContainerID(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, podName string) (int, *corev1.Pod) { pod, err := client.CoreV1().Pods(cr.Namespace).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { - logger.Error(err, "Could not get pod info", "Pod Name", podName, "Namespace", cr.Namespace) + log.FromContext(ctx).Error(err, "Could not get pod info", "Pod Name", podName, "Namespace", cr.Namespace) return -1, nil } - logger.V(1).Info("Pod info retrieved successfully", "Pod Name", podName, "Namespace", cr.Namespace) + log.FromContext(ctx).Info("Pod info retrieved successfully", "Pod Name", podName, "Namespace", cr.Namespace) targetContainer := -1 for containerID, tr := range pod.Spec.Containers { - logger.V(1).Info("Inspecting container", "Pod Name", podName, "Container ID", containerID, "Container Name", tr.Name) + log.FromContext(ctx).Info("Inspecting container", "Pod Name", podName, "Container ID", containerID, "Container Name", tr.Name) if tr.Name == cr.ObjectMeta.Name+"-leader" { targetContainer = containerID - logger.V(1).Info("Leader container found", "Container ID", containerID, "Container Name", tr.Name) + log.FromContext(ctx).Info("Leader container found", "Container ID", containerID, "Container Name", tr.Name) break } } if targetContainer == -1 { - logger.V(1).Info("Leader container not found in pod", "Pod Name", podName) + log.FromContext(ctx).Info("Leader container not found in pod", "Pod Name", podName) return -1, nil } @@ -539,8 +537,7 @@ func getContainerID(ctx context.Context, client kubernetes.Interface, logger log // checkRedisNodePresence will 
check if the redis node exist in cluster or not func checkRedisNodePresence(ctx context.Context, cr *redisv1beta2.RedisCluster, nodeList []clusterNodesResponse, nodeName string) bool { - logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name) - logger.V(1).Info("Checking if Node is in cluster", "Node", nodeName) + log.FromContext(ctx).Info("Checking if Node is in cluster", "Node", nodeName) for _, node := range nodeList { s := strings.Split(node[1], ":") if s[0] == nodeName { @@ -550,14 +547,8 @@ func checkRedisNodePresence(ctx context.Context, cr *redisv1beta2.RedisCluster, return false } -// generateRedisManagerLogger will generate logging interface for Redis operations -func generateRedisManagerLogger(namespace, name string) logr.Logger { - reqLogger := log.WithValues("Request.RedisManager.Namespace", namespace, "Request.RedisManager.Name", name) - return reqLogger -} - // configureRedisClient will configure the Redis Client -func configureRedisReplicationClient(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, podName string) *redis.Client { +func configureRedisReplicationClient(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisReplication, podName string) *redis.Client { redisInfo := RedisDetails{ PodName: podName, Namespace: cr.Namespace, @@ -565,27 +556,27 @@ func configureRedisReplicationClient(ctx context.Context, client kubernetes.Inte var err error var pass string if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err = getRedisPassword(ctx, client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err = getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } } opts := &redis.Options{ - Addr: getRedisServerAddress(ctx, client, logger, redisInfo, 6379), + Addr: getRedisServerAddress(ctx, client, redisInfo, 6379), Password: pass, DB: 0, } if cr.Spec.TLS != nil { - opts.TLSConfig = getRedisTLSConfig(ctx, client, logger, cr.Namespace, cr.Spec.TLS.Secret.SecretName, podName) + opts.TLSConfig = getRedisTLSConfig(ctx, client, cr.Namespace, cr.Spec.TLS.Secret.SecretName, podName) } return redis.NewClient(opts) } // Get Redis nodes by it's role i.e. 
master, slave and sentinel -func GetRedisNodesByRole(ctx context.Context, cl kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, redisRole string) []string { - statefulset, err := GetStatefulSet(ctx, cl, logger, cr.GetNamespace(), cr.GetName()) +func GetRedisNodesByRole(ctx context.Context, cl kubernetes.Interface, cr *redisv1beta2.RedisReplication, redisRole string) []string { + statefulset, err := GetStatefulSet(ctx, cl, cr.GetNamespace(), cr.GetName()) if err != nil { - logger.Error(err, "Failed to Get the Statefulset of the", "custom resource", cr.Name, "in namespace", cr.Namespace) + log.FromContext(ctx).Error(err, "Failed to Get the Statefulset of the", "custom resource", cr.Name, "in namespace", cr.Namespace) } var pods []string @@ -593,9 +584,9 @@ func GetRedisNodesByRole(ctx context.Context, cl kubernetes.Interface, logger lo for i := 0; i < int(replicas); i++ { podName := statefulset.Name + "-" + strconv.Itoa(i) - redisClient := configureRedisReplicationClient(ctx, cl, logger, cr, podName) + redisClient := configureRedisReplicationClient(ctx, cl, cr, podName) defer redisClient.Close() - podRole := checkRedisServerRole(ctx, redisClient, logger, podName) + podRole := checkRedisServerRole(ctx, redisClient, podName) if podRole == redisRole { pods = append(pods, podName) } @@ -605,29 +596,29 @@ func GetRedisNodesByRole(ctx context.Context, cl kubernetes.Interface, logger lo } // Check the Redis Server Role i.e. master, slave and sentinel -func checkRedisServerRole(ctx context.Context, redisClient *redis.Client, logger logr.Logger, podName string) string { +func checkRedisServerRole(ctx context.Context, redisClient *redis.Client, podName string) string { info, err := redisClient.Info(ctx, "Replication").Result() if err != nil { - logger.Error(err, "Failed to Get the role Info of the", "redis pod", podName) + log.FromContext(ctx).Error(err, "Failed to Get the role Info of the", "redis pod", podName) return "" } lines := strings.Split(info, "\r\n") for _, line := range lines { if strings.HasPrefix(line, "role:") { role := strings.TrimPrefix(line, "role:") - logger.V(1).Info("Role of the Redis Pod", "pod", podName, "role", role) + log.FromContext(ctx).Info("Role of the Redis Pod", "pod", podName, "role", role) return role } } - logger.Error(err, "Failed to find role from Info # Replication in", "redis pod", podName) + log.FromContext(ctx).Error(err, "Failed to find role from Info # Replication in", "redis pod", podName) return "" } // checkAttachedSlave would return redis pod name which has slave -func checkAttachedSlave(ctx context.Context, redisClient *redis.Client, logger logr.Logger, podName string) int { +func checkAttachedSlave(ctx context.Context, redisClient *redis.Client, podName string) int { info, err := redisClient.Info(ctx, "Replication").Result() if err != nil { - logger.Error(err, "Failed to get the connected slaves count of the", "redis pod", podName) + log.FromContext(ctx).Error(err, "Failed to get the connected slaves count of the", "redis pod", podName) return -1 // return -1 if failed to get the connected slaves count } @@ -637,35 +628,35 @@ func checkAttachedSlave(ctx context.Context, redisClient *redis.Client, logger l var connected_slaves int connected_slaves, err = strconv.Atoi(strings.TrimPrefix(line, "connected_slaves:")) if err != nil { - logger.Error(err, "Failed to convert the connected slaves count of the", "redis pod", podName) + log.FromContext(ctx).Error(err, "Failed to convert the connected slaves count of the", "redis pod", 
podName) return -1 } - logger.V(1).Info("Connected Slaves of the Redis Pod", "pod", podName, "connected_slaves", connected_slaves) + log.FromContext(ctx).Info("Connected Slaves of the Redis Pod", "pod", podName, "connected_slaves", connected_slaves) return connected_slaves } } - logger.Error(nil, "Failed to find connected_slaves from Info # Replication in", "redis pod", podName) + log.FromContext(ctx).Error(nil, "Failed to find connected_slaves from Info # Replication in", "redis pod", podName) return 0 } -func CreateMasterSlaveReplication(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, masterPods []string, realMasterPod string) error { - logger.V(1).Info("Redis Master Node is set to", "pod", realMasterPod) +func CreateMasterSlaveReplication(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisReplication, masterPods []string, realMasterPod string) error { + log.FromContext(ctx).Info("Redis Master Node is set to", "pod", realMasterPod) realMasterInfo := RedisDetails{ PodName: realMasterPod, Namespace: cr.Namespace, } - realMasterPodIP := getRedisServerIP(ctx, client, logger, realMasterInfo) + realMasterPodIP := getRedisServerIP(ctx, client, realMasterInfo) for i := 0; i < len(masterPods); i++ { if masterPods[i] != realMasterPod { - redisClient := configureRedisReplicationClient(ctx, client, logger, cr, masterPods[i]) + redisClient := configureRedisReplicationClient(ctx, client, cr, masterPods[i]) defer redisClient.Close() - logger.V(1).Info("Setting the", "pod", masterPods[i], "to slave of", realMasterPod) + log.FromContext(ctx).Info("Setting the", "pod", masterPods[i], "to slave of", realMasterPod) err := redisClient.SlaveOf(ctx, realMasterPodIP, "6379").Err() if err != nil { - logger.Error(err, "Failed to set", "pod", masterPods[i], "to slave of", realMasterPod) + log.FromContext(ctx).Error(err, "Failed to set", "pod", masterPods[i], "to slave of", realMasterPod) return err } } @@ -674,12 +665,12 @@ func CreateMasterSlaveReplication(ctx context.Context, client kubernetes.Interfa return nil } -func GetRedisReplicationRealMaster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, masterPods []string) string { +func GetRedisReplicationRealMaster(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisReplication, masterPods []string) string { for _, podName := range masterPods { - redisClient := configureRedisReplicationClient(ctx, client, logger, cr, podName) + redisClient := configureRedisReplicationClient(ctx, client, cr, podName) defer redisClient.Close() - if checkAttachedSlave(ctx, redisClient, logger, podName) > 0 { + if checkAttachedSlave(ctx, redisClient, podName) > 0 { return podName } } diff --git a/pkg/k8sutils/redis_test.go b/pkg/k8sutils/redis_test.go index 922e80df4..674c92aa1 100644 --- a/pkg/k8sutils/redis_test.go +++ b/pkg/k8sutils/redis_test.go @@ -10,8 +10,6 @@ import ( "github.com/OT-CONTAINER-KIT/redis-operator/api" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" mock_utils "github.com/OT-CONTAINER-KIT/redis-operator/mocks/utils" - "github.com/go-logr/logr" - "github.com/go-logr/logr/testr" "github.com/go-redis/redismock/v9" redis "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" @@ -80,7 +78,7 @@ e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:30001@31001,redis-cluster-lea }) mock.ExpectClusterMeet(newPodIP, "6379").SetVal("OK") port := 6379 - err := repairDisconnectedMasters(ctx, 
k8sClient, logr.Discard(), &redisv1beta2.RedisCluster{ + err := repairDisconnectedMasters(ctx, k8sClient, &redisv1beta2.RedisCluster{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, }, @@ -175,8 +173,8 @@ func TestGetRedisServerIP(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { client := tt.setup() - logger := testr.New(t) - redisIP := getRedisServerIP(context.TODO(), client, logger, tt.redisInfo) + + redisIP := getRedisServerIP(context.TODO(), client, tt.redisInfo) if tt.expectEmpty { assert.Empty(t, redisIP, "Expected an empty IP address") @@ -240,8 +238,8 @@ func TestGetRedisServerAddress(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { client := tt.setup() - logger := testr.New(t) - redisIP := getRedisServerAddress(context.TODO(), client, logger, tt.redisInfo, 6379) + + redisIP := getRedisServerAddress(context.TODO(), client, tt.redisInfo, 6379) if tt.expectEmpty { assert.Empty(t, redisIP, "Expected an empty address") @@ -286,9 +284,9 @@ func TestGetRedisHostname(t *testing.T) { } func TestCreateSingleLeaderRedisCommand(t *testing.T) { - logger := testr.New(t) + cr := &redisv1beta2.RedisCluster{} - cmd := CreateSingleLeaderRedisCommand(logger, cr) + cmd := CreateSingleLeaderRedisCommand(context.TODO(), cr) assert.Equal(t, "redis-cli", cmd[0]) assert.Equal(t, "CLUSTER", cmd[1]) @@ -353,9 +351,8 @@ func TestCreateMultipleLeaderRedisCommand(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { client := mock_utils.CreateFakeClientWithPodIPs_LeaderPods(tt.redisCluster) - logger := testr.New(t) - cmd := CreateMultipleLeaderRedisCommand(context.TODO(), client, logger, tt.redisCluster) + cmd := CreateMultipleLeaderRedisCommand(context.TODO(), client, tt.redisCluster) assert.Equal(t, tt.expectedCommands, cmd) }) } @@ -391,7 +388,7 @@ func TestGetRedisTLSArgs(t *testing.T) { } func TestCreateRedisReplicationCommand(t *testing.T) { - logger := logr.Discard() + type secret struct { name string namespace string @@ -530,7 +527,7 @@ func TestCreateRedisReplicationCommand(t *testing.T) { objects = append(objects, secret...) client := fake.NewSimpleClientset(objects...) 
- cmd := createRedisReplicationCommand(context.TODO(), client, logger, tt.redisCluster, tt.leaderPod, tt.followerPod) + cmd := createRedisReplicationCommand(context.TODO(), client, tt.redisCluster, tt.leaderPod, tt.followerPod) // Assert the command is as expected using testify assert.Equal(t, tt.expectedCommand, cmd) @@ -614,8 +611,7 @@ func TestGetContainerID(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { client := k8sClientFake.NewSimpleClientset(test.setupPod) - logger := testr.New(t) - id, pod := getContainerID(context.TODO(), client, logger, test.redisCluster, test.setupPod.Name) + id, pod := getContainerID(context.TODO(), client, test.redisCluster, test.setupPod.Name) if test.expectError { assert.Nil(t, pod, "Expected no pod but got one") assert.Equal(t, test.expectedID, id, "Expected ID does not match") @@ -630,7 +626,6 @@ func TestGetContainerID(t *testing.T) { } func Test_checkAttachedSlave(t *testing.T) { - logger := logr.Discard() tests := []struct { name string @@ -709,7 +704,7 @@ func Test_checkAttachedSlave(t *testing.T) { mock.ExpectInfo("Replication").SetVal(tt.infoReturn) } - slaveCount := checkAttachedSlave(ctx, client, logger, tt.podName) + slaveCount := checkAttachedSlave(ctx, client, tt.podName) assert.Equal(t, tt.expectedSlaveCount, slaveCount, "Test case: "+tt.name) if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unmet expectations: %s", err) @@ -719,8 +714,6 @@ func Test_checkAttachedSlave(t *testing.T) { } func Test_checkRedisServerRole(t *testing.T) { - logger := logr.Discard() - tests := []struct { name string podName string @@ -798,7 +791,7 @@ func Test_checkRedisServerRole(t *testing.T) { mock.ExpectInfo("Replication").SetVal(tt.infoReturn) } - role := checkRedisServerRole(ctx, client, logger, tt.podName) + role := checkRedisServerRole(ctx, client, tt.podName) if tt.shouldFail { assert.Empty(t, role, "Test case: "+tt.name) } else { @@ -812,7 +805,7 @@ func Test_checkRedisServerRole(t *testing.T) { } func TestClusterNodes(t *testing.T) { - logger := logr.Discard() // Discard logs + // Discard logs tests := []struct { name string @@ -853,7 +846,7 @@ e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:30001@31001,hostname1 myself, } else { mock.ExpectClusterNodes().SetVal(tc.clusterNodesOutput) } - result, err := clusterNodes(context.TODO(), db, logger) + result, err := clusterNodes(context.TODO(), db) if tc.expectError != nil { assert.Nil(t, result) diff --git a/pkg/k8sutils/secrets.go b/pkg/k8sutils/secrets.go index 3f67a522c..8f095bc1e 100644 --- a/pkg/k8sutils/secrets.go +++ b/pkg/k8sutils/secrets.go @@ -7,35 +7,32 @@ import ( "errors" "strings" - "github.com/go-logr/logr" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" logf "sigs.k8s.io/controller-runtime/pkg/log" ) -var log = logf.Log.WithName("controller_redis") - // getRedisPassword method will return the redis password from the secret -func getRedisPassword(ctx context.Context, client kubernetes.Interface, logger logr.Logger, namespace, name, secretKey string) (string, error) { +func getRedisPassword(ctx context.Context, client kubernetes.Interface, namespace, name, secretKey string) (string, error) { secretName, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { - logger.Error(err, "Failed in getting existing secret for redis") + logf.FromContext(ctx).Error(err, "Failed in getting existing secret for redis") return "", err } for key, value := range secretName.Data 
{ if key == secretKey { - logger.V(1).Info("Secret key found in the secret", "secretKey", secretKey) + logf.FromContext(ctx).Info("Secret key found in the secret", "secretKey", secretKey) return strings.TrimSpace(string(value)), nil } } - logger.Error(errors.New("secret key not found"), "Secret key not found in the secret") + logf.FromContext(ctx).Error(errors.New("secret key not found"), "Secret key not found in the secret") return "", nil } -func getRedisTLSConfig(ctx context.Context, client kubernetes.Interface, logger logr.Logger, namespace, tlsSecretName, podName string) *tls.Config { +func getRedisTLSConfig(ctx context.Context, client kubernetes.Interface, namespace, tlsSecretName, podName string) *tls.Config { secret, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), tlsSecretName, metav1.GetOptions{}) if err != nil { - logger.V(1).Error(err, "Failed in getting TLS secret", "secretName", tlsSecretName, "namespace", namespace) + logf.FromContext(ctx).Error(err, "Failed in getting TLS secret", "secretName", tlsSecretName, "namespace", namespace) return nil } @@ -44,20 +41,20 @@ func getRedisTLSConfig(ctx context.Context, client kubernetes.Interface, logger tlsCaCertificate, caExists := secret.Data["ca.crt"] if !certExists || !keyExists || !caExists { - logger.Error(errors.New("required TLS keys are missing in the secret"), "Missing TLS keys in the secret") + logf.FromContext(ctx).Error(errors.New("required TLS keys are missing in the secret"), "Missing TLS keys in the secret") return nil } cert, err := tls.X509KeyPair(tlsClientCert, tlsClientKey) if err != nil { - logger.V(1).Error(err, "Couldn't load TLS client key pair", "secretName", tlsSecretName, "namespace", namespace) + logf.FromContext(ctx).Error(err, "Couldn't load TLS client key pair", "secretName", tlsSecretName, "namespace", namespace) return nil } tlsCaCertificates := x509.NewCertPool() ok := tlsCaCertificates.AppendCertsFromPEM(tlsCaCertificate) if !ok { - logger.V(1).Error(err, "Invalid CA Certificates", "secretName", tlsSecretName, "namespace", namespace) + logf.FromContext(ctx).Error(err, "Invalid CA Certificates", "secretName", tlsSecretName, "namespace", namespace) return nil } diff --git a/pkg/k8sutils/secrets_test.go b/pkg/k8sutils/secrets_test.go index f9e8241bd..10c42d35c 100644 --- a/pkg/k8sutils/secrets_test.go +++ b/pkg/k8sutils/secrets_test.go @@ -8,7 +8,6 @@ import ( common "github.com/OT-CONTAINER-KIT/redis-operator/api" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" - "github.com/go-logr/logr/testr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" @@ -85,8 +84,8 @@ func Test_getRedisPassword(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { client := tt.setup() - logger := testr.New(t) - got, err := getRedisPassword(context.TODO(), client, logger, tt.namespace, tt.secretName, tt.secretKey) + + got, err := getRedisPassword(context.TODO(), client, tt.namespace, tt.secretName, tt.secretKey) if tt.expectedErr { require.Error(t, err, "Expected an error but didn't get one") @@ -222,8 +221,8 @@ func Test_getRedisTLSConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { client := tt.setup() - logger := testr.New(t) - tlsConfig := getRedisTLSConfig(context.TODO(), client, logger, tt.redisCluster.Namespace, tt.redisCluster.Spec.TLS.Secret.SecretName, tt.redisInfo.PodName) + + tlsConfig := getRedisTLSConfig(context.TODO(), client, tt.redisCluster.Namespace, 
tt.redisCluster.Spec.TLS.Secret.SecretName, tt.redisInfo.PodName) if tt.expectTLS { require.NotNil(t, tlsConfig, "Expected TLS configuration but got nil") diff --git a/pkg/k8sutils/services.go b/pkg/k8sutils/services.go index 627117277..cd8a49d4f 100644 --- a/pkg/k8sutils/services.go +++ b/pkg/k8sutils/services.go @@ -2,9 +2,9 @@ package k8sutils import ( "context" + "sigs.k8s.io/controller-runtime/pkg/log" "github.com/banzaicloud/k8s-objectmatcher/patch" - "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -94,66 +94,59 @@ func generateServiceType(k8sServiceType string) corev1.ServiceType { } // createService is a method to create service is Kubernetes -func createService(kusClient kubernetes.Interface, logger logr.Logger, namespace string, service *corev1.Service) error { +func createService(ctx context.Context, kusClient kubernetes.Interface, namespace string, service *corev1.Service) error { _, err := kusClient.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{}) if err != nil { - logger.Error(err, "Redis service creation is failed") + log.FromContext(ctx).Error(err, "Redis service creation is failed") return err } - logger.V(1).Info("Redis service creation is successful") + log.FromContext(ctx).Info("Redis service creation is successful") return nil } // updateService is a method to update service is Kubernetes -func updateService(k8sClient kubernetes.Interface, logger logr.Logger, namespace string, service *corev1.Service) error { +func updateService(ctx context.Context, k8sClient kubernetes.Interface, namespace string, service *corev1.Service) error { _, err := k8sClient.CoreV1().Services(namespace).Update(context.TODO(), service, metav1.UpdateOptions{}) if err != nil { - logger.Error(err, "Redis service update failed") + log.FromContext(ctx).Error(err, "Redis service update failed") return err } - logger.V(1).Info("Redis service updated successfully") + log.FromContext(ctx).Info("Redis service updated successfully") return nil } // getService is a method to get service is Kubernetes -func getService(k8sClient kubernetes.Interface, logger logr.Logger, namespace string, name string) (*corev1.Service, error) { +func getService(ctx context.Context, k8sClient kubernetes.Interface, namespace string, name string) (*corev1.Service, error) { getOpts := metav1.GetOptions{ TypeMeta: generateMetaInformation("Service", "v1"), } serviceInfo, err := k8sClient.CoreV1().Services(namespace).Get(context.TODO(), name, getOpts) if err != nil { - logger.V(1).Info("Redis service get action is failed") + log.FromContext(ctx).Info("Redis service get action is failed") return nil, err } - logger.V(1).Info("Redis service get action is successful") + log.FromContext(ctx).Info("Redis service get action is successful") return serviceInfo, nil } -func serviceLogger(namespace string, name string) logr.Logger { - reqLogger := log.WithValues("Request.Service.Namespace", namespace, "Request.Service.Name", name) - return reqLogger -} - // CreateOrUpdateService method will create or update Redis service -func CreateOrUpdateService(namespace string, serviceMeta metav1.ObjectMeta, ownerDef metav1.OwnerReference, epp exporterPortProvider, headless bool, serviceType string, port int, cl kubernetes.Interface, extra ...corev1.ServicePort) error { - logger := serviceLogger(namespace, serviceMeta.Name) +func CreateOrUpdateService(ctx context.Context, namespace string, serviceMeta metav1.ObjectMeta, ownerDef 
metav1.OwnerReference, epp exporterPortProvider, headless bool, serviceType string, port int, cl kubernetes.Interface, extra ...corev1.ServicePort) error { serviceDef := generateServiceDef(serviceMeta, epp, ownerDef, headless, serviceType, port, extra...) - storedService, err := getService(cl, logger, namespace, serviceMeta.GetName()) + storedService, err := getService(ctx, cl, namespace, serviceMeta.GetName()) if err != nil { if errors.IsNotFound(err) { if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(serviceDef); err != nil { //nolint - logger.Error(err, "Unable to patch redis service with compare annotations") + log.FromContext(ctx).Error(err, "Unable to patch redis service with compare annotations") } - return createService(cl, logger, namespace, serviceDef) + return createService(ctx, cl, namespace, serviceDef) } return err } - return patchService(storedService, serviceDef, namespace, cl) + return patchService(ctx, storedService, serviceDef, namespace, cl) } // patchService will patch Redis Kubernetes service -func patchService(storedService *corev1.Service, newService *corev1.Service, namespace string, cl kubernetes.Interface) error { - logger := serviceLogger(namespace, storedService.Name) +func patchService(ctx context.Context, storedService *corev1.Service, newService *corev1.Service, namespace string, cl kubernetes.Interface) error { // We want to try and keep this atomic as possible. newService.ResourceVersion = storedService.ResourceVersion newService.CreationTimestamp = storedService.CreationTimestamp @@ -169,11 +162,11 @@ func patchService(storedService *corev1.Service, newService *corev1.Service, nam patch.IgnoreField("apiVersion"), ) if err != nil { - logger.Error(err, "Unable to patch redis service with comparison object") + log.FromContext(ctx).Error(err, "Unable to patch redis service with comparison object") return err } if !patchResult.IsEmpty() { - logger.V(1).Info("Changes in service Detected, Updating...", "patch", string(patchResult.Patch)) + log.FromContext(ctx).Info("Changes in service Detected, Updating...", "patch", string(patchResult.Patch)) for key, value := range storedService.Annotations { if _, present := newService.Annotations[key]; !present { @@ -181,12 +174,12 @@ func patchService(storedService *corev1.Service, newService *corev1.Service, nam } } if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(newService); err != nil { - logger.Error(err, "Unable to patch redis service with comparison object") + log.FromContext(ctx).Error(err, "Unable to patch redis service with comparison object") return err } - logger.V(1).Info("Syncing Redis service with defined properties") - return updateService(cl, logger, namespace, newService) + log.FromContext(ctx).Info("Syncing Redis service with defined properties") + return updateService(ctx, cl, namespace, newService) } - logger.V(1).Info("Redis service is already in-sync") + log.FromContext(ctx).Info("Redis service is already in-sync") return nil } diff --git a/pkg/k8sutils/services_test.go b/pkg/k8sutils/services_test.go index 5906d61db..c5ef6b05f 100644 --- a/pkg/k8sutils/services_test.go +++ b/pkg/k8sutils/services_test.go @@ -4,7 +4,6 @@ import ( "context" "testing" - "github.com/go-logr/logr/testr" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -311,7 +310,7 @@ func Test_createService(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - logger := testr.New(t) + var k8sClient *k8sClientFake.Clientset if 
tt.exist { k8sClient = k8sClientFake.NewSimpleClientset(tt.service.DeepCopyObject()) @@ -319,7 +318,7 @@ func Test_createService(t *testing.T) { k8sClient = k8sClientFake.NewSimpleClientset() } - err := createService(k8sClient, logger, tt.service.GetNamespace(), tt.service) + err := createService(context.TODO(), k8sClient, tt.service.GetNamespace(), tt.service) if tt.wantErr { assert.Error(t, err) } else { @@ -407,10 +406,10 @@ func Test_updateService(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - logger := testr.New(t) + k8sClient := k8sClientFake.NewSimpleClientset(tt.current.DeepCopyObject()) - err := updateService(k8sClient, logger, tt.servinceNamespace, tt.updated) + err := updateService(context.TODO(), k8sClient, tt.servinceNamespace, tt.updated) if tt.wantErr { assert.Error(t, err) } else { @@ -460,7 +459,7 @@ func Test_getService(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - logger := testr.New(t) + var k8sClient *k8sClientFake.Clientset if tt.have != nil { k8sClient = k8sClientFake.NewSimpleClientset(tt.have.DeepCopyObject()) @@ -468,7 +467,7 @@ func Test_getService(t *testing.T) { k8sClient = k8sClientFake.NewSimpleClientset() } - got, err := getService(k8sClient, logger, tt.want.GetNamespace(), tt.want.GetName()) + got, err := getService(context.TODO(), k8sClient, tt.want.GetNamespace(), tt.want.GetName()) if tt.wantErr { assert.Error(t, err) } else { diff --git a/pkg/k8sutils/statefulset.go b/pkg/k8sutils/statefulset.go index 862b435ca..1eb29765b 100644 --- a/pkg/k8sutils/statefulset.go +++ b/pkg/k8sutils/statefulset.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "path" + "sigs.k8s.io/controller-runtime/pkg/log" "sort" "strconv" "strings" @@ -45,13 +46,11 @@ func (s *StatefulSetService) IsStatefulSetReady(ctx context.Context, namespace, var ( partition = 0 replicas = 1 - - logger = s.log.WithValues("namespace", namespace, "name", name) ) sts, err := s.kubeClient.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { - logger.Error(err, "failed to get statefulset") + log.FromContext(ctx).Error(err, "failed to get statefulset") return false } @@ -63,19 +62,19 @@ func (s *StatefulSetService) IsStatefulSetReady(ctx context.Context, namespace, } if expectedUpdateReplicas := replicas - partition; sts.Status.UpdatedReplicas < int32(expectedUpdateReplicas) { - logger.V(1).Info("StatefulSet is not ready", "Status.UpdatedReplicas", sts.Status.UpdatedReplicas, "ExpectedUpdateReplicas", expectedUpdateReplicas) + log.FromContext(ctx).Info("StatefulSet is not ready", "Status.UpdatedReplicas", sts.Status.UpdatedReplicas, "ExpectedUpdateReplicas", expectedUpdateReplicas) return false } if partition == 0 && sts.Status.CurrentRevision != sts.Status.UpdateRevision { - logger.V(1).Info("StatefulSet is not ready", "Status.CurrentRevision", sts.Status.CurrentRevision, "Status.UpdateRevision", sts.Status.UpdateRevision) + log.FromContext(ctx).Info("StatefulSet is not ready", "Status.CurrentRevision", sts.Status.CurrentRevision, "Status.UpdateRevision", sts.Status.UpdateRevision) return false } if sts.Status.ObservedGeneration != sts.ObjectMeta.Generation { - logger.V(1).Info("StatefulSet is not ready", "Status.ObservedGeneration", sts.Status.ObservedGeneration, "ObjectMeta.Generation", sts.ObjectMeta.Generation) + log.FromContext(ctx).Info("StatefulSet is not ready", "Status.ObservedGeneration", sts.Status.ObservedGeneration, "ObjectMeta.Generation", sts.ObjectMeta.Generation) return false } if 
int(sts.Status.ReadyReplicas) != replicas { - logger.V(1).Info("StatefulSet is not ready", "Status.ReadyReplicas", sts.Status.ReadyReplicas, "Replicas", replicas) + log.FromContext(ctx).Info("StatefulSet is not ready", "Status.ReadyReplicas", sts.Status.ReadyReplicas, "Replicas", replicas) return false } return true @@ -154,16 +153,16 @@ type initContainerParameters struct { } // CreateOrUpdateStateFul method will create or update Redis service -func CreateOrUpdateStateFul(ctx context.Context, cl kubernetes.Interface, logger logr.Logger, namespace string, stsMeta metav1.ObjectMeta, params statefulSetParameters, ownerDef metav1.OwnerReference, initcontainerParams initContainerParameters, containerParams containerParameters, sidecars *[]redisv1beta2.Sidecar) error { - storedStateful, err := GetStatefulSet(ctx, cl, logger, namespace, stsMeta.Name) +func CreateOrUpdateStateFul(ctx context.Context, cl kubernetes.Interface, namespace string, stsMeta metav1.ObjectMeta, params statefulSetParameters, ownerDef metav1.OwnerReference, initcontainerParams initContainerParameters, containerParams containerParameters, sidecars *[]redisv1beta2.Sidecar) error { + storedStateful, err := GetStatefulSet(ctx, cl, namespace, stsMeta.Name) statefulSetDef := generateStatefulSetsDef(stsMeta, params, ownerDef, initcontainerParams, containerParams, getSidecars(sidecars)) if err != nil { if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(statefulSetDef); err != nil { //nolint - logger.Error(err, "Unable to patch redis statefulset with comparison object") + log.FromContext(ctx).Error(err, "Unable to patch redis statefulset with comparison object") return err } if apierrors.IsNotFound(err) { - return createStatefulSet(ctx, cl, logger, namespace, statefulSetDef) + return createStatefulSet(ctx, cl, namespace, statefulSetDef) } return err } @@ -172,7 +171,6 @@ func CreateOrUpdateStateFul(ctx context.Context, cl kubernetes.Interface, logger // patchStateFulSet will patch Redis Kubernetes StateFulSet func patchStatefulSet(ctx context.Context, storedStateful *appsv1.StatefulSet, newStateful *appsv1.StatefulSet, namespace string, recreateStateFulSet bool, cl kubernetes.Interface) error { - logger := statefulSetLogger(namespace, storedStateful.Name) // We want to try and keep this atomic as possible. newStateful.ResourceVersion = storedStateful.ResourceVersion newStateful.CreationTimestamp = storedStateful.CreationTimestamp @@ -185,11 +183,11 @@ func patchStatefulSet(ctx context.Context, storedStateful *appsv1.StatefulSet, n patch.IgnoreField("apiVersion"), ) if err != nil { - logger.Error(err, "Unable to patch redis statefulset with comparison object") + log.FromContext(ctx).Error(err, "Unable to patch redis statefulset with comparison object") return err } if !patchResult.IsEmpty() { - logger.V(1).Info("Changes in statefulset Detected, Updating...", "patch", string(patchResult.Patch)) + log.FromContext(ctx).Info("Changes in statefulset Detected, Updating...", "patch", string(patchResult.Patch)) if len(newStateful.Spec.VolumeClaimTemplates) >= 1 && len(newStateful.Spec.VolumeClaimTemplates) == len(storedStateful.Spec.VolumeClaimTemplates) { // Field is immutable therefore we MUST keep it as is. 
if !apiequality.Semantic.DeepEqual(newStateful.Spec.VolumeClaimTemplates[0].Spec, storedStateful.Spec.VolumeClaimTemplates[0].Spec) { @@ -237,7 +235,7 @@ func patchStatefulSet(ctx context.Context, storedStateful *appsv1.StatefulSet, n if !updateFailed { updateFailed = true } - logger.Error(fmt.Errorf("redis:%s resize pvc failed:%s", storedStateful.Name, err.Error()), "") + log.FromContext(ctx).Error(fmt.Errorf("redis:%s resize pvc failed:%s", storedStateful.Name, err.Error()), "") } } } @@ -246,9 +244,9 @@ func patchStatefulSet(ctx context.Context, storedStateful *appsv1.StatefulSet, n annotations["storageCapacity"] = fmt.Sprintf("%d", stateCapacity) storedStateful.Annotations = annotations if realUpdate { - logger.Info(fmt.Sprintf("redis:%s resize pvc from %d to %d", storedStateful.Name, storedCapacity, stateCapacity)) + log.FromContext(ctx).Info(fmt.Sprintf("redis:%s resize pvc from %d to %d", storedStateful.Name, storedCapacity, stateCapacity)) } else { - logger.Info(fmt.Sprintf("redis:%s resize noting,just set annotations", storedStateful.Name)) + log.FromContext(ctx).Info(fmt.Sprintf("redis:%s resize noting,just set annotations", storedStateful.Name)) } } } @@ -265,12 +263,12 @@ func patchStatefulSet(ctx context.Context, storedStateful *appsv1.StatefulSet, n } } if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(newStateful); err != nil { - logger.Error(err, "Unable to patch redis statefulset with comparison object") + log.FromContext(ctx).Error(err, "Unable to patch redis statefulset with comparison object") return err } - return updateStatefulSet(ctx, cl, logger, namespace, newStateful, recreateStateFulSet) + return updateStatefulSet(ctx, cl, namespace, newStateful, recreateStateFulSet) } - logger.V(1).Info("Reconciliation Complete, no Changes required.") + log.FromContext(ctx).Info("Reconciliation Complete, no Changes required.") return nil } @@ -767,18 +765,18 @@ func getEnvironmentVariables(role string, enabledPassword *bool, secretName *str } // createStatefulSet is a method to create statefulset in Kubernetes -func createStatefulSet(ctx context.Context, cl kubernetes.Interface, logger logr.Logger, namespace string, stateful *appsv1.StatefulSet) error { +func createStatefulSet(ctx context.Context, cl kubernetes.Interface, namespace string, stateful *appsv1.StatefulSet) error { _, err := cl.AppsV1().StatefulSets(namespace).Create(context.TODO(), stateful, metav1.CreateOptions{}) if err != nil { - logger.Error(err, "Redis stateful creation failed") + log.FromContext(ctx).Error(err, "Redis stateful creation failed") return err } - logger.V(1).Info("Redis stateful successfully created") + log.FromContext(ctx).Info("Redis stateful successfully created") return nil } // updateStatefulSet is a method to update statefulset in Kubernetes -func updateStatefulSet(ctx context.Context, cl kubernetes.Interface, logger logr.Logger, namespace string, stateful *appsv1.StatefulSet, recreateStateFulSet bool) error { +func updateStatefulSet(ctx context.Context, cl kubernetes.Interface, namespace string, stateful *appsv1.StatefulSet, recreateStateFulSet bool) error { _, err := cl.AppsV1().StatefulSets(namespace).Update(context.TODO(), stateful, metav1.UpdateOptions{}) if recreateStateFulSet { sErr, ok := err.(*apierrors.StatusError) @@ -787,7 +785,7 @@ func updateStatefulSet(ctx context.Context, cl kubernetes.Interface, logger logr for messageCount, cause := range sErr.ErrStatus.Details.Causes { failMsg[messageCount] = cause.Message } - logger.V(1).Info("recreating StatefulSet because the 
update operation wasn't possible", "reason", strings.Join(failMsg, ", ")) + log.FromContext(ctx).Info("recreating StatefulSet because the update operation wasn't possible", "reason", strings.Join(failMsg, ", ")) propagationPolicy := metav1.DeletePropagationForeground if err := cl.AppsV1().StatefulSets(namespace).Delete(context.TODO(), stateful.GetName(), metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil { //nolint return errors.Wrap(err, "failed to delete StatefulSet to avoid forbidden action") @@ -795,33 +793,27 @@ func updateStatefulSet(ctx context.Context, cl kubernetes.Interface, logger logr } } if err != nil { - logger.Error(err, "Redis statefulset update failed") + log.FromContext(ctx).Error(err, "Redis statefulset update failed") return err } - logger.V(1).Info("Redis statefulset successfully updated ") + log.FromContext(ctx).Info("Redis statefulset successfully updated ") return nil } // GetStateFulSet is a method to get statefulset in Kubernetes -func GetStatefulSet(ctx context.Context, cl kubernetes.Interface, logger logr.Logger, namespace string, name string) (*appsv1.StatefulSet, error) { +func GetStatefulSet(ctx context.Context, cl kubernetes.Interface, namespace string, name string) (*appsv1.StatefulSet, error) { getOpts := metav1.GetOptions{ TypeMeta: generateMetaInformation("StatefulSet", "apps/v1"), } statefulInfo, err := cl.AppsV1().StatefulSets(namespace).Get(context.TODO(), name, getOpts) if err != nil { - logger.V(1).Info("Redis statefulset get action failed") + log.FromContext(ctx).Info("Redis statefulset get action failed") return nil, err } - logger.V(1).Info("Redis statefulset get action was successful") + log.FromContext(ctx).Info("Redis statefulset get action was successful") return statefulInfo, nil } -// statefulSetLogger will generate logging interface for Statfulsets -func statefulSetLogger(namespace string, name string) logr.Logger { - reqLogger := log.WithValues("Request.StatefulSet.Namespace", namespace, "Request.StatefulSet.Name", name) - return reqLogger -} - func getSidecars(sidecars *[]redisv1beta2.Sidecar) []redisv1beta2.Sidecar { if sidecars == nil { return []redisv1beta2.Sidecar{} diff --git a/pkg/k8sutils/statefulset_test.go b/pkg/k8sutils/statefulset_test.go index 181e58276..dc3dd341a 100644 --- a/pkg/k8sutils/statefulset_test.go +++ b/pkg/k8sutils/statefulset_test.go @@ -7,7 +7,6 @@ import ( common "github.com/OT-CONTAINER-KIT/redis-operator/api" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" - "github.com/go-logr/logr" "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -196,7 +195,6 @@ func TestGetVolumeMount(t *testing.T) { } func Test_GetStatefulSet(t *testing.T) { - logger := logr.Discard() tests := []struct { name string @@ -229,7 +227,7 @@ func Test_GetStatefulSet(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { client := k8sClientFake.NewSimpleClientset(test.sts.DeepCopy()) - _, err := GetStatefulSet(context.TODO(), client, logger, test.stsNamespace, test.stsName) + _, err := GetStatefulSet(context.TODO(), client, test.stsNamespace, test.stsName) if test.present { assert.Nil(t, err) } else { @@ -240,7 +238,6 @@ func Test_GetStatefulSet(t *testing.T) { } func Test_createStatefulSet(t *testing.T) { - logger := logr.Discard() tests := []struct { name string @@ -279,7 +276,7 @@ func Test_createStatefulSet(t *testing.T) { } else { client = k8sClientFake.NewSimpleClientset() } - err := createStatefulSet(context.TODO(), 
client, logger, test.sts.GetNamespace(), &test.sts) + err := createStatefulSet(context.TODO(), client, test.sts.GetNamespace(), &test.sts) if test.present { assert.NotNil(t, err) } else { @@ -290,7 +287,7 @@ func Test_createStatefulSet(t *testing.T) { } func TestUpdateStatefulSet(t *testing.T) { - logger := logr.Discard() + tests := []struct { name string existingStsSpec appsv1.StatefulSetSpec @@ -395,7 +392,7 @@ func TestUpdateStatefulSet(t *testing.T) { } else { client = k8sClientFake.NewSimpleClientset() } - err := updateStatefulSet(context.TODO(), client, logger, updatedSts.GetNamespace(), &updatedSts, test.recreateSts) + err := updateStatefulSet(context.TODO(), client, updatedSts.GetNamespace(), &updatedSts, test.recreateSts) if test.expectErr != nil { assert.Error(err, "Expected Error while updating Statefulset") assert.Equal(test.expectErr, err) @@ -412,7 +409,7 @@ func TestUpdateStatefulSet(t *testing.T) { } func TestCreateOrUpdateStateFul(t *testing.T) { - logger := logr.Discard() + tests := []struct { name string stsParams statefulSetParameters @@ -554,7 +551,7 @@ func TestCreateOrUpdateStateFul(t *testing.T) { } else { client = k8sClientFake.NewSimpleClientset() } - err := CreateOrUpdateStateFul(context.TODO(), client, logger, updatedSts.GetNamespace(), updatedSts.ObjectMeta, test.stsParams, test.stsOwnerDef, test.initContainerParams, test.containerParams, test.sidecar) + err := CreateOrUpdateStateFul(context.TODO(), client, updatedSts.GetNamespace(), updatedSts.ObjectMeta, test.stsParams, test.stsOwnerDef, test.initContainerParams, test.containerParams, test.sidecar) if test.expectErr != nil { assert.Error(err, "Expected Error while updating Statefulset") assert.Equal(test.expectErr, err) @@ -577,7 +574,7 @@ func TestCreateOrUpdateStateFul(t *testing.T) { client = k8sClientFake.NewSimpleClientset() - err := CreateOrUpdateStateFul(context.TODO(), client, logger, updatedSts.GetNamespace(), updatedSts.ObjectMeta, test.stsParams, test.stsOwnerDef, test.initContainerParams, test.containerParams, test.sidecar) + err := CreateOrUpdateStateFul(context.TODO(), client, updatedSts.GetNamespace(), updatedSts.ObjectMeta, test.stsParams, test.stsOwnerDef, test.initContainerParams, test.containerParams, test.sidecar) assert.Nil(err) } }) diff --git a/pkg/k8sutils/status.go b/pkg/k8sutils/status.go index 16d952e19..ddd47e3c6 100644 --- a/pkg/k8sutils/status.go +++ b/pkg/k8sutils/status.go @@ -3,10 +3,10 @@ package k8sutils import ( "context" "reflect" + "sigs.k8s.io/controller-runtime/pkg/log" "github.com/OT-CONTAINER-KIT/redis-operator/api/status" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" - "github.com/go-logr/logr" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -14,15 +14,8 @@ import ( "k8s.io/client-go/dynamic" ) -// statusLogger will generate logging interface for status -func statusLogger(namespace string, name string) logr.Logger { - reqLogger := log.WithValues("Request.Namespace", namespace, "Request.Name", name) - return reqLogger -} - // UpdateRedisClusterStatus will update the status of the RedisCluster func UpdateRedisClusterStatus(ctx context.Context, cr *redisv1beta2.RedisCluster, state status.RedisClusterState, reason string, readyLeaderReplicas, readyFollowerReplicas int32, dcl dynamic.Interface) error { - logger := statusLogger(cr.Namespace, cr.Name) newStatus := redisv1beta2.RedisClusterStatus{ State: state, Reason: reason, @@ -40,14 +33,14 @@ func 
UpdateRedisClusterStatus(ctx context.Context, cr *redisv1beta2.RedisCluster
 	}
 	unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(cr)
 	if err != nil {
-		logger.Error(err, "Failed to convert CR to unstructured object")
+		log.FromContext(ctx).Error(err, "Failed to convert CR to unstructured object")
 		return err
 	}
 	unstructuredRedisCluster := &unstructured.Unstructured{Object: unstructuredObj}
 	_, err = dcl.Resource(gvr).Namespace(cr.Namespace).UpdateStatus(context.TODO(), unstructuredRedisCluster, metav1.UpdateOptions{})
 	if err != nil {
-		logger.Error(err, "Failed to update status")
+		log.FromContext(ctx).Error(err, "Failed to update status")
 		return err
 	}
 	return nil

From 8df44c3be7b96e8fdda4ab348a74a946ec1df3f6 Mon Sep 17 00:00:00 2001
From: drivebyer
Date: Fri, 15 Nov 2024 16:20:44 +0800
Subject: [PATCH 4/8] remove logger

Signed-off-by: drivebyer
---
 main.go | 12 +++---------
 pkg/controllers/redis/redis_controller.go | 4 ----
 .../rediscluster/rediscluster_controller.go | 2 --
 .../rediscluster_controller_suite_test.go | 3 +--
 .../redisreplication/redisreplication_controller.go | 8 +++-----
 .../redisreplication_controller_suite_test.go | 5 ++---
 .../redissentinel/redissentinel_controller.go | 4 ----
 pkg/k8sutils/pod.go | 11 ++++-------
 pkg/k8sutils/redis-sentinel_test.go | 4 ----
 pkg/k8sutils/statefulset.go | 6 +-----
 10 files changed, 14 insertions(+), 45 deletions(-)

diff --git a/main.go b/main.go
index 7d761d59d..576d16fbf 100644
--- a/main.go
+++ b/main.go
@@ -122,33 +122,28 @@ func main() {
 		Client: mgr.GetClient(),
 		K8sClient: k8sclient,
 		Dk8sClient: dk8sClient,
-		Log: ctrl.Log.WithName("controllers").WithName("Redis"),
 		Scheme: mgr.GetScheme(),
 	}).SetupWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "Redis")
 		os.Exit(1)
 	}
-	rcLog := ctrl.Log.WithName("controllers").WithName("RedisCluster")
 	if err = (&rediscluster.Reconciler{
 		Client: mgr.GetClient(),
 		K8sClient: k8sclient,
 		Dk8sClient: dk8sClient,
-		Log: rcLog,
 		Scheme: mgr.GetScheme(),
-		StatefulSet: k8sutils.NewStatefulSetService(k8sclient, rcLog),
+		StatefulSet: k8sutils.NewStatefulSetService(k8sclient),
 	}).SetupWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "RedisCluster")
 		os.Exit(1)
 	}
-	rrLog := ctrl.Log.WithName("controllers").WithName("RedisReplication")
 	if err = (&redisreplication.Reconciler{
 		Client: mgr.GetClient(),
 		K8sClient: k8sclient,
 		Dk8sClient: dk8sClient,
-		Log: rrLog,
 		Scheme: mgr.GetScheme(),
-		Pod: k8sutils.NewPodService(k8sclient, rrLog),
-		StatefulSet: k8sutils.NewStatefulSetService(k8sclient, rrLog),
+		Pod: k8sutils.NewPodService(k8sclient),
+		StatefulSet: k8sutils.NewStatefulSetService(k8sclient),
 	}).SetupWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "RedisReplication")
 		os.Exit(1)
@@ -157,7 +152,6 @@ func main() {
 		Client: mgr.GetClient(),
 		K8sClient: k8sclient,
 		Dk8sClient: dk8sClient,
-		Log: ctrl.Log.WithName("controllers").WithName("RedisSentinel"),
 		Scheme: mgr.GetScheme(),
 		ReplicationWatcher: intctrlutil.NewResourceWatcher(),
 	}).SetupWithManager(mgr); err != nil {
diff --git a/pkg/controllers/redis/redis_controller.go b/pkg/controllers/redis/redis_controller.go
index 531261164..061f33e4c 100644
--- a/pkg/controllers/redis/redis_controller.go
+++ b/pkg/controllers/redis/redis_controller.go
@@ -23,7 +23,6 @@ import (
 	redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2"
 	intctrlutil "github.com/OT-CONTAINER-KIT/redis-operator/pkg/controllerutil"
"github.com/OT-CONTAINER-KIT/redis-operator/pkg/k8sutils" - "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" @@ -36,13 +35,10 @@ type Reconciler struct { client.Client K8sClient kubernetes.Interface Dk8sClient dynamic.Interface - Log logr.Logger Scheme *runtime.Scheme } func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - reqLogger := r.Log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name) - reqLogger.Info("Reconciling opstree redis controller") instance := &redisv1beta2.Redis{} err := r.Client.Get(context.TODO(), req.NamespacedName, instance) diff --git a/pkg/controllers/rediscluster/rediscluster_controller.go b/pkg/controllers/rediscluster/rediscluster_controller.go index ce81f0a9b..d1d53d936 100644 --- a/pkg/controllers/rediscluster/rediscluster_controller.go +++ b/pkg/controllers/rediscluster/rediscluster_controller.go @@ -27,7 +27,6 @@ import ( intctrlutil "github.com/OT-CONTAINER-KIT/redis-operator/pkg/controllerutil" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/k8sutils" retry "github.com/avast/retry-go" - "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/dynamic" @@ -42,7 +41,6 @@ type Reconciler struct { k8sutils.StatefulSet K8sClient kubernetes.Interface Dk8sClient dynamic.Interface - Log logr.Logger Scheme *runtime.Scheme } diff --git a/pkg/controllers/rediscluster/rediscluster_controller_suite_test.go b/pkg/controllers/rediscluster/rediscluster_controller_suite_test.go index 4b09ff422..8cfe65206 100644 --- a/pkg/controllers/rediscluster/rediscluster_controller_suite_test.go +++ b/pkg/controllers/rediscluster/rediscluster_controller_suite_test.go @@ -100,13 +100,12 @@ var _ = BeforeSuite(func() { dk8sClient, err := dynamic.NewForConfig(cfg) Expect(err).ToNot(HaveOccurred()) - rcLog := ctrl.Log.WithName("controllers").WithName("RedisCluster") err = (&Reconciler{ Client: k8sManager.GetClient(), K8sClient: k8sClient, Dk8sClient: dk8sClient, Scheme: k8sManager.GetScheme(), - StatefulSet: k8sutils.NewStatefulSetService(k8sClient, rcLog), + StatefulSet: k8sutils.NewStatefulSetService(k8sClient), }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) diff --git a/pkg/controllers/redisreplication/redisreplication_controller.go b/pkg/controllers/redisreplication/redisreplication_controller.go index 4410f021a..544d69595 100644 --- a/pkg/controllers/redisreplication/redisreplication_controller.go +++ b/pkg/controllers/redisreplication/redisreplication_controller.go @@ -2,12 +2,12 @@ package redisreplication import ( "context" + "sigs.k8s.io/controller-runtime/pkg/log" "time" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" intctrlutil "github.com/OT-CONTAINER-KIT/redis-operator/pkg/controllerutil" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/k8sutils" - "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -24,13 +24,11 @@ type Reconciler struct { k8sutils.StatefulSet K8sClient kubernetes.Interface Dk8sClient dynamic.Interface - Log logr.Logger Scheme *runtime.Scheme } func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - reqLogger := r.Log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name) - reqLogger.Info("Reconciling opstree redis replication controller") + logger := log.FromContext(ctx, "Request.Namespace", req.Namespace, "Request.Name", 
req.Name) instance := &redisv1beta2.RedisReplication{} err := r.Client.Get(context.TODO(), req.NamespacedName, instance) @@ -64,7 +62,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu var realMaster string masterNodes := k8sutils.GetRedisNodesByRole(ctx, r.K8sClient, instance, "master") if len(masterNodes) > 1 { - reqLogger.Info("Creating redis replication by executing replication creation commands") + logger.Info("Creating redis replication by executing replication creation commands") slaveNodes := k8sutils.GetRedisNodesByRole(ctx, r.K8sClient, instance, "slave") realMaster = k8sutils.GetRedisReplicationRealMaster(ctx, r.K8sClient, instance, masterNodes) if len(slaveNodes) == 0 { diff --git a/pkg/controllers/redisreplication/redisreplication_controller_suite_test.go b/pkg/controllers/redisreplication/redisreplication_controller_suite_test.go index 8ef3a7f5f..445f7ce7d 100644 --- a/pkg/controllers/redisreplication/redisreplication_controller_suite_test.go +++ b/pkg/controllers/redisreplication/redisreplication_controller_suite_test.go @@ -100,14 +100,13 @@ var _ = BeforeSuite(func() { dk8sClient, err := dynamic.NewForConfig(cfg) Expect(err).ToNot(HaveOccurred()) - rrLog := ctrl.Log.WithName("controllers").WithName("RedisReplication") err = (&Reconciler{ Client: k8sManager.GetClient(), K8sClient: k8sClient, Dk8sClient: dk8sClient, Scheme: k8sManager.GetScheme(), - Pod: k8sutils.NewPodService(k8sClient, rrLog), - StatefulSet: k8sutils.NewStatefulSetService(k8sClient, rrLog), + Pod: k8sutils.NewPodService(k8sClient), + StatefulSet: k8sutils.NewStatefulSetService(k8sClient), }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) diff --git a/pkg/controllers/redissentinel/redissentinel_controller.go b/pkg/controllers/redissentinel/redissentinel_controller.go index e33b5647d..420730a4d 100644 --- a/pkg/controllers/redissentinel/redissentinel_controller.go +++ b/pkg/controllers/redissentinel/redissentinel_controller.go @@ -7,7 +7,6 @@ import ( redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" intctrlutil "github.com/OT-CONTAINER-KIT/redis-operator/pkg/controllerutil" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/k8sutils" - "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/dynamic" @@ -21,15 +20,12 @@ type RedisSentinelReconciler struct { client.Client K8sClient kubernetes.Interface Dk8sClient dynamic.Interface - Log logr.Logger Scheme *runtime.Scheme ReplicationWatcher *intctrlutil.ResourceWatcher } func (r *RedisSentinelReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - reqLogger := r.Log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name) - reqLogger.Info("Reconciling opstree redis controller") instance := &redisv1beta2.RedisSentinel{} err := r.Client.Get(context.TODO(), req.NamespacedName, instance) diff --git a/pkg/k8sutils/pod.go b/pkg/k8sutils/pod.go index e52b131fc..21ff33a2a 100644 --- a/pkg/k8sutils/pod.go +++ b/pkg/k8sutils/pod.go @@ -4,9 +4,9 @@ import ( "context" "encoding/json" "fmt" + "sigs.k8s.io/controller-runtime/pkg/log" "strings" - "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -20,14 +20,11 @@ type Pod interface { type PodService struct { kubeClient kubernetes.Interface - log logr.Logger } -func NewPodService(kubeClient kubernetes.Interface, log logr.Logger) *PodService { - log = log.WithValues("service", 
"k8s.pod") +func NewPodService(kubeClient kubernetes.Interface) *PodService { return &PodService{ kubeClient: kubeClient, - log: log, } } @@ -48,7 +45,7 @@ type patchStringValue struct { } func (s *PodService) PatchPodLabels(ctx context.Context, namespace, podName string, labels map[string]string) error { - s.log.Info("Patch pod labels", "namespace", namespace, "podName", podName, "labels", labels) + log.FromContext(ctx).Info("Patch pod labels", "namespace", namespace, "podName", podName, "labels", labels) var payloads []interface{} for labelKey, labelValue := range labels { @@ -63,7 +60,7 @@ func (s *PodService) PatchPodLabels(ctx context.Context, namespace, podName stri _, err := s.kubeClient.CoreV1().Pods(namespace).Patch(ctx, podName, types.JSONPatchType, payloadBytes, metav1.PatchOptions{}) if err != nil { - s.log.Error(err, "Patch pod labels failed", "namespace", namespace, "podName", podName) + log.FromContext(ctx).Error(err, "Patch pod labels failed", "namespace", namespace, "podName", podName) } return err } diff --git a/pkg/k8sutils/redis-sentinel_test.go b/pkg/k8sutils/redis-sentinel_test.go index 56e997762..5f2160748 100644 --- a/pkg/k8sutils/redis-sentinel_test.go +++ b/pkg/k8sutils/redis-sentinel_test.go @@ -9,7 +9,6 @@ import ( common "github.com/OT-CONTAINER-KIT/redis-operator/api" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" - "github.com/go-logr/logr" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" @@ -292,7 +291,6 @@ func Test_generateRedisSentinelInitContainerParams(t *testing.T) { func Test_getSentinelEnvVariable(t *testing.T) { type args struct { client kubernetes.Interface - logger logr.Logger cr *redisv1beta2.RedisSentinel } tests := []struct { @@ -304,7 +302,6 @@ func Test_getSentinelEnvVariable(t *testing.T) { name: "When RedisSentinelConfig is nil", args: args{ client: nil, - logger: logr.Logger{}, cr: &redisv1beta2.RedisSentinel{}, }, want: &[]corev1.EnvVar{}, @@ -313,7 +310,6 @@ func Test_getSentinelEnvVariable(t *testing.T) { name: "When RedisSentinelConfig is not nil", args: args{ client: nil, - logger: logr.Logger{}, cr: &redisv1beta2.RedisSentinel{ Spec: redisv1beta2.RedisSentinelSpec{ RedisSentinelConfig: &redisv1beta2.RedisSentinelConfig{ diff --git a/pkg/k8sutils/statefulset.go b/pkg/k8sutils/statefulset.go index 1eb29765b..5a3939dfb 100644 --- a/pkg/k8sutils/statefulset.go +++ b/pkg/k8sutils/statefulset.go @@ -12,7 +12,6 @@ import ( redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/util" "github.com/banzaicloud/k8s-objectmatcher/patch" - "github.com/go-logr/logr" "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -31,14 +30,11 @@ type StatefulSet interface { type StatefulSetService struct { kubeClient kubernetes.Interface - log logr.Logger } -func NewStatefulSetService(kubeClient kubernetes.Interface, log logr.Logger) *StatefulSetService { - log = log.WithValues("service", "k8s.statefulset") +func NewStatefulSetService(kubeClient kubernetes.Interface) *StatefulSetService { return &StatefulSetService{ kubeClient: kubeClient, - log: log, } } From d4f3e139df927349223c200fa41d4b4e6779f50f Mon Sep 17 00:00:00 2001 From: drivebyer Date: Fri, 15 Nov 2024 16:24:03 +0800 Subject: [PATCH 5/8] remove logger Signed-off-by: drivebyer --- pkg/controllerutil/controller_common.go | 2 +- pkg/k8sutils/cluster-scaling.go | 26 ++++++------ pkg/k8sutils/pod.go | 2 +- pkg/k8sutils/poddisruption.go | 16 ++++---- 
pkg/k8sutils/redis-sentinel.go | 2 +- pkg/k8sutils/redis.go | 54 ++++++++++++------------- pkg/k8sutils/services.go | 14 +++---- pkg/k8sutils/statefulset.go | 26 ++++++------ 8 files changed, 71 insertions(+), 71 deletions(-) diff --git a/pkg/controllerutil/controller_common.go b/pkg/controllerutil/controller_common.go index 1b5c7f4d2..f153dea92 100644 --- a/pkg/controllerutil/controller_common.go +++ b/pkg/controllerutil/controller_common.go @@ -18,7 +18,7 @@ func RequeueAfter(ctx context.Context, duration time.Duration, msg string, keysA if msg == "" { msg = "requeue-after" } - log.FromContext(ctx).Info(msg, keysAndValues...) + log.FromContext(ctx).V(1).Info(msg, keysAndValues...) return reconcile.Result{ Requeue: true, RequeueAfter: duration, diff --git a/pkg/k8sutils/cluster-scaling.go b/pkg/k8sutils/cluster-scaling.go index 3b7044e0a..d8c1d3c34 100644 --- a/pkg/k8sutils/cluster-scaling.go +++ b/pkg/k8sutils/cluster-scaling.go @@ -70,10 +70,10 @@ func ReshardRedisCluster(ctx context.Context, client kubernetes.Interface, cr *r cmd = append(cmd, "--cluster-yes") - log.FromContext(ctx).Info("Redis cluster reshard command is", "Command", cmd) + log.FromContext(ctx).V(1).Info("Redis cluster reshard command is", "Command", cmd) if slot == "0" { - log.FromContext(ctx).Info("Skipped the execution of", "Cmd", cmd) + log.FromContext(ctx).V(1).Info("Skipped the execution of", "Cmd", cmd) return } executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") @@ -101,7 +101,7 @@ func getRedisClusterSlots(ctx context.Context, redisClient *redis.Client, nodeID } } - log.FromContext(ctx).Info("Total cluster slots to be transferred from", "node", nodeID, "is", totalSlots) + log.FromContext(ctx).V(1).Info("Total cluster slots to be transferred from", "node", nodeID, "is", totalSlots) return strconv.Itoa(totalSlots) } @@ -128,7 +128,7 @@ func getRedisNodeID(ctx context.Context, client kubernetes.Interface, cr *redisv log.FromContext(ctx).Error(err, "Redis command failed with this error") return "" } - log.FromContext(ctx).Info("Redis node ID ", "is", output) + log.FromContext(ctx).V(1).Info("Redis node ID ", "is", output) return output } @@ -161,7 +161,7 @@ func RebalanceRedisClusterEmptyMasters(ctx context.Context, client kubernetes.In cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) - log.FromContext(ctx).Info("Redis cluster rebalance command is", "Command", cmd) + log.FromContext(ctx).V(1).Info("Redis cluster rebalance command is", "Command", cmd) executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-1") } @@ -179,7 +179,7 @@ func CheckIfEmptyMasters(ctx context.Context, client kubernetes.Interface, cr *r podSlots := getRedisClusterSlots(ctx, redisClient, podNodeID) if podSlots == "0" || podSlots == "" { - log.FromContext(ctx).Info("Found Empty Redis Leader Node", "pod", pod) + log.FromContext(ctx).V(1).Info("Found Empty Redis Leader Node", "pod", pod) RebalanceRedisClusterEmptyMasters(ctx, client, cr) break } @@ -213,7 +213,7 @@ func RebalanceRedisCluster(ctx context.Context, client kubernetes.Interface, cr cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) 
- log.FromContext(ctx).Info("Redis cluster rebalance command is", "Command", cmd) + log.FromContext(ctx).V(1).Info("Redis cluster rebalance command is", "Command", cmd) executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-1") } @@ -252,7 +252,7 @@ func AddRedisNodeToCluster(ctx context.Context, client kubernetes.Interface, cr cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) - log.FromContext(ctx).Info("Redis cluster add-node command is", "Command", cmd) + log.FromContext(ctx).V(1).Info("Redis cluster add-node command is", "Command", cmd) executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") } @@ -269,7 +269,7 @@ func getAttachedFollowerNodeIDs(ctx context.Context, redisClient *redis.Client, stringSlice := strings.Split(slave, " ") slaveIDs = append(slaveIDs, stringSlice[0]) } - log.FromContext(ctx).Info("Slaves Nodes attached to", "node", masterNodeID, "are", slaveIDs) + log.FromContext(ctx).V(1).Info("Slaves Nodes attached to", "node", masterNodeID, "are", slaveIDs) return slaveIDs } @@ -313,7 +313,7 @@ func RemoveRedisFollowerNodesFromCluster(ctx context.Context, client kubernetes. for _, followerNodeID := range followerNodeIDs { cmd = append(cmd, followerNodeID) - log.FromContext(ctx).Info("Redis cluster follower remove command is", "Command", cmd) + log.FromContext(ctx).V(1).Info("Redis cluster follower remove command is", "Command", cmd) executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") cmd = cmd[:len(cmd)-1] } @@ -357,9 +357,9 @@ func RemoveRedisNodeFromCluster(ctx context.Context, client kubernetes.Interface cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) - log.FromContext(ctx).Info("Redis cluster leader remove command is", "Command", cmd) + log.FromContext(ctx).V(1).Info("Redis cluster leader remove command is", "Command", cmd) if getRedisClusterSlots(ctx, redisClient, removePodNodeID) != "0" { - log.FromContext(ctx).Info("Skipping execution remove leader not empty", "cmd", cmd) + log.FromContext(ctx).V(1).Info("Skipping execution remove leader not empty", "cmd", cmd) } executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") } @@ -419,6 +419,6 @@ func ClusterFailover(ctx context.Context, client kubernetes.Interface, cr *redis cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, slavePodName)...) 
- log.FromContext(ctx).Info("Redis cluster failover command is", "Command", cmd) + log.FromContext(ctx).V(1).Info("Redis cluster failover command is", "Command", cmd) executeCommand(ctx, client, cr, cmd, slavePodName) } diff --git a/pkg/k8sutils/pod.go b/pkg/k8sutils/pod.go index 21ff33a2a..8b9dacf35 100644 --- a/pkg/k8sutils/pod.go +++ b/pkg/k8sutils/pod.go @@ -45,7 +45,7 @@ type patchStringValue struct { } func (s *PodService) PatchPodLabels(ctx context.Context, namespace, podName string, labels map[string]string) error { - log.FromContext(ctx).Info("Patch pod labels", "namespace", namespace, "podName", podName, "labels", labels) + log.FromContext(ctx).V(1).Info("Patch pod labels", "namespace", namespace, "podName", podName, "labels", labels) var payloads []interface{} for labelKey, labelValue := range labels { diff --git a/pkg/k8sutils/poddisruption.go b/pkg/k8sutils/poddisruption.go index 395d2eaab..ed19c18b1 100644 --- a/pkg/k8sutils/poddisruption.go +++ b/pkg/k8sutils/poddisruption.go @@ -30,7 +30,7 @@ func ReconcileRedisPodDisruptionBudget(ctx context.Context, cr *redisv1beta2.Red if err == nil { return deletePodDisruptionBudget(ctx, cr.Namespace, pdbName, cl) } else if err != nil && errors.IsNotFound(err) { - log.FromContext(ctx).Info("Reconciliation Successful, no PodDisruptionBudget Found.") + log.FromContext(ctx).V(1).Info("Reconciliation Successful, no PodDisruptionBudget Found.") // Its ok if its not found, as we're deleting anyway return nil } @@ -52,7 +52,7 @@ func ReconcileSentinelPodDisruptionBudget(ctx context.Context, cr *redisv1beta2. if err == nil { return deletePodDisruptionBudget(ctx, cr.Namespace, pdbName, cl) } else if err != nil && errors.IsNotFound(err) { - log.FromContext(ctx).Info("Reconciliation Successful, no PodDisruptionBudget Found.") + log.FromContext(ctx).V(1).Info("Reconciliation Successful, no PodDisruptionBudget Found.") // Its ok if its not found, as we're deleting anyway return nil } @@ -151,7 +151,7 @@ func patchPodDisruptionBudget(ctx context.Context, storedPdb *policyv1.PodDisrup return err } if !patchResult.IsEmpty() { - log.FromContext(ctx).Info("Changes in PodDisruptionBudget Detected, Updating...", + log.FromContext(ctx).V(1).Info("Changes in PodDisruptionBudget Detected, Updating...", "patch", string(patchResult.Patch), "Current", string(patchResult.Current), "Original", string(patchResult.Original), @@ -178,7 +178,7 @@ func createPodDisruptionBudget(ctx context.Context, namespace string, pdb *polic log.FromContext(ctx).Error(err, "Redis PodDisruptionBudget creation failed") return err } - log.FromContext(ctx).Info("Redis PodDisruptionBudget creation was successful") + log.FromContext(ctx).V(1).Info("Redis PodDisruptionBudget creation was successful") return nil } @@ -189,7 +189,7 @@ func updatePodDisruptionBudget(ctx context.Context, namespace string, pdb *polic log.FromContext(ctx).Error(err, "Redis PodDisruptionBudget update failed") return err } - log.FromContext(ctx).Info("Redis PodDisruptionBudget update was successful", "PDB.Spec", pdb.Spec) + log.FromContext(ctx).V(1).Info("Redis PodDisruptionBudget update was successful", "PDB.Spec", pdb.Spec) return nil } @@ -200,7 +200,7 @@ func deletePodDisruptionBudget(ctx context.Context, namespace string, pdbName st log.FromContext(ctx).Error(err, "Redis PodDisruption deletion failed") return err } - log.FromContext(ctx).Info("Redis PodDisruption delete was successful") + log.FromContext(ctx).V(1).Info("Redis PodDisruption delete was successful") return nil } @@ -211,9 +211,9 @@ func 
GetPodDisruptionBudget(ctx context.Context, namespace string, pdb string, c } pdbInfo, err := cl.PolicyV1().PodDisruptionBudgets(namespace).Get(context.TODO(), pdb, getOpts) if err != nil { - log.FromContext(ctx).Info("Redis PodDisruptionBudget get action failed") + log.FromContext(ctx).V(1).Info("Redis PodDisruptionBudget get action failed") return nil, err } - log.FromContext(ctx).Info("Redis PodDisruptionBudget get action was successful") + log.FromContext(ctx).V(1).Info("Redis PodDisruptionBudget get action was successful") return pdbInfo, err } diff --git a/pkg/k8sutils/redis-sentinel.go b/pkg/k8sutils/redis-sentinel.go index 73d29b6d3..d27ee487f 100644 --- a/pkg/k8sutils/redis-sentinel.go +++ b/pkg/k8sutils/redis-sentinel.go @@ -315,7 +315,7 @@ func getRedisReplicationMasterIP(ctx context.Context, client kubernetes.Interfac log.FromContext(ctx).Error(err, "Failed to Execute Get Request", "replication name", replicationName, "namespace", replicationNamespace) return "" } else { - log.FromContext(ctx).Info("Successfully Execute the Get Request", "replication name", replicationName, "namespace", replicationNamespace) + log.FromContext(ctx).V(1).Info("Successfully Execute the Get Request", "replication name", replicationName, "namespace", replicationNamespace) } // Marshal CustomObject to JSON diff --git a/pkg/k8sutils/redis.go b/pkg/k8sutils/redis.go index c39fc7473..414b5d3e4 100644 --- a/pkg/k8sutils/redis.go +++ b/pkg/k8sutils/redis.go @@ -27,7 +27,7 @@ type RedisDetails struct { // getRedisServerIP will return the IP of redis service func getRedisServerIP(ctx context.Context, client kubernetes.Interface, redisInfo RedisDetails) string { - log.FromContext(ctx).Info("Fetching Redis pod", "namespace", redisInfo.Namespace, "podName", redisInfo.PodName) + log.FromContext(ctx).V(1).Info("Fetching Redis pod", "namespace", redisInfo.Namespace, "podName", redisInfo.PodName) redisPod, err := client.CoreV1().Pods(redisInfo.Namespace).Get(context.TODO(), redisInfo.PodName, metav1.GetOptions{}) if err != nil { @@ -36,20 +36,20 @@ func getRedisServerIP(ctx context.Context, client kubernetes.Interface, redisInf } redisIP := redisPod.Status.PodIP - log.FromContext(ctx).Info("Fetched Redis pod IP", "ip", redisIP) + log.FromContext(ctx).V(1).Info("Fetched Redis pod IP", "ip", redisIP) // Check if IP is empty if redisIP == "" { - log.FromContext(ctx).Info("Redis pod IP is empty", "namespace", redisInfo.Namespace, "podName", redisInfo.PodName) + log.FromContext(ctx).V(1).Info("Redis pod IP is empty", "namespace", redisInfo.Namespace, "podName", redisInfo.PodName) return "" } // If we're NOT IPv4, assume we're IPv6.. 
if net.ParseIP(redisIP).To4() == nil { - log.FromContext(ctx).Info("Redis is using IPv6", "ip", redisIP) + log.FromContext(ctx).V(1).Info("Redis is using IPv6", "ip", redisIP) } - log.FromContext(ctx).Info("Successfully got the IP for Redis", "ip", redisIP) + log.FromContext(ctx).V(1).Info("Successfully got the IP for Redis", "ip", redisIP) return redisIP } @@ -77,7 +77,7 @@ func CreateSingleLeaderRedisCommand(ctx context.Context, cr *redisv1beta2.RedisC for i := 0; i < 16384; i++ { cmd = append(cmd, strconv.Itoa(i)) } - log.FromContext(ctx).Info("Generating Redis Add Slots command for single node cluster", + log.FromContext(ctx).V(1).Info("Generating Redis Add Slots command for single node cluster", "BaseCommand", cmd[:3], "SlotsRange", "0-16383", "TotalSlots", 16384) @@ -148,7 +148,7 @@ func CreateMultipleLeaderRedisCommand(ctx context.Context, client kubernetes.Int } cmd = append(cmd, "--cluster-yes") - log.FromContext(ctx).Info("Redis cluster creation command", "CommandBase", cmd[:3], "Replicas", replicas) + log.FromContext(ctx).V(1).Info("Redis cluster creation command", "CommandBase", cmd[:3], "Replicas", replicas) return cmd } @@ -176,7 +176,7 @@ func ExecuteRedisClusterCommand(ctx context.Context, client kubernetes.Interface cmd = append(cmd, pass) } cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) - log.FromContext(ctx).Info("Redis cluster creation command is", "Command", cmd) + log.FromContext(ctx).V(1).Info("Redis cluster creation command is", "Command", cmd) executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") } @@ -218,7 +218,7 @@ func createRedisReplicationCommand(ctx context.Context, client kubernetes.Interf cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, leaderPod.PodName)...) - log.FromContext(ctx).Info("Generated Redis replication command", + log.FromContext(ctx).V(1).Info("Generated Redis replication command", "FollowerAddress", followerAddress, "LeaderAddress", leaderAddress, "Command", cmd) @@ -251,7 +251,7 @@ func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Inter } podIP = getRedisServerIP(ctx, client, followerPod) if !checkRedisNodePresence(ctx, cr, nodes, podIP) { - log.FromContext(ctx).Info("Adding node to cluster.", "Node.IP", podIP, "Follower.Pod", followerPod) + log.FromContext(ctx).V(1).Info("Adding node to cluster.", "Node.IP", podIP, "Follower.Pod", followerPod) cmd := createRedisReplicationCommand(ctx, client, cr, leaderPod, followerPod) redisClient := configureRedisClient(ctx, client, cr, followerPod.PodName) pong, err := redisClient.Ping(ctx).Result() @@ -263,10 +263,10 @@ func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Inter if pong == "PONG" { executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") } else { - log.FromContext(ctx).Info("Skipping execution of command due to failed Redis ping", "Follower.Pod", followerPod) + log.FromContext(ctx).V(1).Info("Skipping execution of command due to failed Redis ping", "Follower.Pod", followerPod) } } else { - log.FromContext(ctx).Info("Skipping Adding node to cluster, already present.", "Follower.Pod", followerPod) + log.FromContext(ctx).V(1).Info("Skipping Adding node to cluster, already present.", "Follower.Pod", followerPod) } followerIdx++ @@ -315,7 +315,7 @@ func executeFailoverCommand(ctx context.Context, client kubernetes.Interface, cr replicas := cr.Spec.GetReplicaCounts(role) podName := fmt.Sprintf("%s-%s-", cr.ObjectMeta.Name, role) for podCount := 0; podCount <= int(replicas)-1; podCount++ 
{ - log.FromContext(ctx).Info("Executing redis failover operations", "Redis Node", podName+strconv.Itoa(podCount)) + log.FromContext(ctx).V(1).Info("Executing redis failover operations", "Redis Node", podName+strconv.Itoa(podCount)) client := configureRedisClient(ctx, client, cr, podName+strconv.Itoa(podCount)) defer client.Close() cmd := redis.NewStringCmd(ctx, "cluster", "reset") @@ -340,7 +340,7 @@ func executeFailoverCommand(ctx context.Context, client kubernetes.Interface, cr log.FromContext(ctx).Error(err, "Redis command failed with this error") return err } - log.FromContext(ctx).Info("Redis cluster failover executed", "Output", output) + log.FromContext(ctx).V(1).Info("Redis cluster failover executed", "Output", output) } return nil } @@ -371,9 +371,9 @@ func CheckRedisNodeCount(ctx context.Context, client kubernetes.Interface, cr *r count++ } } - log.FromContext(ctx).Info("Number of redis nodes are", "Nodes", strconv.Itoa(count), "Type", nodeType) + log.FromContext(ctx).V(1).Info("Number of redis nodes are", "Nodes", strconv.Itoa(count), "Type", nodeType) } else { - log.FromContext(ctx).Info("Total number of redis nodes are", "Nodes", strconv.Itoa(count)) + log.FromContext(ctx).V(1).Info("Total number of redis nodes are", "Nodes", strconv.Itoa(count)) } return int32(count) } @@ -420,7 +420,7 @@ func UnhealthyNodesInCluster(ctx context.Context, client kubernetes.Interface, c count++ } } - log.FromContext(ctx).Info("Number of failed nodes in cluster", "Failed Node Count", count) + log.FromContext(ctx).V(1).Info("Number of failed nodes in cluster", "Failed Node Count", count) return count, nil } @@ -464,7 +464,7 @@ func executeCommand(ctx context.Context, client kubernetes.Interface, cr *redisv log.FromContext(ctx).Error(execErr, "Could not execute command", "Command", cmd, "Output", execOut) return } - log.FromContext(ctx).Info("Successfully executed the command", "Command", cmd, "Output", execOut) + log.FromContext(ctx).V(1).Info("Successfully executed the command", "Command", cmd, "Output", execOut) } func executeCommand1(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, cmd []string, podName string) (stdout string, stderr error) { @@ -515,20 +515,20 @@ func getContainerID(ctx context.Context, client kubernetes.Interface, cr *redisv return -1, nil } - log.FromContext(ctx).Info("Pod info retrieved successfully", "Pod Name", podName, "Namespace", cr.Namespace) + log.FromContext(ctx).V(1).Info("Pod info retrieved successfully", "Pod Name", podName, "Namespace", cr.Namespace) targetContainer := -1 for containerID, tr := range pod.Spec.Containers { - log.FromContext(ctx).Info("Inspecting container", "Pod Name", podName, "Container ID", containerID, "Container Name", tr.Name) + log.FromContext(ctx).V(1).Info("Inspecting container", "Pod Name", podName, "Container ID", containerID, "Container Name", tr.Name) if tr.Name == cr.ObjectMeta.Name+"-leader" { targetContainer = containerID - log.FromContext(ctx).Info("Leader container found", "Container ID", containerID, "Container Name", tr.Name) + log.FromContext(ctx).V(1).Info("Leader container found", "Container ID", containerID, "Container Name", tr.Name) break } } if targetContainer == -1 { - log.FromContext(ctx).Info("Leader container not found in pod", "Pod Name", podName) + log.FromContext(ctx).V(1).Info("Leader container not found in pod", "Pod Name", podName) return -1, nil } @@ -537,7 +537,7 @@ func getContainerID(ctx context.Context, client kubernetes.Interface, cr *redisv // checkRedisNodePresence 
will check if the redis node exist in cluster or not func checkRedisNodePresence(ctx context.Context, cr *redisv1beta2.RedisCluster, nodeList []clusterNodesResponse, nodeName string) bool { - log.FromContext(ctx).Info("Checking if Node is in cluster", "Node", nodeName) + log.FromContext(ctx).V(1).Info("Checking if Node is in cluster", "Node", nodeName) for _, node := range nodeList { s := strings.Split(node[1], ":") if s[0] == nodeName { @@ -606,7 +606,7 @@ func checkRedisServerRole(ctx context.Context, redisClient *redis.Client, podNam for _, line := range lines { if strings.HasPrefix(line, "role:") { role := strings.TrimPrefix(line, "role:") - log.FromContext(ctx).Info("Role of the Redis Pod", "pod", podName, "role", role) + log.FromContext(ctx).V(1).Info("Role of the Redis Pod", "pod", podName, "role", role) return role } } @@ -631,7 +631,7 @@ func checkAttachedSlave(ctx context.Context, redisClient *redis.Client, podName log.FromContext(ctx).Error(err, "Failed to convert the connected slaves count of the", "redis pod", podName) return -1 } - log.FromContext(ctx).Info("Connected Slaves of the Redis Pod", "pod", podName, "connected_slaves", connected_slaves) + log.FromContext(ctx).V(1).Info("Connected Slaves of the Redis Pod", "pod", podName, "connected_slaves", connected_slaves) return connected_slaves } } @@ -641,7 +641,7 @@ func checkAttachedSlave(ctx context.Context, redisClient *redis.Client, podName } func CreateMasterSlaveReplication(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisReplication, masterPods []string, realMasterPod string) error { - log.FromContext(ctx).Info("Redis Master Node is set to", "pod", realMasterPod) + log.FromContext(ctx).V(1).Info("Redis Master Node is set to", "pod", realMasterPod) realMasterInfo := RedisDetails{ PodName: realMasterPod, Namespace: cr.Namespace, @@ -653,7 +653,7 @@ func CreateMasterSlaveReplication(ctx context.Context, client kubernetes.Interfa if masterPods[i] != realMasterPod { redisClient := configureRedisReplicationClient(ctx, client, cr, masterPods[i]) defer redisClient.Close() - log.FromContext(ctx).Info("Setting the", "pod", masterPods[i], "to slave of", realMasterPod) + log.FromContext(ctx).V(1).Info("Setting the", "pod", masterPods[i], "to slave of", realMasterPod) err := redisClient.SlaveOf(ctx, realMasterPodIP, "6379").Err() if err != nil { log.FromContext(ctx).Error(err, "Failed to set", "pod", masterPods[i], "to slave of", realMasterPod) diff --git a/pkg/k8sutils/services.go b/pkg/k8sutils/services.go index cd8a49d4f..582c3500f 100644 --- a/pkg/k8sutils/services.go +++ b/pkg/k8sutils/services.go @@ -100,7 +100,7 @@ func createService(ctx context.Context, kusClient kubernetes.Interface, namespac log.FromContext(ctx).Error(err, "Redis service creation is failed") return err } - log.FromContext(ctx).Info("Redis service creation is successful") + log.FromContext(ctx).V(1).Info("Redis service creation is successful") return nil } @@ -111,7 +111,7 @@ func updateService(ctx context.Context, k8sClient kubernetes.Interface, namespac log.FromContext(ctx).Error(err, "Redis service update failed") return err } - log.FromContext(ctx).Info("Redis service updated successfully") + log.FromContext(ctx).V(1).Info("Redis service updated successfully") return nil } @@ -122,10 +122,10 @@ func getService(ctx context.Context, k8sClient kubernetes.Interface, namespace s } serviceInfo, err := k8sClient.CoreV1().Services(namespace).Get(context.TODO(), name, getOpts) if err != nil { - log.FromContext(ctx).Info("Redis service get 
action is failed") + log.FromContext(ctx).V(1).Info("Redis service get action is failed") return nil, err } - log.FromContext(ctx).Info("Redis service get action is successful") + log.FromContext(ctx).V(1).Info("Redis service get action is successful") return serviceInfo, nil } @@ -166,7 +166,7 @@ func patchService(ctx context.Context, storedService *corev1.Service, newService return err } if !patchResult.IsEmpty() { - log.FromContext(ctx).Info("Changes in service Detected, Updating...", "patch", string(patchResult.Patch)) + log.FromContext(ctx).V(1).Info("Changes in service Detected, Updating...", "patch", string(patchResult.Patch)) for key, value := range storedService.Annotations { if _, present := newService.Annotations[key]; !present { @@ -177,9 +177,9 @@ func patchService(ctx context.Context, storedService *corev1.Service, newService log.FromContext(ctx).Error(err, "Unable to patch redis service with comparison object") return err } - log.FromContext(ctx).Info("Syncing Redis service with defined properties") + log.FromContext(ctx).V(1).Info("Syncing Redis service with defined properties") return updateService(ctx, cl, namespace, newService) } - log.FromContext(ctx).Info("Redis service is already in-sync") + log.FromContext(ctx).V(1).Info("Redis service is already in-sync") return nil } diff --git a/pkg/k8sutils/statefulset.go b/pkg/k8sutils/statefulset.go index 5a3939dfb..9c26c456d 100644 --- a/pkg/k8sutils/statefulset.go +++ b/pkg/k8sutils/statefulset.go @@ -58,19 +58,19 @@ func (s *StatefulSetService) IsStatefulSetReady(ctx context.Context, namespace, } if expectedUpdateReplicas := replicas - partition; sts.Status.UpdatedReplicas < int32(expectedUpdateReplicas) { - log.FromContext(ctx).Info("StatefulSet is not ready", "Status.UpdatedReplicas", sts.Status.UpdatedReplicas, "ExpectedUpdateReplicas", expectedUpdateReplicas) + log.FromContext(ctx).V(1).Info("StatefulSet is not ready", "Status.UpdatedReplicas", sts.Status.UpdatedReplicas, "ExpectedUpdateReplicas", expectedUpdateReplicas) return false } if partition == 0 && sts.Status.CurrentRevision != sts.Status.UpdateRevision { - log.FromContext(ctx).Info("StatefulSet is not ready", "Status.CurrentRevision", sts.Status.CurrentRevision, "Status.UpdateRevision", sts.Status.UpdateRevision) + log.FromContext(ctx).V(1).Info("StatefulSet is not ready", "Status.CurrentRevision", sts.Status.CurrentRevision, "Status.UpdateRevision", sts.Status.UpdateRevision) return false } if sts.Status.ObservedGeneration != sts.ObjectMeta.Generation { - log.FromContext(ctx).Info("StatefulSet is not ready", "Status.ObservedGeneration", sts.Status.ObservedGeneration, "ObjectMeta.Generation", sts.ObjectMeta.Generation) + log.FromContext(ctx).V(1).Info("StatefulSet is not ready", "Status.ObservedGeneration", sts.Status.ObservedGeneration, "ObjectMeta.Generation", sts.ObjectMeta.Generation) return false } if int(sts.Status.ReadyReplicas) != replicas { - log.FromContext(ctx).Info("StatefulSet is not ready", "Status.ReadyReplicas", sts.Status.ReadyReplicas, "Replicas", replicas) + log.FromContext(ctx).V(1).Info("StatefulSet is not ready", "Status.ReadyReplicas", sts.Status.ReadyReplicas, "Replicas", replicas) return false } return true @@ -183,7 +183,7 @@ func patchStatefulSet(ctx context.Context, storedStateful *appsv1.StatefulSet, n return err } if !patchResult.IsEmpty() { - log.FromContext(ctx).Info("Changes in statefulset Detected, Updating...", "patch", string(patchResult.Patch)) + log.FromContext(ctx).V(1).Info("Changes in statefulset Detected, Updating...", 
"patch", string(patchResult.Patch)) if len(newStateful.Spec.VolumeClaimTemplates) >= 1 && len(newStateful.Spec.VolumeClaimTemplates) == len(storedStateful.Spec.VolumeClaimTemplates) { // Field is immutable therefore we MUST keep it as is. if !apiequality.Semantic.DeepEqual(newStateful.Spec.VolumeClaimTemplates[0].Spec, storedStateful.Spec.VolumeClaimTemplates[0].Spec) { @@ -240,9 +240,9 @@ func patchStatefulSet(ctx context.Context, storedStateful *appsv1.StatefulSet, n annotations["storageCapacity"] = fmt.Sprintf("%d", stateCapacity) storedStateful.Annotations = annotations if realUpdate { - log.FromContext(ctx).Info(fmt.Sprintf("redis:%s resize pvc from %d to %d", storedStateful.Name, storedCapacity, stateCapacity)) + log.FromContext(ctx).V(1).Info(fmt.Sprintf("redis:%s resize pvc from %d to %d", storedStateful.Name, storedCapacity, stateCapacity)) } else { - log.FromContext(ctx).Info(fmt.Sprintf("redis:%s resize noting,just set annotations", storedStateful.Name)) + log.FromContext(ctx).V(1).Info(fmt.Sprintf("redis:%s resize noting,just set annotations", storedStateful.Name)) } } } @@ -264,7 +264,7 @@ func patchStatefulSet(ctx context.Context, storedStateful *appsv1.StatefulSet, n } return updateStatefulSet(ctx, cl, namespace, newStateful, recreateStateFulSet) } - log.FromContext(ctx).Info("Reconciliation Complete, no Changes required.") + log.FromContext(ctx).V(1).Info("Reconciliation Complete, no Changes required.") return nil } @@ -767,7 +767,7 @@ func createStatefulSet(ctx context.Context, cl kubernetes.Interface, namespace s log.FromContext(ctx).Error(err, "Redis stateful creation failed") return err } - log.FromContext(ctx).Info("Redis stateful successfully created") + log.FromContext(ctx).V(1).Info("Redis stateful successfully created") return nil } @@ -781,7 +781,7 @@ func updateStatefulSet(ctx context.Context, cl kubernetes.Interface, namespace s for messageCount, cause := range sErr.ErrStatus.Details.Causes { failMsg[messageCount] = cause.Message } - log.FromContext(ctx).Info("recreating StatefulSet because the update operation wasn't possible", "reason", strings.Join(failMsg, ", ")) + log.FromContext(ctx).V(1).Info("recreating StatefulSet because the update operation wasn't possible", "reason", strings.Join(failMsg, ", ")) propagationPolicy := metav1.DeletePropagationForeground if err := cl.AppsV1().StatefulSets(namespace).Delete(context.TODO(), stateful.GetName(), metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil { //nolint return errors.Wrap(err, "failed to delete StatefulSet to avoid forbidden action") @@ -792,7 +792,7 @@ func updateStatefulSet(ctx context.Context, cl kubernetes.Interface, namespace s log.FromContext(ctx).Error(err, "Redis statefulset update failed") return err } - log.FromContext(ctx).Info("Redis statefulset successfully updated ") + log.FromContext(ctx).V(1).Info("Redis statefulset successfully updated ") return nil } @@ -803,10 +803,10 @@ func GetStatefulSet(ctx context.Context, cl kubernetes.Interface, namespace stri } statefulInfo, err := cl.AppsV1().StatefulSets(namespace).Get(context.TODO(), name, getOpts) if err != nil { - log.FromContext(ctx).Info("Redis statefulset get action failed") + log.FromContext(ctx).V(1).Info("Redis statefulset get action failed") return nil, err } - log.FromContext(ctx).Info("Redis statefulset get action was successful") + log.FromContext(ctx).V(1).Info("Redis statefulset get action was successful") return statefulInfo, nil } From 60f8c025df609e68f1bc1df504729a511e97b359 Mon Sep 17 00:00:00 2001 
From: drivebyer Date: Fri, 15 Nov 2024 16:36:04 +0800 Subject: [PATCH 6/8] lint Signed-off-by: drivebyer --- pkg/controllers/rediscluster/rediscluster_controller.go | 2 +- pkg/controllerutil/controller_common.go | 2 +- pkg/k8sutils/finalizer_test.go | 2 -- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/pkg/controllers/rediscluster/rediscluster_controller.go b/pkg/controllers/rediscluster/rediscluster_controller.go index d1d53d936..0c79a2a56 100644 --- a/pkg/controllers/rediscluster/rediscluster_controller.go +++ b/pkg/controllers/rediscluster/rediscluster_controller.go @@ -19,7 +19,6 @@ package rediscluster import ( "context" "fmt" - "sigs.k8s.io/controller-runtime/pkg/log" "time" "github.com/OT-CONTAINER-KIT/redis-operator/api/status" @@ -33,6 +32,7 @@ import ( "k8s.io/client-go/kubernetes" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" ) // Reconciler reconciles a RedisCluster object diff --git a/pkg/controllerutil/controller_common.go b/pkg/controllerutil/controller_common.go index f153dea92..2163b0291 100644 --- a/pkg/controllerutil/controller_common.go +++ b/pkg/controllerutil/controller_common.go @@ -2,10 +2,10 @@ package controllerutil import ( "context" - "sigs.k8s.io/controller-runtime/pkg/log" "time" apierrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) diff --git a/pkg/k8sutils/finalizer_test.go b/pkg/k8sutils/finalizer_test.go index 3f6e7cfb2..1267b79e8 100644 --- a/pkg/k8sutils/finalizer_test.go +++ b/pkg/k8sutils/finalizer_test.go @@ -261,7 +261,6 @@ func TestHandleRedisClusterFinalizer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - var k8sClient *k8sClientFake.Clientset if tc.existingPVC != nil { k8sClient = k8sClientFake.NewSimpleClientset(helperToRuntimeObjects(tc.existingPVC)...) @@ -460,7 +459,6 @@ func TestHandleRedisReplicationFinalizer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - var k8sClient *k8sClientFake.Clientset if tc.existingPVC != nil { k8sClient = k8sClientFake.NewSimpleClientset(helperToRuntimeObjects(tc.existingPVC)...) From a6d6d899af81807ba2987388991b49442cdac800 Mon Sep 17 00:00:00 2001 From: "yang.wu" Date: Fri, 15 Nov 2024 17:10:50 +0800 Subject: [PATCH 7/8] update --- pkg/k8sutils/cluster-scaling.go | 2 +- pkg/k8sutils/finalizer_test.go | 2 -- pkg/k8sutils/pod.go | 2 +- pkg/k8sutils/redis.go | 4 ++-- 4 files changed, 4 insertions(+), 6 deletions(-) diff --git a/pkg/k8sutils/cluster-scaling.go b/pkg/k8sutils/cluster-scaling.go index d8c1d3c34..b2bd5a0da 100644 --- a/pkg/k8sutils/cluster-scaling.go +++ b/pkg/k8sutils/cluster-scaling.go @@ -3,13 +3,13 @@ package k8sutils import ( "context" "fmt" - "sigs.k8s.io/controller-runtime/pkg/log" "strconv" "strings" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" redis "github.com/redis/go-redis/v9" "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/log" ) // ReshardRedisCluster transfer the slots from the last node to the first node. 
diff --git a/pkg/k8sutils/finalizer_test.go b/pkg/k8sutils/finalizer_test.go index 1267b79e8..8a8bdeba4 100644 --- a/pkg/k8sutils/finalizer_test.go +++ b/pkg/k8sutils/finalizer_test.go @@ -131,7 +131,6 @@ func TestHandleRedisFinalizer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - var k8sClient *k8sClientFake.Clientset if tc.existingPVC != nil { k8sClient = k8sClientFake.NewSimpleClientset(tc.existingPVC.DeepCopyObject()) @@ -554,7 +553,6 @@ func TestHandleRedisSentinelFinalizer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - err := HandleRedisSentinelFinalizer(context.TODO(), tc.mockClient, tc.cr) if tc.expectError { assert.Error(t, err) diff --git a/pkg/k8sutils/pod.go b/pkg/k8sutils/pod.go index 8b9dacf35..c79a95f86 100644 --- a/pkg/k8sutils/pod.go +++ b/pkg/k8sutils/pod.go @@ -4,13 +4,13 @@ import ( "context" "encoding/json" "fmt" - "sigs.k8s.io/controller-runtime/pkg/log" "strings" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/log" ) type Pod interface { diff --git a/pkg/k8sutils/redis.go b/pkg/k8sutils/redis.go index 414b5d3e4..b62918773 100644 --- a/pkg/k8sutils/redis.go +++ b/pkg/k8sutils/redis.go @@ -6,10 +6,11 @@ import ( "encoding/csv" "fmt" "net" - "sigs.k8s.io/controller-runtime/pkg/log" "strconv" "strings" + "sigs.k8s.io/controller-runtime/pkg/log" + redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" redis "github.com/redis/go-redis/v9" corev1 "k8s.io/api/core/v1" @@ -321,7 +322,6 @@ func executeFailoverCommand(ctx context.Context, client kubernetes.Interface, cr cmd := redis.NewStringCmd(ctx, "cluster", "reset") err := client.Process(ctx, cmd) if err != nil { - log.FromContext(ctx).Error(err, "Redis command failed with this error") flushcommand := redis.NewStringCmd(ctx, "flushall") err = client.Process(ctx, flushcommand) From 95256a731d15b8f7174e1f1a602b119cb0369195 Mon Sep 17 00:00:00 2001 From: "yang.wu" Date: Fri, 15 Nov 2024 17:15:10 +0800 Subject: [PATCH 8/8] update --- .../redisreplication/redisreplication_controller.go | 2 +- pkg/k8sutils/finalizer_test.go | 3 --- pkg/k8sutils/poddisruption.go | 2 +- pkg/k8sutils/redis-cluster.go | 2 +- pkg/k8sutils/redis-replication.go | 2 +- pkg/k8sutils/redis-sentinel.go | 2 +- pkg/k8sutils/redis-standalone.go | 1 + pkg/k8sutils/redis.go | 3 +-- pkg/k8sutils/redis_test.go | 3 --- pkg/k8sutils/services.go | 2 +- pkg/k8sutils/services_test.go | 3 --- pkg/k8sutils/statefulset.go | 2 +- pkg/k8sutils/statefulset_test.go | 4 ---- pkg/k8sutils/status.go | 2 +- 14 files changed, 10 insertions(+), 23 deletions(-) diff --git a/pkg/controllers/redisreplication/redisreplication_controller.go b/pkg/controllers/redisreplication/redisreplication_controller.go index 544d69595..6b71bf56f 100644 --- a/pkg/controllers/redisreplication/redisreplication_controller.go +++ b/pkg/controllers/redisreplication/redisreplication_controller.go @@ -2,7 +2,6 @@ package redisreplication import ( "context" - "sigs.k8s.io/controller-runtime/pkg/log" "time" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" @@ -15,6 +14,7 @@ import ( "k8s.io/client-go/kubernetes" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" ) // Reconciler reconciles a RedisReplication object diff --git a/pkg/k8sutils/finalizer_test.go b/pkg/k8sutils/finalizer_test.go index 
8a8bdeba4..4f46d8f7f 100644 --- a/pkg/k8sutils/finalizer_test.go +++ b/pkg/k8sutils/finalizer_test.go @@ -592,7 +592,6 @@ func TestFinalizeRedisPVC(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - cr := &v1beta2.Redis{ ObjectMeta: metav1.ObjectMeta{ Name: "test-redis", @@ -689,7 +688,6 @@ func TestFinalizeRedisReplicationPVC(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - var k8sClient *k8sClientFake.Clientset if tc.existingPVCs != nil { k8sClient = k8sClientFake.NewSimpleClientset(helperToRuntimeObjects(tc.existingPVCs)...) @@ -760,7 +758,6 @@ func TestFinalizeRedisClusterPVC(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - var k8sClient *k8sClientFake.Clientset if tc.existingPVCs != nil { k8sClient = k8sClientFake.NewSimpleClientset(helperToRuntimeObjects(tc.existingPVCs)...) diff --git a/pkg/k8sutils/poddisruption.go b/pkg/k8sutils/poddisruption.go index ed19c18b1..6dc97a970 100644 --- a/pkg/k8sutils/poddisruption.go +++ b/pkg/k8sutils/poddisruption.go @@ -3,7 +3,6 @@ package k8sutils import ( "context" "fmt" - "sigs.k8s.io/controller-runtime/pkg/log" commonapi "github.com/OT-CONTAINER-KIT/redis-operator/api" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" @@ -13,6 +12,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/log" ) // CreateRedisLeaderPodDisruptionBudget check and create a PodDisruptionBudget for Leaders diff --git a/pkg/k8sutils/redis-cluster.go b/pkg/k8sutils/redis-cluster.go index 1b2c73219..beb6ac9b7 100644 --- a/pkg/k8sutils/redis-cluster.go +++ b/pkg/k8sutils/redis-cluster.go @@ -2,7 +2,6 @@ package k8sutils import ( "context" - "sigs.k8s.io/controller-runtime/pkg/log" "strconv" "strings" @@ -12,6 +11,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/log" ) // RedisClusterSTS is a interface to call Redis Statefulset function diff --git a/pkg/k8sutils/redis-replication.go b/pkg/k8sutils/redis-replication.go index e288b64f1..ad6a84128 100644 --- a/pkg/k8sutils/redis-replication.go +++ b/pkg/k8sutils/redis-replication.go @@ -2,13 +2,13 @@ package k8sutils import ( "context" - "sigs.k8s.io/controller-runtime/pkg/log" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/util" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/log" ) // CreateReplicationService method will create replication service for Redis diff --git a/pkg/k8sutils/redis-sentinel.go b/pkg/k8sutils/redis-sentinel.go index d27ee487f..4b1a56ff2 100644 --- a/pkg/k8sutils/redis-sentinel.go +++ b/pkg/k8sutils/redis-sentinel.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "errors" - "sigs.k8s.io/controller-runtime/pkg/log" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/util" @@ -14,6 +13,7 @@ import ( "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/log" ) // RedisSentinelSTS is a interface to call Redis Statefulset function diff --git a/pkg/k8sutils/redis-standalone.go b/pkg/k8sutils/redis-standalone.go index af20aaef3..11bf31ac1 100644 --- a/pkg/k8sutils/redis-standalone.go +++ b/pkg/k8sutils/redis-standalone.go @@ -2,6 +2,7 @@ 
package k8sutils import ( "context" + redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/util" "k8s.io/client-go/kubernetes" diff --git a/pkg/k8sutils/redis.go b/pkg/k8sutils/redis.go index b62918773..ea561e634 100644 --- a/pkg/k8sutils/redis.go +++ b/pkg/k8sutils/redis.go @@ -9,8 +9,6 @@ import ( "strconv" "strings" - "sigs.k8s.io/controller-runtime/pkg/log" - redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" redis "github.com/redis/go-redis/v9" corev1 "k8s.io/api/core/v1" @@ -18,6 +16,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/remotecommand" + "sigs.k8s.io/controller-runtime/pkg/log" ) // RedisDetails will hold the information for Redis Pod diff --git a/pkg/k8sutils/redis_test.go b/pkg/k8sutils/redis_test.go index 674c92aa1..8f6035f3c 100644 --- a/pkg/k8sutils/redis_test.go +++ b/pkg/k8sutils/redis_test.go @@ -284,7 +284,6 @@ func TestGetRedisHostname(t *testing.T) { } func TestCreateSingleLeaderRedisCommand(t *testing.T) { - cr := &redisv1beta2.RedisCluster{} cmd := CreateSingleLeaderRedisCommand(context.TODO(), cr) @@ -388,7 +387,6 @@ func TestGetRedisTLSArgs(t *testing.T) { } func TestCreateRedisReplicationCommand(t *testing.T) { - type secret struct { name string namespace string @@ -626,7 +624,6 @@ func TestGetContainerID(t *testing.T) { } func Test_checkAttachedSlave(t *testing.T) { - tests := []struct { name string podName string diff --git a/pkg/k8sutils/services.go b/pkg/k8sutils/services.go index 582c3500f..1ad9aa63d 100644 --- a/pkg/k8sutils/services.go +++ b/pkg/k8sutils/services.go @@ -2,7 +2,6 @@ package k8sutils import ( "context" - "sigs.k8s.io/controller-runtime/pkg/log" "github.com/banzaicloud/k8s-objectmatcher/patch" corev1 "k8s.io/api/core/v1" @@ -10,6 +9,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/log" ) const ( diff --git a/pkg/k8sutils/services_test.go b/pkg/k8sutils/services_test.go index c5ef6b05f..96e2fc1c5 100644 --- a/pkg/k8sutils/services_test.go +++ b/pkg/k8sutils/services_test.go @@ -310,7 +310,6 @@ func Test_createService(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var k8sClient *k8sClientFake.Clientset if tt.exist { k8sClient = k8sClientFake.NewSimpleClientset(tt.service.DeepCopyObject()) @@ -406,7 +405,6 @@ func Test_updateService(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - k8sClient := k8sClientFake.NewSimpleClientset(tt.current.DeepCopyObject()) err := updateService(context.TODO(), k8sClient, tt.servinceNamespace, tt.updated) @@ -459,7 +457,6 @@ func Test_getService(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var k8sClient *k8sClientFake.Clientset if tt.have != nil { k8sClient = k8sClientFake.NewSimpleClientset(tt.have.DeepCopyObject()) diff --git a/pkg/k8sutils/statefulset.go b/pkg/k8sutils/statefulset.go index 9c26c456d..63060e1fb 100644 --- a/pkg/k8sutils/statefulset.go +++ b/pkg/k8sutils/statefulset.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "path" - "sigs.k8s.io/controller-runtime/pkg/log" "sort" "strconv" "strings" @@ -22,6 +21,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/utils/env" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/log" ) type StatefulSet interface { diff --git a/pkg/k8sutils/statefulset_test.go b/pkg/k8sutils/statefulset_test.go index 
dc3dd341a..b412c7456 100644 --- a/pkg/k8sutils/statefulset_test.go +++ b/pkg/k8sutils/statefulset_test.go @@ -195,7 +195,6 @@ func TestGetVolumeMount(t *testing.T) { } func Test_GetStatefulSet(t *testing.T) { - tests := []struct { name string sts appsv1.StatefulSet @@ -238,7 +237,6 @@ func Test_GetStatefulSet(t *testing.T) { } func Test_createStatefulSet(t *testing.T) { - tests := []struct { name string sts appsv1.StatefulSet @@ -287,7 +285,6 @@ func Test_createStatefulSet(t *testing.T) { } func TestUpdateStatefulSet(t *testing.T) { - tests := []struct { name string existingStsSpec appsv1.StatefulSetSpec @@ -409,7 +406,6 @@ func TestUpdateStatefulSet(t *testing.T) { } func TestCreateOrUpdateStateFul(t *testing.T) { - tests := []struct { name string stsParams statefulSetParameters diff --git a/pkg/k8sutils/status.go b/pkg/k8sutils/status.go index ddd47e3c6..9f2f19216 100644 --- a/pkg/k8sutils/status.go +++ b/pkg/k8sutils/status.go @@ -3,7 +3,6 @@ package k8sutils import ( "context" "reflect" - "sigs.k8s.io/controller-runtime/pkg/log" "github.com/OT-CONTAINER-KIT/redis-operator/api/status" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" @@ -12,6 +11,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" + "sigs.k8s.io/controller-runtime/pkg/log" ) // UpdateRedisClusterStatus will update the status of the RedisCluster