diff --git a/main.go b/main.go index 9d46df50b..576d16fbf 100644 --- a/main.go +++ b/main.go @@ -118,37 +118,32 @@ func main() { os.Exit(1) } - if err = (&redis.RedisReconciler{ + if err = (&redis.Reconciler{ Client: mgr.GetClient(), K8sClient: k8sclient, Dk8sClient: dk8sClient, - Log: ctrl.Log.WithName("controllers").WithName("Redis"), Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Redis") os.Exit(1) } - rcLog := ctrl.Log.WithName("controllers").WithName("RedisCluster") - if err = (&rediscluster.RedisClusterReconciler{ + if err = (&rediscluster.Reconciler{ Client: mgr.GetClient(), K8sClient: k8sclient, Dk8sClient: dk8sClient, - Log: rcLog, Scheme: mgr.GetScheme(), - StatefulSet: k8sutils.NewStatefulSetService(k8sclient, rcLog), + StatefulSet: k8sutils.NewStatefulSetService(k8sclient), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "RedisCluster") os.Exit(1) } - rrLog := ctrl.Log.WithName("controllers").WithName("RedisReplication") - if err = (&redisreplication.RedisReplicationReconciler{ + if err = (&redisreplication.Reconciler{ Client: mgr.GetClient(), K8sClient: k8sclient, Dk8sClient: dk8sClient, - Log: rrLog, Scheme: mgr.GetScheme(), - Pod: k8sutils.NewPodService(k8sclient, rrLog), - StatefulSet: k8sutils.NewStatefulSetService(k8sclient, rrLog), + Pod: k8sutils.NewPodService(k8sclient), + StatefulSet: k8sutils.NewStatefulSetService(k8sclient), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "RedisReplication") os.Exit(1) @@ -157,7 +152,6 @@ func main() { Client: mgr.GetClient(), K8sClient: k8sclient, Dk8sClient: dk8sClient, - Log: ctrl.Log.WithName("controllers").WithName("RedisSentinel"), Scheme: mgr.GetScheme(), ReplicationWatcher: intctrlutil.NewResourceWatcher(), }).SetupWithManager(mgr); err != nil { diff --git a/mocks/utils/utils.go b/mocks/utils/utils.go index b48dad6a2..93bf3d90b 100644 --- a/mocks/utils/utils.go +++ b/mocks/utils/utils.go @@ -1,6 +1,7 @@ package utils import ( + "context" "fmt" "strconv" @@ -76,7 +77,7 @@ func CreateFakeObjectWithSecret(name, namespace, key string) []runtime.Object { return []runtime.Object{secret} } -func CreateFakeClientWithSecrets(cr *redisv1beta2.RedisCluster, secretName, secretKey, secretValue string) *fake.Clientset { +func CreateFakeClientWithSecrets(ctx context.Context, cr *redisv1beta2.RedisCluster, secretName, secretKey, secretValue string) *fake.Clientset { leaderReplicas := cr.Spec.GetReplicaCounts("leader") followerReplicas := cr.Spec.GetReplicaCounts("follower") pods := make([]runtime.Object, 0) diff --git a/pkg/controllers/redis/redis_controller.go b/pkg/controllers/redis/redis_controller.go index 41f11d811..061f33e4c 100644 --- a/pkg/controllers/redis/redis_controller.go +++ b/pkg/controllers/redis/redis_controller.go @@ -23,7 +23,6 @@ import ( redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" intctrlutil "github.com/OT-CONTAINER-KIT/redis-operator/pkg/controllerutil" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/k8sutils" - "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" @@ -31,49 +30,46 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -// RedisReconciler reconciles a Redis object -type RedisReconciler struct { +// Reconciler reconciles a Redis object +type Reconciler struct { client.Client K8sClient kubernetes.Interface Dk8sClient 
dynamic.Interface - Log logr.Logger Scheme *runtime.Scheme } -func (r *RedisReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - reqLogger := r.Log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name) - reqLogger.Info("Reconciling opstree redis controller") +func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { instance := &redisv1beta2.Redis{} err := r.Client.Get(context.TODO(), req.NamespacedName, instance) if err != nil { - return intctrlutil.RequeueWithErrorChecking(err, reqLogger, "failed to get redis instance") + return intctrlutil.RequeueWithErrorChecking(ctx, err, "failed to get redis instance") } if instance.ObjectMeta.GetDeletionTimestamp() != nil { - if err = k8sutils.HandleRedisFinalizer(r.Client, r.K8sClient, r.Log, instance); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "failed to handle redis finalizer") + if err = k8sutils.HandleRedisFinalizer(ctx, r.Client, r.K8sClient, instance); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "failed to handle redis finalizer") } return intctrlutil.Reconciled() } if _, found := instance.ObjectMeta.GetAnnotations()["redis.opstreelabs.in/skip-reconcile"]; found { - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "found skip reconcile annotation") + return intctrlutil.RequeueAfter(ctx, time.Second*10, "found skip reconcile annotation") } - if err = k8sutils.AddFinalizer(instance, k8sutils.RedisFinalizer, r.Client); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "failed to add finalizer") + if err = k8sutils.AddFinalizer(ctx, instance, k8sutils.RedisFinalizer, r.Client); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "failed to add finalizer") } - err = k8sutils.CreateStandaloneRedis(instance, r.K8sClient) + err = k8sutils.CreateStandaloneRedis(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "failed to create redis") + return intctrlutil.RequeueWithError(ctx, err, "failed to create redis") } - err = k8sutils.CreateStandaloneService(instance, r.K8sClient) + err = k8sutils.CreateStandaloneService(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "failed to create service") + return intctrlutil.RequeueWithError(ctx, err, "failed to create service") } - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "requeue after 10 seconds") + return intctrlutil.RequeueAfter(ctx, time.Second*10, "requeue after 10 seconds") } // SetupWithManager sets up the controller with the Manager. -func (r *RedisReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&redisv1beta2.Redis{}). 
Complete(r) diff --git a/pkg/controllers/redis/redis_controller_suite_test.go b/pkg/controllers/redis/redis_controller_suite_test.go index a7a7b4b59..594bd2007 100644 --- a/pkg/controllers/redis/redis_controller_suite_test.go +++ b/pkg/controllers/redis/redis_controller_suite_test.go @@ -99,7 +99,7 @@ var _ = BeforeSuite(func() { dk8sClient, err := dynamic.NewForConfig(cfg) Expect(err).ToNot(HaveOccurred()) - err = (&RedisReconciler{ + err = (&Reconciler{ Client: k8sManager.GetClient(), K8sClient: k8sClient, Dk8sClient: dk8sClient, diff --git a/pkg/controllers/rediscluster/rediscluster_controller.go b/pkg/controllers/rediscluster/rediscluster_controller.go index acfc56079..0c79a2a56 100644 --- a/pkg/controllers/rediscluster/rediscluster_controller.go +++ b/pkg/controllers/rediscluster/rediscluster_controller.go @@ -26,42 +26,40 @@ import ( intctrlutil "github.com/OT-CONTAINER-KIT/redis-operator/pkg/controllerutil" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/k8sutils" retry "github.com/avast/retry-go" - "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" ) -// RedisClusterReconciler reconciles a RedisCluster object -type RedisClusterReconciler struct { +// Reconciler reconciles a RedisCluster object +type Reconciler struct { client.Client k8sutils.StatefulSet K8sClient kubernetes.Interface Dk8sClient dynamic.Interface - Log logr.Logger Scheme *runtime.Scheme } -func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - reqLogger := r.Log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name) - reqLogger.V(1).Info("Reconciling opstree redis Cluster controller") +func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) instance := &redisv1beta2.RedisCluster{} err := r.Client.Get(context.TODO(), req.NamespacedName, instance) if err != nil { - return intctrlutil.RequeueWithErrorChecking(err, reqLogger, "failed to get redis cluster instance") + return intctrlutil.RequeueWithErrorChecking(ctx, err, "failed to get redis cluster instance") } if instance.ObjectMeta.GetDeletionTimestamp() != nil { - if err = k8sutils.HandleRedisClusterFinalizer(r.Client, r.K8sClient, r.Log, instance); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "failed to handle redis cluster finalizer") + if err = k8sutils.HandleRedisClusterFinalizer(ctx, r.Client, r.K8sClient, instance); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "failed to handle redis cluster finalizer") } return intctrlutil.Reconciled() } if _, found := instance.ObjectMeta.GetAnnotations()["rediscluster.opstreelabs.in/skip-reconcile"]; found { - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "found skip reconcile annotation") + return intctrlutil.RequeueAfter(ctx, time.Second*10, "found skip reconcile annotation") } instance.SetDefault() @@ -69,83 +67,83 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request followerReplicas := instance.Spec.GetReplicaCounts("follower") totalReplicas := leaderReplicas + followerReplicas - if err = k8sutils.AddFinalizer(instance, k8sutils.RedisClusterFinalizer, r.Client); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "failed to add finalizer") + if err = 
k8sutils.AddFinalizer(ctx, instance, k8sutils.RedisClusterFinalizer, r.Client); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "failed to add finalizer") } // Check if the cluster is downscaled - if leaderCount := k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, r.Log, instance, "leader"); leaderReplicas < leaderCount { - reqLogger.Info("Redis cluster is downscaling...", "Current.LeaderReplicas", leaderCount, "Desired.LeaderReplicas", leaderReplicas) + if leaderCount := k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, instance, "leader"); leaderReplicas < leaderCount { + logger.Info("Redis cluster is downscaling...", "Current.LeaderReplicas", leaderCount, "Desired.LeaderReplicas", leaderReplicas) for shardIdx := leaderCount - 1; shardIdx >= leaderReplicas; shardIdx-- { - reqLogger.Info("Remove the shard", "Shard.Index", shardIdx) + logger.Info("Remove the shard", "Shard.Index", shardIdx) // Imp if the last index of leader sts is not leader make it then // check whether the redis is leader or not ? // if not true then make it leader pod - if !(k8sutils.VerifyLeaderPod(ctx, r.K8sClient, r.Log, instance)) { + if !(k8sutils.VerifyLeaderPod(ctx, r.K8sClient, instance)) { // lastLeaderPod is slaving right now Make it the master Pod // We have to bring a manual failover here to make it a leaderPod // clusterFailover should also include the clusterReplicate since we have to map the followers to new leader - k8sutils.ClusterFailover(ctx, r.K8sClient, r.Log, instance) + k8sutils.ClusterFailover(ctx, r.K8sClient, instance) } // Step 1 Remove the Follower Node - k8sutils.RemoveRedisFollowerNodesFromCluster(ctx, r.K8sClient, r.Log, instance) + k8sutils.RemoveRedisFollowerNodesFromCluster(ctx, r.K8sClient, instance) // Step 2 Reshard the Cluster - k8sutils.ReshardRedisCluster(r.K8sClient, r.Log, instance, true) + k8sutils.ReshardRedisCluster(ctx, r.K8sClient, instance, true) } - reqLogger.Info("Redis cluster is downscaled... Rebalancing the cluster") + logger.Info("Redis cluster is downscaled... Rebalancing the cluster") // Step 3 Rebalance the cluster - k8sutils.RebalanceRedisCluster(r.K8sClient, r.Log, instance) - reqLogger.Info("Redis cluster is downscaled... Rebalancing the cluster is done") - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "") + k8sutils.RebalanceRedisCluster(ctx, r.K8sClient, instance) + logger.Info("Redis cluster is downscaled... 
Rebalancing the cluster is done") + return intctrlutil.RequeueAfter(ctx, time.Second*10, "") } // Mark the cluster status as initializing if there are no leader or follower nodes if (instance.Status.ReadyLeaderReplicas == 0 && instance.Status.ReadyFollowerReplicas == 0) || instance.Status.ReadyLeaderReplicas != leaderReplicas { - err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterInitializing, status.InitializingClusterLeaderReason, instance.Status.ReadyLeaderReplicas, instance.Status.ReadyFollowerReplicas, r.Dk8sClient) + err = k8sutils.UpdateRedisClusterStatus(ctx, instance, status.RedisClusterInitializing, status.InitializingClusterLeaderReason, instance.Status.ReadyLeaderReplicas, instance.Status.ReadyFollowerReplicas, r.Dk8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } } if leaderReplicas != 0 { - err = k8sutils.CreateRedisLeaderService(instance, r.K8sClient) + err = k8sutils.CreateRedisLeaderService(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } } - err = k8sutils.CreateRedisLeader(instance, r.K8sClient) + err = k8sutils.CreateRedisLeader(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } - err = k8sutils.ReconcileRedisPodDisruptionBudget(instance, "leader", instance.Spec.RedisLeader.PodDisruptionBudget, r.K8sClient) + err = k8sutils.ReconcileRedisPodDisruptionBudget(ctx, instance, "leader", instance.Spec.RedisLeader.PodDisruptionBudget, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } if r.IsStatefulSetReady(ctx, instance.Namespace, instance.Name+"-leader") { // Mark the cluster status as initializing if there are no follower nodes if (instance.Status.ReadyLeaderReplicas == 0 && instance.Status.ReadyFollowerReplicas == 0) || instance.Status.ReadyFollowerReplicas != followerReplicas { - err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterInitializing, status.InitializingClusterFollowerReason, leaderReplicas, instance.Status.ReadyFollowerReplicas, r.Dk8sClient) + err = k8sutils.UpdateRedisClusterStatus(ctx, instance, status.RedisClusterInitializing, status.InitializingClusterFollowerReason, leaderReplicas, instance.Status.ReadyFollowerReplicas, r.Dk8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } } // if we have followers create their service. 
if followerReplicas != 0 { - err = k8sutils.CreateRedisFollowerService(instance, r.K8sClient) + err = k8sutils.CreateRedisFollowerService(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } } - err = k8sutils.CreateRedisFollower(instance, r.K8sClient) + err = k8sutils.CreateRedisFollower(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } - err = k8sutils.ReconcileRedisPodDisruptionBudget(instance, "follower", instance.Spec.RedisFollower.PodDisruptionBudget, r.K8sClient) + err = k8sutils.ReconcileRedisPodDisruptionBudget(ctx, instance, "follower", instance.Spec.RedisFollower.PodDisruptionBudget, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } } @@ -155,57 +153,57 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request // Mark the cluster status as bootstrapping if all the leader and follower nodes are ready if !(instance.Status.ReadyLeaderReplicas == leaderReplicas && instance.Status.ReadyFollowerReplicas == followerReplicas) { - err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterBootstrap, status.BootstrapClusterReason, leaderReplicas, followerReplicas, r.Dk8sClient) + err = k8sutils.UpdateRedisClusterStatus(ctx, instance, status.RedisClusterBootstrap, status.BootstrapClusterReason, leaderReplicas, followerReplicas, r.Dk8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } } - if nc := k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, r.Log, instance, ""); nc != totalReplicas { - reqLogger.Info("Creating redis cluster by executing cluster creation commands") - leaderCount := k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, r.Log, instance, "leader") + if nc := k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, instance, ""); nc != totalReplicas { + logger.Info("Creating redis cluster by executing cluster creation commands") + leaderCount := k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, instance, "leader") if leaderCount != leaderReplicas { - reqLogger.Info("Not all leader are part of the cluster...", "Leaders.Count", leaderCount, "Instance.Size", leaderReplicas) + logger.Info("Not all leader are part of the cluster...", "Leaders.Count", leaderCount, "Instance.Size", leaderReplicas) if leaderCount <= 2 { - k8sutils.ExecuteRedisClusterCommand(ctx, r.K8sClient, r.Log, instance) + k8sutils.ExecuteRedisClusterCommand(ctx, r.K8sClient, instance) } else { if leaderCount < leaderReplicas { // Scale up the cluster // Step 2 : Add Redis Node - k8sutils.AddRedisNodeToCluster(ctx, r.K8sClient, r.Log, instance) + k8sutils.AddRedisNodeToCluster(ctx, r.K8sClient, instance) // Step 3 Rebalance the cluster using the empty masters - k8sutils.RebalanceRedisClusterEmptyMasters(r.K8sClient, r.Log, instance) + k8sutils.RebalanceRedisClusterEmptyMasters(ctx, r.K8sClient, instance) } } } else { if followerReplicas > 0 { - reqLogger.Info("All leader are part of the cluster, adding follower/replicas", "Leaders.Count", leaderCount, "Instance.Size", leaderReplicas, "Follower.Replicas", followerReplicas) - k8sutils.ExecuteRedisReplicationCommand(ctx, r.K8sClient, r.Log, instance) + logger.Info("All leader are part of the cluster, adding follower/replicas", "Leaders.Count", leaderCount, "Instance.Size", 
leaderReplicas, "Follower.Replicas", followerReplicas) + k8sutils.ExecuteRedisReplicationCommand(ctx, r.K8sClient, instance) } else { - reqLogger.Info("no follower/replicas configured, skipping replication configuration", "Leaders.Count", leaderCount, "Leader.Size", leaderReplicas, "Follower.Replicas", followerReplicas) + logger.Info("no follower/replicas configured, skipping replication configuration", "Leaders.Count", leaderCount, "Leader.Size", leaderReplicas, "Follower.Replicas", followerReplicas) } } - return intctrlutil.RequeueAfter(reqLogger, time.Second*60, "Redis cluster count is not desired", "Current.Count", nc, "Desired.Count", totalReplicas) + return intctrlutil.RequeueAfter(ctx, time.Second*60, "Redis cluster count is not desired", "Current.Count", nc, "Desired.Count", totalReplicas) } - reqLogger.V(1).Info("Number of Redis nodes match desired") - unhealthyNodeCount, err := k8sutils.UnhealthyNodesInCluster(ctx, r.K8sClient, r.Log, instance) + logger.Info("Number of Redis nodes match desired") + unhealthyNodeCount, err := k8sutils.UnhealthyNodesInCluster(ctx, r.K8sClient, instance) if err != nil { - reqLogger.Error(err, "failed to determine unhealthy node count in cluster") + logger.Error(err, "failed to determine unhealthy node count in cluster") } if int(totalReplicas) > 1 && unhealthyNodeCount >= int(totalReplicas)-1 { - err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterFailed, "RedisCluster has too many unhealthy nodes", leaderReplicas, followerReplicas, r.Dk8sClient) + err = k8sutils.UpdateRedisClusterStatus(ctx, instance, status.RedisClusterFailed, "RedisCluster has too many unhealthy nodes", leaderReplicas, followerReplicas, r.Dk8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } - reqLogger.Info("healthy leader count does not match desired; attempting to repair disconnected masters") - if err = k8sutils.RepairDisconnectedMasters(ctx, r.K8sClient, r.Log, instance); err != nil { - reqLogger.Error(err, "failed to repair disconnected masters") + logger.Info("healthy leader count does not match desired; attempting to repair disconnected masters") + if err = k8sutils.RepairDisconnectedMasters(ctx, r.K8sClient, instance); err != nil { + logger.Error(err, "failed to repair disconnected masters") } err = retry.Do(func() error { - nc, nErr := k8sutils.UnhealthyNodesInCluster(ctx, r.K8sClient, r.Log, instance) + nc, nErr := k8sutils.UnhealthyNodesInCluster(ctx, r.K8sClient, instance) if nErr != nil { return nErr } @@ -216,34 +214,34 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request }, retry.Attempts(3), retry.Delay(time.Second*5)) if err == nil { - reqLogger.Info("repairing unhealthy masters successful, no unhealthy masters left") - return intctrlutil.RequeueAfter(reqLogger, time.Second*30, "no unhealthy nodes found after repairing disconnected masters") + logger.Info("repairing unhealthy masters successful, no unhealthy masters left") + return intctrlutil.RequeueAfter(ctx, time.Second*30, "no unhealthy nodes found after repairing disconnected masters") } - reqLogger.Info("unhealthy nodes exist after attempting to repair disconnected masters; starting failover") - if err = k8sutils.ExecuteFailoverOperation(ctx, r.K8sClient, r.Log, instance); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + logger.Info("unhealthy nodes exist after attempting to repair disconnected masters; starting failover") + if err = 
k8sutils.ExecuteFailoverOperation(ctx, r.K8sClient, instance); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "") } } // Check If there is No Empty Master Node - if k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, r.Log, instance, "") == totalReplicas { - k8sutils.CheckIfEmptyMasters(ctx, r.K8sClient, r.Log, instance) + if k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, instance, "") == totalReplicas { + k8sutils.CheckIfEmptyMasters(ctx, r.K8sClient, instance) } // Mark the cluster status as ready if all the leader and follower nodes are ready if instance.Status.ReadyLeaderReplicas == leaderReplicas && instance.Status.ReadyFollowerReplicas == followerReplicas { - if k8sutils.RedisClusterStatusHealth(ctx, r.K8sClient, r.Log, instance) { - err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterReady, status.ReadyClusterReason, leaderReplicas, followerReplicas, r.Dk8sClient) + if k8sutils.RedisClusterStatusHealth(ctx, r.K8sClient, instance) { + err = k8sutils.UpdateRedisClusterStatus(ctx, instance, status.RedisClusterReady, status.ReadyClusterReason, leaderReplicas, followerReplicas, r.Dk8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } } } - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "") + return intctrlutil.RequeueAfter(ctx, time.Second*10, "") } // SetupWithManager sets up the controller with the Manager. -func (r *RedisClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&redisv1beta2.RedisCluster{}). Owns(&appsv1.StatefulSet{}). diff --git a/pkg/controllers/rediscluster/rediscluster_controller_suite_test.go b/pkg/controllers/rediscluster/rediscluster_controller_suite_test.go index 5f9d5ef2d..8cfe65206 100644 --- a/pkg/controllers/rediscluster/rediscluster_controller_suite_test.go +++ b/pkg/controllers/rediscluster/rediscluster_controller_suite_test.go @@ -100,13 +100,12 @@ var _ = BeforeSuite(func() { dk8sClient, err := dynamic.NewForConfig(cfg) Expect(err).ToNot(HaveOccurred()) - rcLog := ctrl.Log.WithName("controllers").WithName("RedisCluster") - err = (&RedisClusterReconciler{ + err = (&Reconciler{ Client: k8sManager.GetClient(), K8sClient: k8sClient, Dk8sClient: dk8sClient, Scheme: k8sManager.GetScheme(), - StatefulSet: k8sutils.NewStatefulSetService(k8sClient, rcLog), + StatefulSet: k8sutils.NewStatefulSetService(k8sClient), }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) diff --git a/pkg/controllers/redisreplication/redisreplication_controller.go b/pkg/controllers/redisreplication/redisreplication_controller.go index fb5955d6e..6b71bf56f 100644 --- a/pkg/controllers/redisreplication/redisreplication_controller.go +++ b/pkg/controllers/redisreplication/redisreplication_controller.go @@ -7,7 +7,6 @@ import ( redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" intctrlutil "github.com/OT-CONTAINER-KIT/redis-operator/pkg/controllerutil" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/k8sutils" - "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -15,76 +14,75 @@ import ( "k8s.io/client-go/kubernetes" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" ) -// RedisReplicationReconciler reconciles a RedisReplication object -type RedisReplicationReconciler struct { +// 
Reconciler reconciles a RedisReplication object +type Reconciler struct { client.Client k8sutils.Pod k8sutils.StatefulSet K8sClient kubernetes.Interface Dk8sClient dynamic.Interface - Log logr.Logger Scheme *runtime.Scheme } -func (r *RedisReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - reqLogger := r.Log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name) - reqLogger.Info("Reconciling opstree redis replication controller") +func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx, "Request.Namespace", req.Namespace, "Request.Name", req.Name) instance := &redisv1beta2.RedisReplication{} err := r.Client.Get(context.TODO(), req.NamespacedName, instance) if err != nil { - return intctrlutil.RequeueWithErrorChecking(err, reqLogger, "") + return intctrlutil.RequeueWithErrorChecking(ctx, err, "") } if instance.ObjectMeta.GetDeletionTimestamp() != nil { - if err = k8sutils.HandleRedisReplicationFinalizer(r.Client, r.K8sClient, r.Log, instance); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + if err = k8sutils.HandleRedisReplicationFinalizer(ctx, r.Client, r.K8sClient, instance); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "") } return intctrlutil.Reconciled() } if _, found := instance.ObjectMeta.GetAnnotations()["redisreplication.opstreelabs.in/skip-reconcile"]; found { - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "found skip reconcile annotation") + return intctrlutil.RequeueAfter(ctx, time.Second*10, "found skip reconcile annotation") } - if err = k8sutils.AddFinalizer(instance, k8sutils.RedisReplicationFinalizer, r.Client); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + if err = k8sutils.AddFinalizer(ctx, instance, k8sutils.RedisReplicationFinalizer, r.Client); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "") } - err = k8sutils.CreateReplicationRedis(instance, r.K8sClient) + err = k8sutils.CreateReplicationRedis(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } - err = k8sutils.CreateReplicationService(instance, r.K8sClient) + err = k8sutils.CreateReplicationService(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } if !r.IsStatefulSetReady(ctx, instance.Namespace, instance.Name) { return intctrlutil.Reconciled() } var realMaster string - masterNodes := k8sutils.GetRedisNodesByRole(ctx, r.K8sClient, r.Log, instance, "master") + masterNodes := k8sutils.GetRedisNodesByRole(ctx, r.K8sClient, instance, "master") if len(masterNodes) > 1 { - reqLogger.Info("Creating redis replication by executing replication creation commands") - slaveNodes := k8sutils.GetRedisNodesByRole(ctx, r.K8sClient, r.Log, instance, "slave") - realMaster = k8sutils.GetRedisReplicationRealMaster(ctx, r.K8sClient, r.Log, instance, masterNodes) + logger.Info("Creating redis replication by executing replication creation commands") + slaveNodes := k8sutils.GetRedisNodesByRole(ctx, r.K8sClient, instance, "slave") + realMaster = k8sutils.GetRedisReplicationRealMaster(ctx, r.K8sClient, instance, masterNodes) if len(slaveNodes) == 0 { realMaster = masterNodes[0] } - if err = k8sutils.CreateMasterSlaveReplication(ctx, r.K8sClient, r.Log, instance, masterNodes, realMaster); err != nil { - return 
intctrlutil.RequeueAfter(reqLogger, time.Second*60, "") + if err = k8sutils.CreateMasterSlaveReplication(ctx, r.K8sClient, instance, masterNodes, realMaster); err != nil { + return intctrlutil.RequeueAfter(ctx, time.Second*60, "") } } - realMaster = k8sutils.GetRedisReplicationRealMaster(ctx, r.K8sClient, r.Log, instance, masterNodes) + realMaster = k8sutils.GetRedisReplicationRealMaster(ctx, r.K8sClient, instance, masterNodes) if err = r.UpdateRedisReplicationMaster(ctx, instance, realMaster); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } if err = r.UpdateRedisPodRoleLabel(ctx, instance, realMaster); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "") + return intctrlutil.RequeueAfter(ctx, time.Second*10, "") } -func (r *RedisReplicationReconciler) UpdateRedisReplicationMaster(ctx context.Context, instance *redisv1beta2.RedisReplication, masterNode string) error { +func (r *Reconciler) UpdateRedisReplicationMaster(ctx context.Context, instance *redisv1beta2.RedisReplication, masterNode string) error { if instance.Status.MasterNode == masterNode { return nil } @@ -95,7 +93,7 @@ func (r *RedisReplicationReconciler) UpdateRedisReplicationMaster(ctx context.Co return nil } -func (r *RedisReplicationReconciler) UpdateRedisPodRoleLabel(ctx context.Context, cr *redisv1beta2.RedisReplication, masterNode string) error { +func (r *Reconciler) UpdateRedisPodRoleLabel(ctx context.Context, cr *redisv1beta2.RedisReplication, masterNode string) error { labels := k8sutils.GetRedisReplicationLabels(cr) pods, err := r.ListPods(ctx, cr.GetNamespace(), labels) if err != nil { @@ -121,7 +119,7 @@ func (r *RedisReplicationReconciler) UpdateRedisPodRoleLabel(ctx context.Context } // SetupWithManager sets up the controller with the Manager. -func (r *RedisReplicationReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&redisv1beta2.RedisReplication{}). Owns(&appsv1.StatefulSet{}). 
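
A note on the logging change running through the controllers above: each reconciler previously carried its own Log logr.Logger field and built a request-scoped logger by hand with WithValues("Request.Namespace", ...). After this change the logger is recovered from the request context via controller-runtime's log.FromContext(ctx), which controller-runtime already seeds with the request identifiers. The following is a minimal sketch of the resulting pattern, not code from this repository; ExampleReconciler and its body are illustrative only.

package example

import (
	"context"
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

// ExampleReconciler is a hypothetical reconciler used only to illustrate the
// context-based logging pattern; it is not part of the operator.
type ExampleReconciler struct{}

func (r *ExampleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// The logger comes from the context instead of a struct field.
	// controller-runtime attaches the request's name and namespace to this
	// logger, so the removed WithValues("Request.Namespace", ...) calls
	// become redundant.
	logger := log.FromContext(ctx)
	logger.V(1).Info("reconciling", "object", req.NamespacedName)

	// Helpers further down the call chain receive ctx and can call
	// log.FromContext(ctx) themselves, which is why the k8sutils functions
	// in this diff now take a context instead of a logr.Logger.
	return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
}
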
diff --git a/pkg/controllers/redisreplication/redisreplication_controller_suite_test.go b/pkg/controllers/redisreplication/redisreplication_controller_suite_test.go index 0081b194e..445f7ce7d 100644 --- a/pkg/controllers/redisreplication/redisreplication_controller_suite_test.go +++ b/pkg/controllers/redisreplication/redisreplication_controller_suite_test.go @@ -100,14 +100,13 @@ var _ = BeforeSuite(func() { dk8sClient, err := dynamic.NewForConfig(cfg) Expect(err).ToNot(HaveOccurred()) - rrLog := ctrl.Log.WithName("controllers").WithName("RedisReplication") - err = (&RedisReplicationReconciler{ + err = (&Reconciler{ Client: k8sManager.GetClient(), K8sClient: k8sClient, Dk8sClient: dk8sClient, Scheme: k8sManager.GetScheme(), - Pod: k8sutils.NewPodService(k8sClient, rrLog), - StatefulSet: k8sutils.NewStatefulSetService(k8sClient, rrLog), + Pod: k8sutils.NewPodService(k8sClient), + StatefulSet: k8sutils.NewStatefulSetService(k8sClient), }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) diff --git a/pkg/controllers/redissentinel/redissentinel_controller.go b/pkg/controllers/redissentinel/redissentinel_controller.go index feb1ba695..420730a4d 100644 --- a/pkg/controllers/redissentinel/redissentinel_controller.go +++ b/pkg/controllers/redissentinel/redissentinel_controller.go @@ -7,7 +7,6 @@ import ( redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" intctrlutil "github.com/OT-CONTAINER-KIT/redis-operator/pkg/controllerutil" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/k8sutils" - "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/dynamic" @@ -21,41 +20,38 @@ type RedisSentinelReconciler struct { client.Client K8sClient kubernetes.Interface Dk8sClient dynamic.Interface - Log logr.Logger Scheme *runtime.Scheme ReplicationWatcher *intctrlutil.ResourceWatcher } func (r *RedisSentinelReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - reqLogger := r.Log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name) - reqLogger.Info("Reconciling opstree redis controller") instance := &redisv1beta2.RedisSentinel{} err := r.Client.Get(context.TODO(), req.NamespacedName, instance) if err != nil { - return intctrlutil.RequeueWithErrorChecking(err, reqLogger, "") + return intctrlutil.RequeueWithErrorChecking(ctx, err, "") } if instance.ObjectMeta.GetDeletionTimestamp() != nil { - if err = k8sutils.HandleRedisSentinelFinalizer(r.Client, r.Log, instance); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + if err = k8sutils.HandleRedisSentinelFinalizer(ctx, r.Client, instance); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "") } return intctrlutil.Reconciled() } if _, found := instance.ObjectMeta.GetAnnotations()["redissentinel.opstreelabs.in/skip-reconcile"]; found { - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "found skip reconcile annotation") + return intctrlutil.RequeueAfter(ctx, time.Second*10, "found skip reconcile annotation") } // Get total Sentinel Replicas // sentinelReplicas := instance.Spec.GetSentinelCounts("sentinel") - if err = k8sutils.AddFinalizer(instance, k8sutils.RedisSentinelFinalizer, r.Client); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + if err = k8sutils.AddFinalizer(ctx, instance, k8sutils.RedisSentinelFinalizer, r.Client); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "") } - if instance.Spec.RedisSentinelConfig != nil && 
!k8sutils.IsRedisReplicationReady(ctx, reqLogger, r.K8sClient, r.Dk8sClient, instance) { - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "Redis Replication is specified but not ready") + if instance.Spec.RedisSentinelConfig != nil && !k8sutils.IsRedisReplicationReady(ctx, r.K8sClient, r.Dk8sClient, instance) { + return intctrlutil.RequeueAfter(ctx, time.Second*10, "Redis Replication is specified but not ready") } if instance.Spec.RedisSentinelConfig != nil { @@ -70,20 +66,20 @@ func (r *RedisSentinelReconciler) Reconcile(ctx context.Context, req ctrl.Reques } // Create Redis Sentinel - err = k8sutils.CreateRedisSentinel(ctx, r.K8sClient, r.Log, instance, r.K8sClient, r.Dk8sClient) + err = k8sutils.CreateRedisSentinel(ctx, r.K8sClient, instance, r.K8sClient, r.Dk8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } - err = k8sutils.ReconcileSentinelPodDisruptionBudget(instance, instance.Spec.PodDisruptionBudget, r.K8sClient) + err = k8sutils.ReconcileSentinelPodDisruptionBudget(ctx, instance, instance.Spec.PodDisruptionBudget, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } // Create the Service for Redis Sentinel - err = k8sutils.CreateRedisSentinelService(instance, r.K8sClient) + err = k8sutils.CreateRedisSentinelService(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } return intctrlutil.Reconciled() } diff --git a/pkg/controllerutil/controller_common.go b/pkg/controllerutil/controller_common.go index ba063060b..2163b0291 100644 --- a/pkg/controllerutil/controller_common.go +++ b/pkg/controllerutil/controller_common.go @@ -1,10 +1,11 @@ package controllerutil import ( + "context" "time" - "github.com/go-logr/logr" apierrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -12,29 +13,29 @@ func Reconciled() (reconcile.Result, error) { return reconcile.Result{}, nil } -func RequeueAfter(logger logr.Logger, duration time.Duration, msg string, keysAndValues ...interface{}) (reconcile.Result, error) { +func RequeueAfter(ctx context.Context, duration time.Duration, msg string, keysAndValues ...interface{}) (reconcile.Result, error) { keysAndValues = append(keysAndValues, "duration", duration.String()) if msg == "" { msg = "requeue-after" } - logger.V(1).Info(msg, keysAndValues...) + log.FromContext(ctx).V(1).Info(msg, keysAndValues...) return reconcile.Result{ Requeue: true, RequeueAfter: duration, }, nil } -func RequeueWithError(err error, logger logr.Logger, msg string, keysAndValues ...interface{}) (reconcile.Result, error) { +func RequeueWithError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) (reconcile.Result, error) { if msg == "" { msg = "requeue with error" } - logger.Error(err, msg, keysAndValues...) + log.FromContext(ctx).Error(err, msg, keysAndValues...) return reconcile.Result{}, err } -func RequeueWithErrorChecking(err error, logger logr.Logger, msg string, keysAndValues ...interface{}) (reconcile.Result, error) { +func RequeueWithErrorChecking(ctx context.Context, err error, msg string, keysAndValues ...interface{}) (reconcile.Result, error) { if apierrors.IsNotFound(err) { return Reconciled() } - return RequeueWithError(err, logger, msg, keysAndValues...) 
+ return RequeueWithError(ctx, err, msg, keysAndValues...) } diff --git a/pkg/k8sutils/cluster-scaling.go b/pkg/k8sutils/cluster-scaling.go index 4b6edec9e..b2bd5a0da 100644 --- a/pkg/k8sutils/cluster-scaling.go +++ b/pkg/k8sutils/cluster-scaling.go @@ -7,21 +7,20 @@ import ( "strings" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" - "github.com/go-logr/logr" redis "github.com/redis/go-redis/v9" "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/log" ) // ReshardRedisCluster transfer the slots from the last node to the first node. // // NOTE: when all slot been transferred, the node become slave of the first master node. -func ReshardRedisCluster(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, remove bool) { - ctx := context.TODO() - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") +func ReshardRedisCluster(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, remove bool) { + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() var cmd []string - currentRedisCount := CheckRedisNodeCount(ctx, client, logger, cr, "leader") + currentRedisCount := CheckRedisNodeCount(ctx, client, cr, "leader") // Transfer Pod details transferPOD := RedisDetails{ @@ -38,13 +37,13 @@ func ReshardRedisCluster(client kubernetes.Interface, logger logr.Logger, cr *re if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(transferPOD, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(client, logger, transferPOD, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, transferPOD, *cr.Spec.Port)) } if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) @@ -55,41 +54,41 @@ func ReshardRedisCluster(client kubernetes.Interface, logger logr.Logger, cr *re //--cluster-from --cluster-to --cluster-slots --cluster-yes // Remove Node - removeNodeID := getRedisNodeID(ctx, client, logger, cr, removePOD) + removeNodeID := getRedisNodeID(ctx, client, cr, removePOD) cmd = append(cmd, "--cluster-from") cmd = append(cmd, removeNodeID) // Transfer Node - transferNodeID := getRedisNodeID(ctx, client, logger, cr, transferPOD) + transferNodeID := getRedisNodeID(ctx, client, cr, transferPOD) cmd = append(cmd, "--cluster-to") cmd = append(cmd, transferNodeID) // Cluster Slots - slot := getRedisClusterSlots(ctx, redisClient, logger, removeNodeID) + slot := getRedisClusterSlots(ctx, redisClient, removeNodeID) cmd = append(cmd, "--cluster-slots") cmd = append(cmd, slot) cmd = append(cmd, "--cluster-yes") - logger.V(1).Info("Redis cluster reshard command is", "Command", cmd) + log.FromContext(ctx).V(1).Info("Redis cluster reshard command is", "Command", cmd) if slot == "0" { - logger.V(1).Info("Skipped the execution of", "Cmd", cmd) + log.FromContext(ctx).V(1).Info("Skipped the execution of", "Cmd", cmd) return } - executeCommand(client, logger, 
cr, cmd, cr.ObjectMeta.Name+"-leader-0") + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") if remove { - RemoveRedisNodeFromCluster(ctx, client, logger, cr, removePOD) + RemoveRedisNodeFromCluster(ctx, client, cr, removePOD) } } -func getRedisClusterSlots(ctx context.Context, redisClient *redis.Client, logger logr.Logger, nodeID string) string { +func getRedisClusterSlots(ctx context.Context, redisClient *redis.Client, nodeID string) string { totalSlots := 0 redisSlots, err := redisClient.ClusterSlots(ctx).Result() if err != nil { - logger.Error(err, "Failed to Get Cluster Slots") + log.FromContext(ctx).Error(err, "Failed to Get Cluster Slots") return "" } for _, slot := range redisSlots { @@ -102,39 +101,39 @@ func getRedisClusterSlots(ctx context.Context, redisClient *redis.Client, logger } } - logger.V(1).Info("Total cluster slots to be transferred from", "node", nodeID, "is", totalSlots) + log.FromContext(ctx).V(1).Info("Total cluster slots to be transferred from", "node", nodeID, "is", totalSlots) return strconv.Itoa(totalSlots) } // getRedisNodeID would return nodeID of a redis node by passing pod -func getRedisNodeID(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, pod RedisDetails) string { - redisClient := configureRedisClient(client, logger, cr, pod.PodName) +func getRedisNodeID(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, pod RedisDetails) string { + redisClient := configureRedisClient(ctx, client, cr, pod.PodName) defer redisClient.Close() pong, err := redisClient.Ping(ctx).Result() if err != nil || pong != "PONG" { - logger.Error(err, "Failed to ping Redis server") + log.FromContext(ctx).Error(err, "Failed to ping Redis server") return "" } cmd := redis.NewStringCmd(ctx, "cluster", "myid") err = redisClient.Process(ctx, cmd) if err != nil { - logger.Error(err, "Redis command failed with this error") + log.FromContext(ctx).Error(err, "Redis command failed with this error") return "" } output, err := cmd.Result() if err != nil { - logger.Error(err, "Redis command failed with this error") + log.FromContext(ctx).Error(err, "Redis command failed with this error") return "" } - logger.V(1).Info("Redis node ID ", "is", output) + log.FromContext(ctx).V(1).Info("Redis node ID ", "is", output) return output } // Rebalance the Redis CLuster using the Empty Master Nodes -func RebalanceRedisClusterEmptyMasters(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { +func RebalanceRedisClusterEmptyMasters(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { // cmd = redis-cli --cluster rebalance : --cluster-use-empty-masters -a var cmd []string pod := RedisDetails{ @@ -146,15 +145,15 @@ func RebalanceRedisClusterEmptyMasters(client kubernetes.Interface, logger logr. 
if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(pod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(client, logger, pod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, pod, *cr.Spec.Port)) } cmd = append(cmd, "--cluster-use-empty-masters") if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) @@ -162,13 +161,13 @@ func RebalanceRedisClusterEmptyMasters(client kubernetes.Interface, logger logr. cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) - logger.V(1).Info("Redis cluster rebalance command is", "Command", cmd) - executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-1") + log.FromContext(ctx).V(1).Info("Redis cluster rebalance command is", "Command", cmd) + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-1") } -func CheckIfEmptyMasters(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { - totalRedisLeaderNodes := CheckRedisNodeCount(ctx, client, logger, cr, "leader") - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") +func CheckIfEmptyMasters(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { + totalRedisLeaderNodes := CheckRedisNodeCount(ctx, client, cr, "leader") + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() for i := 0; i < int(totalRedisLeaderNodes); i++ { @@ -176,19 +175,19 @@ func CheckIfEmptyMasters(ctx context.Context, client kubernetes.Interface, logge PodName: cr.ObjectMeta.Name + "-leader-" + strconv.Itoa(i), Namespace: cr.Namespace, } - podNodeID := getRedisNodeID(ctx, client, logger, cr, pod) - podSlots := getRedisClusterSlots(ctx, redisClient, logger, podNodeID) + podNodeID := getRedisNodeID(ctx, client, cr, pod) + podSlots := getRedisClusterSlots(ctx, redisClient, podNodeID) if podSlots == "0" || podSlots == "" { - logger.V(1).Info("Found Empty Redis Leader Node", "pod", pod) - RebalanceRedisClusterEmptyMasters(client, logger, cr) + log.FromContext(ctx).V(1).Info("Found Empty Redis Leader Node", "pod", pod) + RebalanceRedisClusterEmptyMasters(ctx, client, cr) break } } } // Rebalance Redis Cluster Would Rebalance the Redis Cluster without using the empty masters -func RebalanceRedisCluster(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { +func RebalanceRedisCluster(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { // cmd = redis-cli --cluster rebalance : -a var cmd []string pod := RedisDetails{ @@ -200,13 +199,13 @@ func RebalanceRedisCluster(client kubernetes.Interface, logger logr.Logger, cr * if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(pod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(client, logger, pod, *cr.Spec.Port)) + cmd = append(cmd, 
getRedisServerAddress(ctx, client, pod, *cr.Spec.Port)) } if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) @@ -214,14 +213,14 @@ func RebalanceRedisCluster(client kubernetes.Interface, logger logr.Logger, cr * cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) - logger.V(1).Info("Redis cluster rebalance command is", "Command", cmd) - executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-1") + log.FromContext(ctx).V(1).Info("Redis cluster rebalance command is", "Command", cmd) + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-1") } // Add redis cluster node would add a node to the existing redis cluster using redis-cli -func AddRedisNodeToCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { +func AddRedisNodeToCluster(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { var cmd []string - activeRedisNode := CheckRedisNodeCount(ctx, client, logger, cr, "leader") + activeRedisNode := CheckRedisNodeCount(ctx, client, cr, "leader") newPod := RedisDetails{ PodName: cr.ObjectMeta.Name + "-leader-" + strconv.Itoa(int(activeRedisNode)), @@ -238,14 +237,14 @@ func AddRedisNodeToCluster(ctx context.Context, client kubernetes.Interface, log cmd = append(cmd, getRedisHostname(newPod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) cmd = append(cmd, getRedisHostname(existingPod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(client, logger, newPod, *cr.Spec.Port)) - cmd = append(cmd, getRedisServerAddress(client, logger, existingPod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, newPod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, existingPod, *cr.Spec.Port)) } if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) @@ -253,16 +252,16 @@ func AddRedisNodeToCluster(ctx context.Context, client kubernetes.Interface, log cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) 
- logger.V(1).Info("Redis cluster add-node command is", "Command", cmd) - executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + log.FromContext(ctx).V(1).Info("Redis cluster add-node command is", "Command", cmd) + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") } // getAttachedFollowerNodeIDs would return a slice of redis followers attached to a redis leader -func getAttachedFollowerNodeIDs(ctx context.Context, redisClient *redis.Client, logger logr.Logger, masterNodeID string) []string { +func getAttachedFollowerNodeIDs(ctx context.Context, redisClient *redis.Client, masterNodeID string) []string { // 3acb029fead40752f432c84f9bed2e639119a573 192.168.84.239:6379@16379,redis-cluster-v1beta2-follower-5 slave e3299968586dd457a8dba04fc6c747cecd38510f 0 1713595736542 6 connected slaveNodes, err := redisClient.ClusterSlaves(ctx, masterNodeID).Result() if err != nil { - logger.Error(err, "Failed to get attached follower node IDs", "masterNodeID", masterNodeID) + log.FromContext(ctx).Error(err, "Failed to get attached follower node IDs", "masterNodeID", masterNodeID) return nil } slaveIDs := make([]string, 0, len(slaveNodes)) @@ -270,16 +269,16 @@ func getAttachedFollowerNodeIDs(ctx context.Context, redisClient *redis.Client, stringSlice := strings.Split(slave, " ") slaveIDs = append(slaveIDs, stringSlice[0]) } - logger.V(1).Info("Slaves Nodes attached to", "node", masterNodeID, "are", slaveIDs) + log.FromContext(ctx).V(1).Info("Slaves Nodes attached to", "node", masterNodeID, "are", slaveIDs) return slaveIDs } // Remove redis follower node would remove all follower nodes of last leader node using redis-cli -func RemoveRedisFollowerNodesFromCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { +func RemoveRedisFollowerNodesFromCluster(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { var cmd []string - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() - currentRedisCount := CheckRedisNodeCount(ctx, client, logger, cr, "leader") + currentRedisCount := CheckRedisNodeCount(ctx, client, cr, "leader") existingPod := RedisDetails{ PodName: cr.ObjectMeta.Name + "-leader-0", @@ -293,39 +292,39 @@ func RemoveRedisFollowerNodesFromCluster(ctx context.Context, client kubernetes. cmd = []string{"redis-cli"} if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) } cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) 
- lastLeaderPodNodeID := getRedisNodeID(ctx, client, logger, cr, lastLeaderPod) - followerNodeIDs := getAttachedFollowerNodeIDs(ctx, redisClient, logger, lastLeaderPodNodeID) + lastLeaderPodNodeID := getRedisNodeID(ctx, client, cr, lastLeaderPod) + followerNodeIDs := getAttachedFollowerNodeIDs(ctx, redisClient, lastLeaderPodNodeID) cmd = append(cmd, "--cluster", "del-node") if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(existingPod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(client, logger, existingPod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, existingPod, *cr.Spec.Port)) } for _, followerNodeID := range followerNodeIDs { cmd = append(cmd, followerNodeID) - logger.V(1).Info("Redis cluster follower remove command is", "Command", cmd) - executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + log.FromContext(ctx).V(1).Info("Redis cluster follower remove command is", "Command", cmd) + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") cmd = cmd[:len(cmd)-1] } } // Remove redis cluster node would remove last node to the existing redis cluster using redis-cli -func RemoveRedisNodeFromCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, removePod RedisDetails) { +func RemoveRedisNodeFromCluster(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, removePod RedisDetails) { var cmd []string - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() - // currentRedisCount := CheckRedisNodeCount(ctx, client, logger, cr, "leader") + // currentRedisCount := CheckRedisNodeCount(ctx, client, cr, "leader") existingPod := RedisDetails{ PodName: cr.ObjectMeta.Name + "-leader-0", @@ -341,16 +340,16 @@ func RemoveRedisNodeFromCluster(ctx context.Context, client kubernetes.Interface if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(existingPod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(client, logger, existingPod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, existingPod, *cr.Spec.Port)) } - removePodNodeID := getRedisNodeID(ctx, client, logger, cr, removePod) + removePodNodeID := getRedisNodeID(ctx, client, cr, removePod) cmd = append(cmd, removePodNodeID) if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) @@ -358,26 +357,26 @@ func RemoveRedisNodeFromCluster(ctx context.Context, client kubernetes.Interface cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) 
- logger.V(1).Info("Redis cluster leader remove command is", "Command", cmd) - if getRedisClusterSlots(ctx, redisClient, logger, removePodNodeID) != "0" { - logger.V(1).Info("Skipping execution remove leader not empty", "cmd", cmd) + log.FromContext(ctx).V(1).Info("Redis cluster leader remove command is", "Command", cmd) + if getRedisClusterSlots(ctx, redisClient, removePodNodeID) != "0" { + log.FromContext(ctx).V(1).Info("Skipping execution remove leader not empty", "cmd", cmd) } - executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") } // verifyLeaderPod return true if the pod is leader/master -func VerifyLeaderPod(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) bool { - podName := cr.Name + "-leader-" + strconv.Itoa(int(CheckRedisNodeCount(ctx, client, logger, cr, "leader"))-1) +func VerifyLeaderPod(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) bool { + podName := cr.Name + "-leader-" + strconv.Itoa(int(CheckRedisNodeCount(ctx, client, cr, "leader"))-1) - redisClient := configureRedisClient(client, logger, cr, podName) + redisClient := configureRedisClient(ctx, client, cr, podName) defer redisClient.Close() - return verifyLeaderPodInfo(ctx, redisClient, logger, podName) + return verifyLeaderPodInfo(ctx, redisClient, podName) } -func verifyLeaderPodInfo(ctx context.Context, redisClient *redis.Client, logger logr.Logger, podName string) bool { +func verifyLeaderPodInfo(ctx context.Context, redisClient *redis.Client, podName string) bool { info, err := redisClient.Info(ctx, "replication").Result() if err != nil { - logger.Error(err, "Failed to Get the role Info of the", "redis pod", podName) + log.FromContext(ctx).Error(err, "Failed to Get the role Info of the", "redis pod", podName) return false } @@ -392,8 +391,8 @@ func verifyLeaderPodInfo(ctx context.Context, redisClient *redis.Client, logger return false } -func ClusterFailover(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { - slavePodName := cr.Name + "-leader-" + strconv.Itoa(int(CheckRedisNodeCount(ctx, client, logger, cr, "leader"))-1) +func ClusterFailover(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { + slavePodName := cr.Name + "-leader-" + strconv.Itoa(int(CheckRedisNodeCount(ctx, client, cr, "leader"))-1) // cmd = redis-cli cluster failover -a var cmd []string pod := RedisDetails{ @@ -406,13 +405,13 @@ func ClusterFailover(ctx context.Context, client kubernetes.Interface, logger lo if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(pod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(client, logger, pod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, pod, *cr.Spec.Port)) } if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) @@ -420,6 +419,6 
@@ func ClusterFailover(ctx context.Context, client kubernetes.Interface, logger lo cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, slavePodName)...) - logger.V(1).Info("Redis cluster failover command is", "Command", cmd) - executeCommand(client, logger, cr, cmd, slavePodName) + log.FromContext(ctx).V(1).Info("Redis cluster failover command is", "Command", cmd) + executeCommand(ctx, client, cr, cmd, slavePodName) } diff --git a/pkg/k8sutils/cluster-scaling_test.go b/pkg/k8sutils/cluster-scaling_test.go index fe1bd3bd0..69fa1fba0 100644 --- a/pkg/k8sutils/cluster-scaling_test.go +++ b/pkg/k8sutils/cluster-scaling_test.go @@ -5,15 +5,12 @@ import ( "fmt" "testing" - "github.com/go-logr/logr" "github.com/go-redis/redismock/v9" redis "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" ) func Test_verifyLeaderPodInfo(t *testing.T) { - logger := logr.Discard() - tests := []struct { name string section string @@ -52,7 +49,7 @@ func Test_verifyLeaderPodInfo(t *testing.T) { mock.ExpectInfo(tt.section).SetVal(tt.response) } - result := verifyLeaderPodInfo(ctx, client, logger, "test-pod") + result := verifyLeaderPodInfo(ctx, client, "test-pod") assert.Equal(t, tt.expectedBool, result, "Test case: "+tt.name) @@ -64,8 +61,6 @@ func Test_verifyLeaderPodInfo(t *testing.T) { } func Test_getRedisClusterSlots(t *testing.T) { - logger := logr.Discard() - tests := []struct { name string nodeID string @@ -138,7 +133,7 @@ func Test_getRedisClusterSlots(t *testing.T) { mock.ExpectClusterSlots().SetVal(tt.clusterSlots) } - result := getRedisClusterSlots(ctx, client, logger, tt.nodeID) + result := getRedisClusterSlots(ctx, client, tt.nodeID) assert.Equal(t, tt.expectedResult, result, "Test case: "+tt.name) @@ -150,8 +145,6 @@ func Test_getRedisClusterSlots(t *testing.T) { } func Test_getAttachedFollowerNodeIDs(t *testing.T) { - logger := logr.Discard() - tests := []struct { name string masterNodeID string @@ -209,7 +202,7 @@ func Test_getAttachedFollowerNodeIDs(t *testing.T) { mock.ExpectClusterSlaves(tt.masterNodeID).SetVal(tt.slaveNodeIDs) } - result := getAttachedFollowerNodeIDs(ctx, client, logger, tt.masterNodeID) + result := getAttachedFollowerNodeIDs(ctx, client, tt.masterNodeID) assert.ElementsMatch(t, tt.expectedslaveNodeIDs, result, "Test case: "+tt.name) diff --git a/pkg/k8sutils/finalizer.go b/pkg/k8sutils/finalizer.go index 338224698..6331fd319 100644 --- a/pkg/k8sutils/finalizer.go +++ b/pkg/k8sutils/finalizer.go @@ -5,13 +5,13 @@ import ( "fmt" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" - "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/utils/env" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" ) const ( @@ -22,17 +22,17 @@ const ( ) // HandleRedisFinalizer finalize resource if instance is marked to be deleted -func HandleRedisFinalizer(ctrlclient client.Client, k8sClient kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.Redis) error { +func HandleRedisFinalizer(ctx context.Context, ctrlclient client.Client, k8sClient kubernetes.Interface, cr *redisv1beta2.Redis) error { if cr.GetDeletionTimestamp() != nil { if controllerutil.ContainsFinalizer(cr, RedisFinalizer) { if cr.Spec.Storage != nil && !cr.Spec.Storage.KeepAfterDelete { - if err := finalizeRedisPVC(k8sClient, logger, cr); err != nil { + if err := finalizeRedisPVC(ctx, k8sClient, cr); err != nil 
{ return err } } controllerutil.RemoveFinalizer(cr, RedisFinalizer) if err := ctrlclient.Update(context.TODO(), cr); err != nil { - logger.Error(err, "Could not remove finalizer", "finalizer", RedisFinalizer) + log.FromContext(ctx).Error(err, "Could not remove finalizer", "finalizer", RedisFinalizer) return err } } @@ -41,17 +41,17 @@ func HandleRedisFinalizer(ctrlclient client.Client, k8sClient kubernetes.Interfa } // HandleRedisClusterFinalizer finalize resource if instance is marked to be deleted -func HandleRedisClusterFinalizer(ctrlclient client.Client, k8sClient kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) error { +func HandleRedisClusterFinalizer(ctx context.Context, ctrlclient client.Client, k8sClient kubernetes.Interface, cr *redisv1beta2.RedisCluster) error { if cr.GetDeletionTimestamp() != nil { if controllerutil.ContainsFinalizer(cr, RedisClusterFinalizer) { if cr.Spec.Storage != nil && !cr.Spec.Storage.KeepAfterDelete { - if err := finalizeRedisClusterPVC(k8sClient, logger, cr); err != nil { + if err := finalizeRedisClusterPVC(ctx, k8sClient, cr); err != nil { return err } } controllerutil.RemoveFinalizer(cr, RedisClusterFinalizer) if err := ctrlclient.Update(context.TODO(), cr); err != nil { - logger.Error(err, "Could not remove finalizer "+RedisClusterFinalizer) + log.FromContext(ctx).Error(err, "Could not remove finalizer "+RedisClusterFinalizer) return err } } @@ -60,17 +60,17 @@ func HandleRedisClusterFinalizer(ctrlclient client.Client, k8sClient kubernetes. } // Handle RedisReplicationFinalizer finalize resource if instance is marked to be deleted -func HandleRedisReplicationFinalizer(ctrlclient client.Client, k8sClient kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication) error { +func HandleRedisReplicationFinalizer(ctx context.Context, ctrlclient client.Client, k8sClient kubernetes.Interface, cr *redisv1beta2.RedisReplication) error { if cr.GetDeletionTimestamp() != nil { if controllerutil.ContainsFinalizer(cr, RedisReplicationFinalizer) { if cr.Spec.Storage != nil && !cr.Spec.Storage.KeepAfterDelete { - if err := finalizeRedisReplicationPVC(k8sClient, logger, cr); err != nil { + if err := finalizeRedisReplicationPVC(ctx, k8sClient, cr); err != nil { return err } } controllerutil.RemoveFinalizer(cr, RedisReplicationFinalizer) if err := ctrlclient.Update(context.TODO(), cr); err != nil { - logger.Error(err, "Could not remove finalizer "+RedisReplicationFinalizer) + log.FromContext(ctx).Error(err, "Could not remove finalizer "+RedisReplicationFinalizer) return err } } @@ -79,12 +79,12 @@ func HandleRedisReplicationFinalizer(ctrlclient client.Client, k8sClient kuberne } // HandleRedisSentinelFinalizer finalize resource if instance is marked to be deleted -func HandleRedisSentinelFinalizer(ctrlclient client.Client, logger logr.Logger, cr *redisv1beta2.RedisSentinel) error { +func HandleRedisSentinelFinalizer(ctx context.Context, ctrlclient client.Client, cr *redisv1beta2.RedisSentinel) error { if cr.GetDeletionTimestamp() != nil { if controllerutil.ContainsFinalizer(cr, RedisSentinelFinalizer) { controllerutil.RemoveFinalizer(cr, RedisSentinelFinalizer) if err := ctrlclient.Update(context.TODO(), cr); err != nil { - logger.Error(err, "Could not remove finalizer "+RedisSentinelFinalizer) + log.FromContext(ctx).Error(err, "Could not remove finalizer "+RedisSentinelFinalizer) return err } } @@ -93,7 +93,7 @@ func HandleRedisSentinelFinalizer(ctrlclient client.Client, logger logr.Logger, } // AddFinalizer add finalizer for 
graceful deletion -func AddFinalizer(cr client.Object, finalizer string, cl client.Client) error { +func AddFinalizer(ctx context.Context, cr client.Object, finalizer string, cl client.Client) error { if !controllerutil.ContainsFinalizer(cr, finalizer) { controllerutil.AddFinalizer(cr, finalizer) return cl.Update(context.TODO(), cr) @@ -102,26 +102,26 @@ func AddFinalizer(cr client.Object, finalizer string, cl client.Client) error { } // finalizeRedisPVC delete PVC -func finalizeRedisPVC(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.Redis) error { +func finalizeRedisPVC(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.Redis) error { pvcTemplateName := env.GetString(EnvOperatorSTSPVCTemplateName, cr.Name) PVCName := fmt.Sprintf("%s-%s-0", pvcTemplateName, cr.Name) err := client.CoreV1().PersistentVolumeClaims(cr.Namespace).Delete(context.TODO(), PVCName, metav1.DeleteOptions{}) if err != nil && !errors.IsNotFound(err) { - logger.Error(err, "Could not delete Persistent Volume Claim", "PVCName", PVCName) + log.FromContext(ctx).Error(err, "Could not delete Persistent Volume Claim", "PVCName", PVCName) return err } return nil } // finalizeRedisClusterPVC delete PVCs -func finalizeRedisClusterPVC(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) error { +func finalizeRedisClusterPVC(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) error { for _, role := range []string{"leader", "follower"} { for i := 0; i < int(cr.Spec.GetReplicaCounts(role)); i++ { pvcTemplateName := env.GetString(EnvOperatorSTSPVCTemplateName, cr.Name+"-"+role) PVCName := fmt.Sprintf("%s-%s-%s-%d", pvcTemplateName, cr.Name, role, i) err := client.CoreV1().PersistentVolumeClaims(cr.Namespace).Delete(context.TODO(), PVCName, metav1.DeleteOptions{}) if err != nil && !errors.IsNotFound(err) { - logger.Error(err, "Could not delete Persistent Volume Claim "+PVCName) + log.FromContext(ctx).Error(err, "Could not delete Persistent Volume Claim "+PVCName) return err } } @@ -130,7 +130,7 @@ func finalizeRedisClusterPVC(client kubernetes.Interface, logger logr.Logger, cr PVCName := fmt.Sprintf("%s-%s-%s-%d", "node-conf", cr.Name, role, i) err := client.CoreV1().PersistentVolumeClaims(cr.Namespace).Delete(context.TODO(), PVCName, metav1.DeleteOptions{}) if err != nil && !errors.IsNotFound(err) { - logger.Error(err, "Could not delete Persistent Volume Claim "+PVCName) + log.FromContext(ctx).Error(err, "Could not delete Persistent Volume Claim "+PVCName) return err } } @@ -140,13 +140,13 @@ func finalizeRedisClusterPVC(client kubernetes.Interface, logger logr.Logger, cr } // finalizeRedisReplicationPVC delete PVCs -func finalizeRedisReplicationPVC(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication) error { +func finalizeRedisReplicationPVC(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisReplication) error { for i := 0; i < int(cr.Spec.GetReplicationCounts("replication")); i++ { pvcTemplateName := env.GetString(EnvOperatorSTSPVCTemplateName, cr.Name) PVCName := fmt.Sprintf("%s-%s-%d", pvcTemplateName, cr.Name, i) err := client.CoreV1().PersistentVolumeClaims(cr.Namespace).Delete(context.TODO(), PVCName, metav1.DeleteOptions{}) if err != nil && !errors.IsNotFound(err) { - logger.Error(err, "Could not delete Persistent Volume Claim "+PVCName) + log.FromContext(ctx).Error(err, "Could not delete Persistent Volume Claim "+PVCName) return err } } diff --git a/pkg/k8sutils/finalizer_test.go 
b/pkg/k8sutils/finalizer_test.go index a5324369b..4f46d8f7f 100644 --- a/pkg/k8sutils/finalizer_test.go +++ b/pkg/k8sutils/finalizer_test.go @@ -9,7 +9,6 @@ import ( "github.com/OT-CONTAINER-KIT/redis-operator/api" "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" mockClient "github.com/OT-CONTAINER-KIT/redis-operator/mocks/client" - "github.com/go-logr/logr/testr" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -132,7 +131,6 @@ func TestHandleRedisFinalizer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - logger := testr.New(t) var k8sClient *k8sClientFake.Clientset if tc.existingPVC != nil { k8sClient = k8sClientFake.NewSimpleClientset(tc.existingPVC.DeepCopyObject()) @@ -147,7 +145,7 @@ func TestHandleRedisFinalizer(t *testing.T) { assert.NoError(t, err) } - err := HandleRedisFinalizer(tc.mockClient, k8sClient, logger, tc.cr) + err := HandleRedisFinalizer(context.TODO(), tc.mockClient, k8sClient, tc.cr) if tc.expectError { assert.Error(t, err) } else { @@ -262,7 +260,6 @@ func TestHandleRedisClusterFinalizer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - logger := testr.New(t) var k8sClient *k8sClientFake.Clientset if tc.existingPVC != nil { k8sClient = k8sClientFake.NewSimpleClientset(helperToRuntimeObjects(tc.existingPVC)...) @@ -279,7 +276,7 @@ func TestHandleRedisClusterFinalizer(t *testing.T) { } } - err := HandleRedisClusterFinalizer(tc.mockClient, k8sClient, logger, tc.cr) + err := HandleRedisClusterFinalizer(context.TODO(), tc.mockClient, k8sClient, tc.cr) if tc.expectError { assert.Error(t, err) } else { @@ -461,7 +458,6 @@ func TestHandleRedisReplicationFinalizer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - logger := testr.New(t) var k8sClient *k8sClientFake.Clientset if tc.existingPVC != nil { k8sClient = k8sClientFake.NewSimpleClientset(helperToRuntimeObjects(tc.existingPVC)...) @@ -478,7 +474,7 @@ func TestHandleRedisReplicationFinalizer(t *testing.T) { } } - err := HandleRedisReplicationFinalizer(tc.mockClient, k8sClient, logger, tc.cr) + err := HandleRedisReplicationFinalizer(context.TODO(), tc.mockClient, k8sClient, tc.cr) if tc.expectError { assert.Error(t, err) } else { @@ -557,8 +553,7 @@ func TestHandleRedisSentinelFinalizer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - logger := testr.New(t) - err := HandleRedisSentinelFinalizer(tc.mockClient, logger, tc.cr) + err := HandleRedisSentinelFinalizer(context.TODO(), tc.mockClient, tc.cr) if tc.expectError { assert.Error(t, err) } else { @@ -597,7 +592,6 @@ func TestFinalizeRedisPVC(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - logger := testr.New(t) cr := &v1beta2.Redis{ ObjectMeta: metav1.ObjectMeta{ Name: "test-redis", @@ -618,7 +612,7 @@ func TestFinalizeRedisPVC(t *testing.T) { assert.NoError(t, err) } - err := finalizeRedisPVC(k8sClient, logger, cr) + err := finalizeRedisPVC(context.TODO(), k8sClient, cr) if tc.expectError { assert.Error(t, err) assert.Equal(t, tc.errorExpected, err) @@ -694,7 +688,6 @@ func TestFinalizeRedisReplicationPVC(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - logger := testr.New(t) var k8sClient *k8sClientFake.Clientset if tc.existingPVCs != nil { k8sClient = k8sClientFake.NewSimpleClientset(helperToRuntimeObjects(tc.existingPVCs)...) 
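The test hunks above swap testr.New(t) for a bare context.TODO(). If per-test log output is still wanted, the same testing logger can be carried through the context instead; a hedged sketch of that option (the function under test is elided, only the wiring is shown):

package k8sutils_test // illustrative

import (
    "context"
    "testing"

    "github.com/go-logr/logr/testr"
    "sigs.k8s.io/controller-runtime/pkg/log"
)

func TestWithContextLogger(t *testing.T) {
    // Anything the code under test logs via log.FromContext(ctx) now ends up
    // in this test's output, as it did with the old logger parameter.
    ctx := log.IntoContext(context.TODO(), testr.New(t))
    _ = ctx // pass ctx to the function under test, e.g. HandleRedisFinalizer(ctx, ...)
}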
@@ -702,7 +695,7 @@ func TestFinalizeRedisReplicationPVC(t *testing.T) { k8sClient = k8sClientFake.NewSimpleClientset() } - err := finalizeRedisReplicationPVC(k8sClient, logger, tc.redisReplication) + err := finalizeRedisReplicationPVC(context.TODO(), k8sClient, tc.redisReplication) if tc.expectError { assert.Error(t, err) } else { @@ -765,7 +758,6 @@ func TestFinalizeRedisClusterPVC(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - logger := testr.New(t) var k8sClient *k8sClientFake.Clientset if tc.existingPVCs != nil { k8sClient = k8sClientFake.NewSimpleClientset(helperToRuntimeObjects(tc.existingPVCs)...) @@ -773,7 +765,7 @@ func TestFinalizeRedisClusterPVC(t *testing.T) { k8sClient = k8sClientFake.NewSimpleClientset() } - err := finalizeRedisClusterPVC(k8sClient, logger, tc.redisCluster) + err := finalizeRedisClusterPVC(context.TODO(), k8sClient, tc.redisCluster) if tc.expectError { assert.Error(t, err) } else { @@ -886,7 +878,7 @@ func TestAddFinalizer(t *testing.T) { return nil }, } - err := AddFinalizer(tt.args.cr, tt.args.finalizer, mc) + err := AddFinalizer(context.TODO(), tt.args.cr, tt.args.finalizer, mc) if (err != nil) != tt.wantErr { t.Errorf("AddFinalizer() error = %v, wantErr %v", err, tt.wantErr) } diff --git a/pkg/k8sutils/pod.go b/pkg/k8sutils/pod.go index e52b131fc..c79a95f86 100644 --- a/pkg/k8sutils/pod.go +++ b/pkg/k8sutils/pod.go @@ -6,11 +6,11 @@ import ( "fmt" "strings" - "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/log" ) type Pod interface { @@ -20,14 +20,11 @@ type Pod interface { type PodService struct { kubeClient kubernetes.Interface - log logr.Logger } -func NewPodService(kubeClient kubernetes.Interface, log logr.Logger) *PodService { - log = log.WithValues("service", "k8s.pod") +func NewPodService(kubeClient kubernetes.Interface) *PodService { return &PodService{ kubeClient: kubeClient, - log: log, } } @@ -48,7 +45,7 @@ type patchStringValue struct { } func (s *PodService) PatchPodLabels(ctx context.Context, namespace, podName string, labels map[string]string) error { - s.log.Info("Patch pod labels", "namespace", namespace, "podName", podName, "labels", labels) + log.FromContext(ctx).V(1).Info("Patch pod labels", "namespace", namespace, "podName", podName, "labels", labels) var payloads []interface{} for labelKey, labelValue := range labels { @@ -63,7 +60,7 @@ func (s *PodService) PatchPodLabels(ctx context.Context, namespace, podName stri _, err := s.kubeClient.CoreV1().Pods(namespace).Patch(ctx, podName, types.JSONPatchType, payloadBytes, metav1.PatchOptions{}) if err != nil { - s.log.Error(err, "Patch pod labels failed", "namespace", namespace, "podName", podName) + log.FromContext(ctx).Error(err, "Patch pod labels failed", "namespace", namespace, "podName", podName) } return err } diff --git a/pkg/k8sutils/poddisruption.go b/pkg/k8sutils/poddisruption.go index 4561b8cc9..6dc97a970 100644 --- a/pkg/k8sutils/poddisruption.go +++ b/pkg/k8sutils/poddisruption.go @@ -7,31 +7,30 @@ import ( commonapi "github.com/OT-CONTAINER-KIT/redis-operator/api" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" "github.com/banzaicloud/k8s-objectmatcher/patch" - "github.com/go-logr/logr" policyv1 "k8s.io/api/policy/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes" + 
"sigs.k8s.io/controller-runtime/pkg/log" ) // CreateRedisLeaderPodDisruptionBudget check and create a PodDisruptionBudget for Leaders -func ReconcileRedisPodDisruptionBudget(cr *redisv1beta2.RedisCluster, role string, pdbParams *commonapi.RedisPodDisruptionBudget, cl kubernetes.Interface) error { +func ReconcileRedisPodDisruptionBudget(ctx context.Context, cr *redisv1beta2.RedisCluster, role string, pdbParams *commonapi.RedisPodDisruptionBudget, cl kubernetes.Interface) error { pdbName := cr.ObjectMeta.Name + "-" + role - logger := pdbLogger(cr.Namespace, pdbName) if pdbParams != nil && pdbParams.Enabled { labels := getRedisLabels(cr.ObjectMeta.Name, cluster, role, cr.ObjectMeta.GetLabels()) annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations) pdbMeta := generateObjectMetaInformation(pdbName, cr.Namespace, labels, annotations) - pdbDef := generatePodDisruptionBudgetDef(cr, role, pdbMeta, cr.Spec.RedisLeader.PodDisruptionBudget) - return CreateOrUpdatePodDisruptionBudget(pdbDef, cl) + pdbDef := generatePodDisruptionBudgetDef(ctx, cr, role, pdbMeta, cr.Spec.RedisLeader.PodDisruptionBudget) + return CreateOrUpdatePodDisruptionBudget(ctx, pdbDef, cl) } else { // Check if one exists, and delete it. - _, err := GetPodDisruptionBudget(cr.Namespace, pdbName, cl) + _, err := GetPodDisruptionBudget(ctx, cr.Namespace, pdbName, cl) if err == nil { - return deletePodDisruptionBudget(cr.Namespace, pdbName, cl) + return deletePodDisruptionBudget(ctx, cr.Namespace, pdbName, cl) } else if err != nil && errors.IsNotFound(err) { - logger.V(1).Info("Reconciliation Successful, no PodDisruptionBudget Found.") + log.FromContext(ctx).V(1).Info("Reconciliation Successful, no PodDisruptionBudget Found.") // Its ok if its not found, as we're deleting anyway return nil } @@ -39,22 +38,21 @@ func ReconcileRedisPodDisruptionBudget(cr *redisv1beta2.RedisCluster, role strin } } -func ReconcileSentinelPodDisruptionBudget(cr *redisv1beta2.RedisSentinel, pdbParams *commonapi.RedisPodDisruptionBudget, cl kubernetes.Interface) error { +func ReconcileSentinelPodDisruptionBudget(ctx context.Context, cr *redisv1beta2.RedisSentinel, pdbParams *commonapi.RedisPodDisruptionBudget, cl kubernetes.Interface) error { pdbName := cr.ObjectMeta.Name + "-sentinel" - logger := pdbLogger(cr.Namespace, pdbName) if pdbParams != nil && pdbParams.Enabled { labels := getRedisLabels(cr.ObjectMeta.Name, sentinel, "sentinel", cr.ObjectMeta.GetLabels()) annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations) pdbMeta := generateObjectMetaInformation(pdbName, cr.Namespace, labels, annotations) - pdbDef := generateSentinelPodDisruptionBudgetDef(cr, "sentinel", pdbMeta, pdbParams) - return CreateOrUpdatePodDisruptionBudget(pdbDef, cl) + pdbDef := generateSentinelPodDisruptionBudgetDef(ctx, cr, "sentinel", pdbMeta, pdbParams) + return CreateOrUpdatePodDisruptionBudget(ctx, pdbDef, cl) } else { // Check if one exists, and delete it. 
- _, err := GetPodDisruptionBudget(cr.Namespace, pdbName, cl) + _, err := GetPodDisruptionBudget(ctx, cr.Namespace, pdbName, cl) if err == nil { - return deletePodDisruptionBudget(cr.Namespace, pdbName, cl) + return deletePodDisruptionBudget(ctx, cr.Namespace, pdbName, cl) } else if err != nil && errors.IsNotFound(err) { - logger.V(1).Info("Reconciliation Successful, no PodDisruptionBudget Found.") + log.FromContext(ctx).V(1).Info("Reconciliation Successful, no PodDisruptionBudget Found.") // Its ok if its not found, as we're deleting anyway return nil } @@ -63,7 +61,7 @@ func ReconcileSentinelPodDisruptionBudget(cr *redisv1beta2.RedisSentinel, pdbPar } // generatePodDisruptionBudgetDef will create a PodDisruptionBudget definition -func generatePodDisruptionBudgetDef(cr *redisv1beta2.RedisCluster, role string, pdbMeta metav1.ObjectMeta, pdbParams *commonapi.RedisPodDisruptionBudget) *policyv1.PodDisruptionBudget { +func generatePodDisruptionBudgetDef(ctx context.Context, cr *redisv1beta2.RedisCluster, role string, pdbMeta metav1.ObjectMeta, pdbParams *commonapi.RedisPodDisruptionBudget) *policyv1.PodDisruptionBudget { lblSelector := LabelSelectors(map[string]string{ "app": fmt.Sprintf("%s-%s", cr.ObjectMeta.Name, role), "role": role, @@ -90,7 +88,7 @@ func generatePodDisruptionBudgetDef(cr *redisv1beta2.RedisCluster, role string, } // generatePodDisruptionBudgetDef will create a PodDisruptionBudget definition -func generateSentinelPodDisruptionBudgetDef(cr *redisv1beta2.RedisSentinel, role string, pdbMeta metav1.ObjectMeta, pdbParams *commonapi.RedisPodDisruptionBudget) *policyv1.PodDisruptionBudget { +func generateSentinelPodDisruptionBudgetDef(ctx context.Context, cr *redisv1beta2.RedisSentinel, role string, pdbMeta metav1.ObjectMeta, pdbParams *commonapi.RedisPodDisruptionBudget) *policyv1.PodDisruptionBudget { lblSelector := LabelSelectors(map[string]string{ "app": fmt.Sprintf("%s-%s", cr.ObjectMeta.Name, role), "role": role, @@ -117,25 +115,23 @@ func generateSentinelPodDisruptionBudgetDef(cr *redisv1beta2.RedisSentinel, role } // CreateOrUpdateService method will create or update Redis service -func CreateOrUpdatePodDisruptionBudget(pdbDef *policyv1.PodDisruptionBudget, cl kubernetes.Interface) error { - logger := pdbLogger(pdbDef.Namespace, pdbDef.Name) - storedPDB, err := GetPodDisruptionBudget(pdbDef.Namespace, pdbDef.Name, cl) +func CreateOrUpdatePodDisruptionBudget(ctx context.Context, pdbDef *policyv1.PodDisruptionBudget, cl kubernetes.Interface) error { + storedPDB, err := GetPodDisruptionBudget(ctx, pdbDef.Namespace, pdbDef.Name, cl) if err != nil { if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(pdbDef); err != nil { //nolint - logger.Error(err, "Unable to patch redis PodDisruptionBudget with comparison object") + log.FromContext(ctx).Error(err, "Unable to patch redis PodDisruptionBudget with comparison object") return err } if errors.IsNotFound(err) { - return createPodDisruptionBudget(pdbDef.Namespace, pdbDef, cl) + return createPodDisruptionBudget(ctx, pdbDef.Namespace, pdbDef, cl) } return err } - return patchPodDisruptionBudget(storedPDB, pdbDef, pdbDef.Namespace, cl) + return patchPodDisruptionBudget(ctx, storedPDB, pdbDef, pdbDef.Namespace, cl) } // patchPodDisruptionBudget will patch Redis Kubernetes PodDisruptionBudgets -func patchPodDisruptionBudget(storedPdb *policyv1.PodDisruptionBudget, newPdb *policyv1.PodDisruptionBudget, namespace string, cl kubernetes.Interface) error { - logger := pdbLogger(namespace, storedPdb.Name) +func 
patchPodDisruptionBudget(ctx context.Context, storedPdb *policyv1.PodDisruptionBudget, newPdb *policyv1.PodDisruptionBudget, namespace string, cl kubernetes.Interface) error { // We want to try and keep this atomic as possible. newPdb.ResourceVersion = storedPdb.ResourceVersion newPdb.CreationTimestamp = storedPdb.CreationTimestamp @@ -151,11 +147,11 @@ func patchPodDisruptionBudget(storedPdb *policyv1.PodDisruptionBudget, newPdb *p patch.IgnoreStatusFields(), ) if err != nil { - logger.Error(err, "Unable to patch redis PodDisruption with comparison object") + log.FromContext(ctx).Error(err, "Unable to patch redis PodDisruption with comparison object") return err } if !patchResult.IsEmpty() { - logger.V(1).Info("Changes in PodDisruptionBudget Detected, Updating...", + log.FromContext(ctx).V(1).Info("Changes in PodDisruptionBudget Detected, Updating...", "patch", string(patchResult.Patch), "Current", string(patchResult.Current), "Original", string(patchResult.Original), @@ -167,67 +163,57 @@ func patchPodDisruptionBudget(storedPdb *policyv1.PodDisruptionBudget, newPdb *p } } if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(newPdb); err != nil { - logger.Error(err, "Unable to patch redis PodDisruptionBudget with comparison object") + log.FromContext(ctx).Error(err, "Unable to patch redis PodDisruptionBudget with comparison object") return err } - return updatePodDisruptionBudget(namespace, newPdb, cl) + return updatePodDisruptionBudget(ctx, namespace, newPdb, cl) } return nil } // createPodDisruptionBudget is a method to create PodDisruptionBudgets in Kubernetes -func createPodDisruptionBudget(namespace string, pdb *policyv1.PodDisruptionBudget, cl kubernetes.Interface) error { - logger := pdbLogger(namespace, pdb.Name) +func createPodDisruptionBudget(ctx context.Context, namespace string, pdb *policyv1.PodDisruptionBudget, cl kubernetes.Interface) error { _, err := cl.PolicyV1().PodDisruptionBudgets(namespace).Create(context.TODO(), pdb, metav1.CreateOptions{}) if err != nil { - logger.Error(err, "Redis PodDisruptionBudget creation failed") + log.FromContext(ctx).Error(err, "Redis PodDisruptionBudget creation failed") return err } - logger.V(1).Info("Redis PodDisruptionBudget creation was successful") + log.FromContext(ctx).V(1).Info("Redis PodDisruptionBudget creation was successful") return nil } // updatePodDisruptionBudget is a method to update PodDisruptionBudgets in Kubernetes -func updatePodDisruptionBudget(namespace string, pdb *policyv1.PodDisruptionBudget, cl kubernetes.Interface) error { - logger := pdbLogger(namespace, pdb.Name) +func updatePodDisruptionBudget(ctx context.Context, namespace string, pdb *policyv1.PodDisruptionBudget, cl kubernetes.Interface) error { _, err := cl.PolicyV1().PodDisruptionBudgets(namespace).Update(context.TODO(), pdb, metav1.UpdateOptions{}) if err != nil { - logger.Error(err, "Redis PodDisruptionBudget update failed") + log.FromContext(ctx).Error(err, "Redis PodDisruptionBudget update failed") return err } - logger.V(1).Info("Redis PodDisruptionBudget update was successful", "PDB.Spec", pdb.Spec) + log.FromContext(ctx).V(1).Info("Redis PodDisruptionBudget update was successful", "PDB.Spec", pdb.Spec) return nil } // deletePodDisruptionBudget is a method to delete PodDisruptionBudgets in Kubernetes -func deletePodDisruptionBudget(namespace string, pdbName string, cl kubernetes.Interface) error { - logger := pdbLogger(namespace, pdbName) +func deletePodDisruptionBudget(ctx context.Context, namespace string, pdbName string, cl 
kubernetes.Interface) error { err := cl.PolicyV1().PodDisruptionBudgets(namespace).Delete(context.TODO(), pdbName, metav1.DeleteOptions{}) if err != nil { - logger.Error(err, "Redis PodDisruption deletion failed") + log.FromContext(ctx).Error(err, "Redis PodDisruption deletion failed") return err } - logger.V(1).Info("Redis PodDisruption delete was successful") + log.FromContext(ctx).V(1).Info("Redis PodDisruption delete was successful") return nil } // GetPodDisruptionBudget is a method to get PodDisruptionBudgets in Kubernetes -func GetPodDisruptionBudget(namespace string, pdb string, cl kubernetes.Interface) (*policyv1.PodDisruptionBudget, error) { - logger := pdbLogger(namespace, pdb) +func GetPodDisruptionBudget(ctx context.Context, namespace string, pdb string, cl kubernetes.Interface) (*policyv1.PodDisruptionBudget, error) { getOpts := metav1.GetOptions{ TypeMeta: generateMetaInformation("PodDisruptionBudget", "policy/v1"), } pdbInfo, err := cl.PolicyV1().PodDisruptionBudgets(namespace).Get(context.TODO(), pdb, getOpts) if err != nil { - logger.V(1).Info("Redis PodDisruptionBudget get action failed") + log.FromContext(ctx).V(1).Info("Redis PodDisruptionBudget get action failed") return nil, err } - logger.V(1).Info("Redis PodDisruptionBudget get action was successful") + log.FromContext(ctx).V(1).Info("Redis PodDisruptionBudget get action was successful") return pdbInfo, err } - -// pdbLogger will generate logging interface for PodDisruptionBudgets -func pdbLogger(namespace string, name string) logr.Logger { - reqLogger := log.WithValues("Request.PodDisruptionBudget.Namespace", namespace, "Request.PodDisruptionBudget.Name", name) - return reqLogger -} diff --git a/pkg/k8sutils/redis-cluster.go b/pkg/k8sutils/redis-cluster.go index 7fe9515dd..beb6ac9b7 100644 --- a/pkg/k8sutils/redis-cluster.go +++ b/pkg/k8sutils/redis-cluster.go @@ -1,16 +1,17 @@ package k8sutils import ( + "context" "strconv" "strings" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/util" - "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/log" ) // RedisClusterSTS is a interface to call Redis Statefulset function @@ -32,7 +33,7 @@ type RedisClusterService struct { } // generateRedisClusterParams generates Redis cluster information -func generateRedisClusterParams(cr *redisv1beta2.RedisCluster, replicas int32, externalConfig *string, params RedisClusterSTS) statefulSetParameters { +func generateRedisClusterParams(ctx context.Context, cr *redisv1beta2.RedisCluster, replicas int32, externalConfig *string, params RedisClusterSTS) statefulSetParameters { var minreadyseconds int32 = 0 if cr.Spec.KubernetesConfig.MinReadySeconds != nil { minreadyseconds = *cr.Spec.KubernetesConfig.MinReadySeconds @@ -105,7 +106,7 @@ func generateRedisClusterInitContainerParams(cr *redisv1beta2.RedisCluster) init } // generateRedisClusterContainerParams generates Redis container information -func generateRedisClusterContainerParams(cl kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, securityContext *corev1.SecurityContext, readinessProbeDef *corev1.Probe, livenessProbeDef *corev1.Probe, role string) containerParameters { +func generateRedisClusterContainerParams(ctx context.Context, cl kubernetes.Interface, cr *redisv1beta2.RedisCluster, securityContext *corev1.SecurityContext, readinessProbeDef *corev1.Probe, 
livenessProbeDef *corev1.Probe, role string) containerParameters { trueProperty := true falseProperty := false containerProp := containerParameters{ @@ -141,9 +142,9 @@ func generateRedisClusterContainerParams(cl kubernetes.Interface, logger logr.Lo nps := map[string]ports{} // pod name to ports replicas := cr.Spec.GetReplicaCounts(role) for i := 0; i < int(replicas); i++ { - svc, err := getService(cl, logger, cr.Namespace, cr.ObjectMeta.Name+"-"+role+"-"+strconv.Itoa(i)) + svc, err := getService(ctx, cl, cr.Namespace, cr.ObjectMeta.Name+"-"+role+"-"+strconv.Itoa(i)) if err != nil { - log.Error(err, "Cannot get service for Redis", "Setup.Type", role) + log.FromContext(ctx).Error(err, "Cannot get service for Redis", "Setup.Type", role) } else { nps[svc.Name] = ports{ announcePort: int(svc.Spec.Ports[0].NodePort), @@ -211,7 +212,7 @@ func generateRedisClusterContainerParams(cl kubernetes.Interface, logger logr.Lo } // CreateRedisLeader will create a leader redis setup -func CreateRedisLeader(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { +func CreateRedisLeader(ctx context.Context, cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { prop := RedisClusterSTS{ RedisStateFulType: "leader", SecurityContext: cr.Spec.RedisLeader.SecurityContext, @@ -225,11 +226,11 @@ func CreateRedisLeader(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) e if cr.Spec.RedisLeader.RedisConfig != nil { prop.ExternalConfig = cr.Spec.RedisLeader.RedisConfig.AdditionalRedisConfig } - return prop.CreateRedisClusterSetup(cr, cl) + return prop.CreateRedisClusterSetup(ctx, cr, cl) } // CreateRedisFollower will create a follower redis setup -func CreateRedisFollower(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { +func CreateRedisFollower(ctx context.Context, cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { prop := RedisClusterSTS{ RedisStateFulType: "follower", SecurityContext: cr.Spec.RedisFollower.SecurityContext, @@ -243,23 +244,23 @@ func CreateRedisFollower(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) if cr.Spec.RedisFollower.RedisConfig != nil { prop.ExternalConfig = cr.Spec.RedisFollower.RedisConfig.AdditionalRedisConfig } - return prop.CreateRedisClusterSetup(cr, cl) + return prop.CreateRedisClusterSetup(ctx, cr, cl) } // CreateRedisLeaderService method will create service for Redis Leader -func CreateRedisLeaderService(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { +func CreateRedisLeaderService(ctx context.Context, cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { prop := RedisClusterService{ RedisServiceRole: "leader", } - return prop.CreateRedisClusterService(cr, cl) + return prop.CreateRedisClusterService(ctx, cr, cl) } // CreateRedisFollowerService method will create service for Redis Follower -func CreateRedisFollowerService(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { +func CreateRedisFollowerService(ctx context.Context, cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { prop := RedisClusterService{ RedisServiceRole: "follower", } - return prop.CreateRedisClusterService(cr, cl) + return prop.CreateRedisClusterService(ctx, cr, cl) } func (service RedisClusterSTS) getReplicaCount(cr *redisv1beta2.RedisCluster) int32 { @@ -267,34 +268,32 @@ func (service RedisClusterSTS) getReplicaCount(cr *redisv1beta2.RedisCluster) in } // CreateRedisClusterSetup will create Redis Setup for leader and follower -func (service RedisClusterSTS) CreateRedisClusterSetup(cr 
*redisv1beta2.RedisCluster, cl kubernetes.Interface) error { +func (service RedisClusterSTS) CreateRedisClusterSetup(ctx context.Context, cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { stateFulName := cr.ObjectMeta.Name + "-" + service.RedisStateFulType - logger := statefulSetLogger(cr.Namespace, stateFulName) labels := getRedisLabels(stateFulName, cluster, service.RedisStateFulType, cr.ObjectMeta.Labels) annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations) objectMetaInfo := generateObjectMetaInformation(stateFulName, cr.Namespace, labels, annotations) err := CreateOrUpdateStateFul( + ctx, cl, - logger, cr.GetNamespace(), objectMetaInfo, - generateRedisClusterParams(cr, service.getReplicaCount(cr), service.ExternalConfig, service), + generateRedisClusterParams(ctx, cr, service.getReplicaCount(cr), service.ExternalConfig, service), redisClusterAsOwner(cr), generateRedisClusterInitContainerParams(cr), - generateRedisClusterContainerParams(cl, logger, cr, service.SecurityContext, service.ReadinessProbe, service.LivenessProbe, service.RedisStateFulType), + generateRedisClusterContainerParams(ctx, cl, cr, service.SecurityContext, service.ReadinessProbe, service.LivenessProbe, service.RedisStateFulType), cr.Spec.Sidecars, ) if err != nil { - logger.Error(err, "Cannot create statefulset for Redis", "Setup.Type", service.RedisStateFulType) + log.FromContext(ctx).Error(err, "Cannot create statefulset for Redis", "Setup.Type", service.RedisStateFulType) return err } return nil } // CreateRedisClusterService method will create service for Redis -func (service RedisClusterService) CreateRedisClusterService(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { +func (service RedisClusterService) CreateRedisClusterService(ctx context.Context, cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { serviceName := cr.ObjectMeta.Name + "-" + service.RedisServiceRole - logger := serviceLogger(cr.Namespace, serviceName) labels := getRedisLabels(serviceName, cluster, service.RedisServiceRole, cr.ObjectMeta.Labels) var epp exporterPortProvider if cr.Spec.RedisExporter != nil { @@ -309,40 +308,39 @@ func (service RedisClusterService) CreateRedisClusterService(cr *redisv1beta2.Re objectMetaInfo := generateObjectMetaInformation(serviceName, cr.Namespace, labels, annotations) headlessObjectMetaInfo := generateObjectMetaInformation(serviceName+"-headless", cr.Namespace, labels, annotations) additionalObjectMetaInfo := generateObjectMetaInformation(serviceName+"-additional", cr.Namespace, labels, generateServiceAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.GetServiceAnnotations(), epp)) - err := CreateOrUpdateService(cr.Namespace, headlessObjectMetaInfo, redisClusterAsOwner(cr), disableMetrics, true, "ClusterIP", *cr.Spec.Port, cl) + err := CreateOrUpdateService(ctx, cr.Namespace, headlessObjectMetaInfo, redisClusterAsOwner(cr), disableMetrics, true, "ClusterIP", *cr.Spec.Port, cl) if err != nil { - logger.Error(err, "Cannot create headless service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create headless service for Redis", "Setup.Type", service.RedisServiceRole) return err } - err = CreateOrUpdateService(cr.Namespace, objectMetaInfo, redisClusterAsOwner(cr), epp, false, "ClusterIP", *cr.Spec.Port, cl) + err = CreateOrUpdateService(ctx, cr.Namespace, objectMetaInfo, redisClusterAsOwner(cr), epp, false, "ClusterIP", *cr.Spec.Port, cl) if err != nil { - logger.Error(err, "Cannot create 
service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create service for Redis", "Setup.Type", service.RedisServiceRole) return err } additionalServiceType := cr.Spec.KubernetesConfig.GetServiceType() if additionalServiceType == "NodePort" { // If NodePort is enabled, we need to create a service for every redis pod. // Then use --cluster-announce-ip --cluster-announce-port --cluster-announce-bus-port to make cluster. - err = service.createOrUpdateClusterNodePortService(cr, cl) + err = service.createOrUpdateClusterNodePortService(ctx, cr, cl) if err != nil { - logger.Error(err, "Cannot create nodeport service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create nodeport service for Redis", "Setup.Type", service.RedisServiceRole) return err } } - err = CreateOrUpdateService(cr.Namespace, additionalObjectMetaInfo, redisClusterAsOwner(cr), disableMetrics, false, additionalServiceType, *cr.Spec.Port, cl) + err = CreateOrUpdateService(ctx, cr.Namespace, additionalObjectMetaInfo, redisClusterAsOwner(cr), disableMetrics, false, additionalServiceType, *cr.Spec.Port, cl) if err != nil { - logger.Error(err, "Cannot create additional service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create additional service for Redis", "Setup.Type", service.RedisServiceRole) return err } return nil } -func (service RedisClusterService) createOrUpdateClusterNodePortService(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { +func (service RedisClusterService) createOrUpdateClusterNodePortService(ctx context.Context, cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { replicas := cr.Spec.GetReplicaCounts(service.RedisServiceRole) for i := 0; i < int(replicas); i++ { serviceName := cr.ObjectMeta.Name + "-" + service.RedisServiceRole + "-" + strconv.Itoa(i) - logger := serviceLogger(cr.Namespace, serviceName) labels := getRedisLabels(cr.ObjectMeta.Name+"-"+service.RedisServiceRole, cluster, service.RedisServiceRole, map[string]string{ "statefulset.kubernetes.io/pod-name": serviceName, }) @@ -357,9 +355,9 @@ func (service RedisClusterService) createOrUpdateClusterNodePortService(cr *redi IntVal: int32(*cr.Spec.Port + 10000), }, } - err := CreateOrUpdateService(cr.Namespace, objectMetaInfo, redisClusterAsOwner(cr), disableMetrics, false, "NodePort", *cr.Spec.Port, cl, busPort) + err := CreateOrUpdateService(ctx, cr.Namespace, objectMetaInfo, redisClusterAsOwner(cr), disableMetrics, false, "NodePort", *cr.Spec.Port, cl, busPort) if err != nil { - logger.Error(err, "Cannot create nodeport service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create nodeport service for Redis", "Setup.Type", service.RedisServiceRole) return err } } diff --git a/pkg/k8sutils/redis-cluster_test.go b/pkg/k8sutils/redis-cluster_test.go index 65c99bc4a..90993475d 100644 --- a/pkg/k8sutils/redis-cluster_test.go +++ b/pkg/k8sutils/redis-cluster_test.go @@ -1,13 +1,13 @@ package k8sutils import ( + "context" "os" "path/filepath" "testing" common "github.com/OT-CONTAINER-KIT/redis-operator/api" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" - "github.com/go-logr/logr/testr" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -165,7 +165,7 @@ func Test_generateRedisClusterParams(t *testing.T) { t.Fatalf("Failed to unmarshal file %s: %v", path, err) } - 
actualLeaderSTS := generateRedisClusterParams(input, *input.Spec.Size, input.Spec.RedisLeader.RedisConfig.AdditionalRedisConfig, RedisClusterSTS{ + actualLeaderSTS := generateRedisClusterParams(context.TODO(), input, *input.Spec.Size, input.Spec.RedisLeader.RedisConfig.AdditionalRedisConfig, RedisClusterSTS{ RedisStateFulType: "leader", ExternalConfig: input.Spec.RedisLeader.RedisConfig.AdditionalRedisConfig, SecurityContext: input.Spec.RedisLeader.SecurityContext, @@ -178,7 +178,7 @@ func Test_generateRedisClusterParams(t *testing.T) { }) assert.EqualValues(t, expectedLeaderSTS, actualLeaderSTS, "Expected %+v, got %+v", expectedLeaderSTS, actualLeaderSTS) - actualFollowerSTS := generateRedisClusterParams(input, *input.Spec.Size, input.Spec.RedisFollower.RedisConfig.AdditionalRedisConfig, RedisClusterSTS{ + actualFollowerSTS := generateRedisClusterParams(context.TODO(), input, *input.Spec.Size, input.Spec.RedisFollower.RedisConfig.AdditionalRedisConfig, RedisClusterSTS{ RedisStateFulType: "follower", ExternalConfig: input.Spec.RedisFollower.RedisConfig.AdditionalRedisConfig, SecurityContext: input.Spec.RedisFollower.SecurityContext, @@ -430,12 +430,11 @@ func Test_generateRedisClusterContainerParams(t *testing.T) { if err != nil { t.Fatalf("Failed to unmarshal file %s: %v", path, err) } - logger := testr.New(t) - actualLeaderContainer := generateRedisClusterContainerParams(fake.NewSimpleClientset(), logger, input, input.Spec.RedisLeader.SecurityContext, input.Spec.RedisLeader.ReadinessProbe, input.Spec.RedisLeader.LivenessProbe, "leader") + actualLeaderContainer := generateRedisClusterContainerParams(context.TODO(), fake.NewSimpleClientset(), input, input.Spec.RedisLeader.SecurityContext, input.Spec.RedisLeader.ReadinessProbe, input.Spec.RedisLeader.LivenessProbe, "leader") assert.EqualValues(t, expectedLeaderContainer, actualLeaderContainer, "Expected %+v, got %+v", expectedLeaderContainer, actualLeaderContainer) - actualFollowerContainer := generateRedisClusterContainerParams(fake.NewSimpleClientset(), logger, input, input.Spec.RedisFollower.SecurityContext, input.Spec.RedisFollower.ReadinessProbe, input.Spec.RedisFollower.LivenessProbe, "follower") + actualFollowerContainer := generateRedisClusterContainerParams(context.TODO(), fake.NewSimpleClientset(), input, input.Spec.RedisFollower.SecurityContext, input.Spec.RedisFollower.ReadinessProbe, input.Spec.RedisFollower.LivenessProbe, "follower") assert.EqualValues(t, expectedFollowerContainer, actualFollowerContainer, "Expected %+v, got %+v", expectedFollowerContainer, actualFollowerContainer) } diff --git a/pkg/k8sutils/redis-replication.go b/pkg/k8sutils/redis-replication.go index d1287e4f6..ad6a84128 100644 --- a/pkg/k8sutils/redis-replication.go +++ b/pkg/k8sutils/redis-replication.go @@ -5,15 +5,14 @@ import ( redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/util" - "github.com/go-logr/logr" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/log" ) // CreateReplicationService method will create replication service for Redis -func CreateReplicationService(cr *redisv1beta2.RedisReplication, cl kubernetes.Interface) error { - logger := serviceLogger(cr.Namespace, cr.ObjectMeta.Name) +func CreateReplicationService(ctx context.Context, cr *redisv1beta2.RedisReplication, cl kubernetes.Interface) error { labels := getRedisLabels(cr.ObjectMeta.Name, replication, "replication", cr.ObjectMeta.Labels) epp := 
disableMetrics @@ -37,24 +36,24 @@ func CreateReplicationService(cr *redisv1beta2.RedisReplication, cl kubernetes.I masterObjectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name+"-master", cr.Namespace, masterLabels, annotations) replicaObjectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name+"-replica", cr.Namespace, replicaLabels, annotations) - if err := CreateOrUpdateService(cr.Namespace, headlessObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, true, "ClusterIP", redisPort, cl); err != nil { - logger.Error(err, "Cannot create replication headless service for Redis") + if err := CreateOrUpdateService(ctx, cr.Namespace, headlessObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, true, "ClusterIP", redisPort, cl); err != nil { + log.FromContext(ctx).Error(err, "Cannot create replication headless service for Redis") return err } - if err := CreateOrUpdateService(cr.Namespace, objectMetaInfo, redisReplicationAsOwner(cr), epp, false, "ClusterIP", redisPort, cl); err != nil { - logger.Error(err, "Cannot create replication service for Redis") + if err := CreateOrUpdateService(ctx, cr.Namespace, objectMetaInfo, redisReplicationAsOwner(cr), epp, false, "ClusterIP", redisPort, cl); err != nil { + log.FromContext(ctx).Error(err, "Cannot create replication service for Redis") return err } - if err := CreateOrUpdateService(cr.Namespace, additionalObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, false, cr.Spec.KubernetesConfig.GetServiceType(), redisPort, cl); err != nil { - logger.Error(err, "Cannot create additional service for Redis Replication") + if err := CreateOrUpdateService(ctx, cr.Namespace, additionalObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, false, cr.Spec.KubernetesConfig.GetServiceType(), redisPort, cl); err != nil { + log.FromContext(ctx).Error(err, "Cannot create additional service for Redis Replication") return err } - if err := CreateOrUpdateService(cr.Namespace, masterObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, false, "ClusterIP", redisPort, cl); err != nil { - logger.Error(err, "Cannot create master service for Redis") + if err := CreateOrUpdateService(ctx, cr.Namespace, masterObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, false, "ClusterIP", redisPort, cl); err != nil { + log.FromContext(ctx).Error(err, "Cannot create master service for Redis") return err } - if err := CreateOrUpdateService(cr.Namespace, replicaObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, false, "ClusterIP", redisPort, cl); err != nil { - logger.Error(err, "Cannot create replica service for Redis") + if err := CreateOrUpdateService(ctx, cr.Namespace, replicaObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, false, "ClusterIP", redisPort, cl); err != nil { + log.FromContext(ctx).Error(err, "Cannot create replica service for Redis") return err } @@ -62,16 +61,15 @@ func CreateReplicationService(cr *redisv1beta2.RedisReplication, cl kubernetes.I } // CreateReplicationRedis will create a replication redis setup -func CreateReplicationRedis(cr *redisv1beta2.RedisReplication, cl kubernetes.Interface) error { +func CreateReplicationRedis(ctx context.Context, cr *redisv1beta2.RedisReplication, cl kubernetes.Interface) error { stateFulName := cr.ObjectMeta.Name - logger := statefulSetLogger(cr.Namespace, cr.ObjectMeta.Name) labels := getRedisLabels(cr.ObjectMeta.Name, replication, "replication", cr.ObjectMeta.Labels) annotations := generateStatefulSetsAnots(cr.ObjectMeta, 
cr.Spec.KubernetesConfig.IgnoreAnnotations) objectMetaInfo := generateObjectMetaInformation(stateFulName, cr.Namespace, labels, annotations) err := CreateOrUpdateStateFul( + ctx, cl, - logger, cr.GetNamespace(), objectMetaInfo, generateRedisReplicationParams(cr), @@ -81,7 +79,7 @@ func CreateReplicationRedis(cr *redisv1beta2.RedisReplication, cl kubernetes.Int cr.Spec.Sidecars, ) if err != nil { - logger.Error(err, "Cannot create replication statefulset for Redis") + log.FromContext(ctx).Error(err, "Cannot create replication statefulset for Redis") return err } return nil @@ -216,9 +214,9 @@ func generateRedisReplicationInitContainerParams(cr *redisv1beta2.RedisReplicati return initcontainerProp } -func IsRedisReplicationReady(ctx context.Context, logger logr.Logger, client kubernetes.Interface, dClient dynamic.Interface, rs *redisv1beta2.RedisSentinel) bool { +func IsRedisReplicationReady(ctx context.Context, client kubernetes.Interface, dClient dynamic.Interface, rs *redisv1beta2.RedisSentinel) bool { // statefulset name the same as the redis replication name - sts, err := GetStatefulSet(client, logger, rs.GetNamespace(), rs.Spec.RedisSentinelConfig.RedisReplicationName) + sts, err := GetStatefulSet(ctx, client, rs.GetNamespace(), rs.Spec.RedisSentinelConfig.RedisReplicationName) if err != nil { return false } @@ -234,7 +232,7 @@ func IsRedisReplicationReady(ctx context.Context, logger logr.Logger, client kub // Enhanced check: When the pod is ready, it may not have been // created as part of a replication cluster, so we should verify // whether there is an actual master node. - if master := getRedisReplicationMasterIP(ctx, client, logger, rs, dClient); master == "" { + if master := getRedisReplicationMasterIP(ctx, client, rs, dClient); master == "" { return false } return true diff --git a/pkg/k8sutils/redis-sentinel.go b/pkg/k8sutils/redis-sentinel.go index 3d2f4ba6b..4b1a56ff2 100644 --- a/pkg/k8sutils/redis-sentinel.go +++ b/pkg/k8sutils/redis-sentinel.go @@ -7,13 +7,13 @@ import ( redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/util" - "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/log" ) // RedisSentinelSTS is a interface to call Redis Statefulset function @@ -36,7 +36,7 @@ type RedisReplicationObject struct { } // Redis Sentinel Create the Redis Sentinel Setup -func CreateRedisSentinel(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface, dcl dynamic.Interface) error { +func CreateRedisSentinel(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface, dcl dynamic.Interface) error { prop := RedisSentinelSTS{ RedisStateFulType: "sentinel", Affinity: cr.Spec.Affinity, @@ -49,43 +49,43 @@ func CreateRedisSentinel(ctx context.Context, client kubernetes.Interface, logge prop.ExternalConfig = cr.Spec.RedisSentinelConfig.AdditionalSentinelConfig } - return prop.CreateRedisSentinelSetup(ctx, client, logger, cr, cl, dcl) + return prop.CreateRedisSentinelSetup(ctx, client, cr, cl, dcl) } // Create RedisSentinel Service -func CreateRedisSentinelService(cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface) error { +func CreateRedisSentinelService(ctx context.Context, cr *redisv1beta2.RedisSentinel, cl 
kubernetes.Interface) error { prop := RedisSentinelService{ RedisServiceRole: "sentinel", } - return prop.CreateRedisSentinelService(cr, cl) + return prop.CreateRedisSentinelService(ctx, cr, cl) } // Create Redis Sentinel Cluster Setup -func (service RedisSentinelSTS) CreateRedisSentinelSetup(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface, dcl dynamic.Interface) error { +func (service RedisSentinelSTS) CreateRedisSentinelSetup(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface, dcl dynamic.Interface) error { stateFulName := cr.ObjectMeta.Name + "-" + service.RedisStateFulType labels := getRedisLabels(stateFulName, sentinel, service.RedisStateFulType, cr.ObjectMeta.Labels) annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations) objectMetaInfo := generateObjectMetaInformation(stateFulName, cr.Namespace, labels, annotations) err := CreateOrUpdateStateFul( + ctx, cl, - logger, cr.GetNamespace(), objectMetaInfo, - generateRedisSentinelParams(cr, service.getSentinelCount(cr), service.ExternalConfig, service.Affinity), + generateRedisSentinelParams(ctx, cr, service.getSentinelCount(cr), service.ExternalConfig, service.Affinity), redisSentinelAsOwner(cr), generateRedisSentinelInitContainerParams(cr), - generateRedisSentinelContainerParams(ctx, client, logger, cr, service.ReadinessProbe, service.LivenessProbe, dcl), + generateRedisSentinelContainerParams(ctx, client, cr, service.ReadinessProbe, service.LivenessProbe, dcl), cr.Spec.Sidecars, ) if err != nil { - logger.Error(err, "Cannot create Sentinel statefulset for Redis") + log.FromContext(ctx).Error(err, "Cannot create Sentinel statefulset for Redis") return err } return nil } // Create Redis Sentile Params for the statefulset -func generateRedisSentinelParams(cr *redisv1beta2.RedisSentinel, replicas int32, externalConfig *string, affinity *corev1.Affinity) statefulSetParameters { +func generateRedisSentinelParams(ctx context.Context, cr *redisv1beta2.RedisSentinel, replicas int32, externalConfig *string, affinity *corev1.Affinity) statefulSetParameters { var minreadyseconds int32 = 0 if cr.Spec.KubernetesConfig.MinReadySeconds != nil { minreadyseconds = *cr.Spec.KubernetesConfig.MinReadySeconds @@ -148,7 +148,7 @@ func generateRedisSentinelInitContainerParams(cr *redisv1beta2.RedisSentinel) in } // Create Redis Sentinel Statefulset Container Params -func generateRedisSentinelContainerParams(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, readinessProbeDef *corev1.Probe, livenessProbeDef *corev1.Probe, dcl dynamic.Interface) containerParameters { +func generateRedisSentinelContainerParams(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisSentinel, readinessProbeDef *corev1.Probe, livenessProbeDef *corev1.Probe, dcl dynamic.Interface) containerParameters { trueProperty := true falseProperty := false containerProp := containerParameters{ @@ -158,7 +158,7 @@ func generateRedisSentinelContainerParams(ctx context.Context, client kubernetes Resources: cr.Spec.KubernetesConfig.Resources, SecurityContext: cr.Spec.SecurityContext, Port: ptr.To(sentinelPort), - AdditionalEnvVariable: getSentinelEnvVariable(ctx, client, logger, cr, dcl), + AdditionalEnvVariable: getSentinelEnvVariable(ctx, client, cr, dcl), } if cr.Spec.EnvVars != nil { containerProp.EnvVars = cr.Spec.EnvVars @@ -206,9 +206,8 @@ func 
(service RedisSentinelSTS) getSentinelCount(cr *redisv1beta2.RedisSentinel) } // Create the Service for redis sentinel -func (service RedisSentinelService) CreateRedisSentinelService(cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface) error { +func (service RedisSentinelService) CreateRedisSentinelService(ctx context.Context, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface) error { serviceName := cr.ObjectMeta.Name + "-" + service.RedisServiceRole - logger := serviceLogger(cr.Namespace, serviceName) labels := getRedisLabels(serviceName, sentinel, service.RedisServiceRole, cr.ObjectMeta.Labels) var epp exporterPortProvider if cr.Spec.RedisExporter != nil { @@ -224,18 +223,19 @@ func (service RedisSentinelService) CreateRedisSentinelService(cr *redisv1beta2. headlessObjectMetaInfo := generateObjectMetaInformation(serviceName+"-headless", cr.Namespace, labels, annotations) additionalObjectMetaInfo := generateObjectMetaInformation(serviceName+"-additional", cr.Namespace, labels, generateServiceAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.GetServiceAnnotations(), epp)) - err := CreateOrUpdateService(cr.Namespace, headlessObjectMetaInfo, redisSentinelAsOwner(cr), disableMetrics, true, "ClusterIP", sentinelPort, cl) + err := CreateOrUpdateService(ctx, cr.Namespace, headlessObjectMetaInfo, redisSentinelAsOwner(cr), disableMetrics, true, "ClusterIP", sentinelPort, cl) if err != nil { - logger.Error(err, "Cannot create headless service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create headless service for Redis", "Setup.Type", service.RedisServiceRole) return err } - err = CreateOrUpdateService(cr.Namespace, objectMetaInfo, redisSentinelAsOwner(cr), epp, false, "ClusterIP", sentinelPort, cl) + err = CreateOrUpdateService(ctx, cr.Namespace, objectMetaInfo, redisSentinelAsOwner(cr), epp, false, "ClusterIP", sentinelPort, cl) if err != nil { - logger.Error(err, "Cannot create service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create service for Redis", "Setup.Type", service.RedisServiceRole) return err } err = CreateOrUpdateService( + ctx, cr.Namespace, additionalObjectMetaInfo, redisSentinelAsOwner(cr), @@ -246,13 +246,13 @@ func (service RedisSentinelService) CreateRedisSentinelService(cr *redisv1beta2. 
cl, ) if err != nil { - logger.Error(err, "Cannot create additional service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create additional service for Redis", "Setup.Type", service.RedisServiceRole) return err } return nil } -func getSentinelEnvVariable(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, dcl dynamic.Interface) *[]corev1.EnvVar { +func getSentinelEnvVariable(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisSentinel, dcl dynamic.Interface) *[]corev1.EnvVar { if cr.Spec.RedisSentinelConfig == nil { return &[]corev1.EnvVar{} } @@ -264,7 +264,7 @@ func getSentinelEnvVariable(ctx context.Context, client kubernetes.Interface, lo }, { Name: "IP", - Value: getRedisReplicationMasterIP(ctx, client, logger, cr, dcl), + Value: getRedisReplicationMasterIP(ctx, client, cr, dcl), }, { Name: "PORT", @@ -297,7 +297,7 @@ func getSentinelEnvVariable(ctx context.Context, client kubernetes.Interface, lo return envVar } -func getRedisReplicationMasterIP(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, dcl dynamic.Interface) string { +func getRedisReplicationMasterIP(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisSentinel, dcl dynamic.Interface) string { replicationName := cr.Spec.RedisSentinelConfig.RedisReplicationName replicationNamespace := cr.Namespace @@ -312,41 +312,41 @@ func getRedisReplicationMasterIP(ctx context.Context, client kubernetes.Interfac }).Namespace(replicationNamespace).Get(context.TODO(), replicationName, v1.GetOptions{}) if err != nil { - logger.Error(err, "Failed to Execute Get Request", "replication name", replicationName, "namespace", replicationNamespace) + log.FromContext(ctx).Error(err, "Failed to Execute Get Request", "replication name", replicationName, "namespace", replicationNamespace) return "" } else { - logger.V(1).Info("Successfully Execute the Get Request", "replication name", replicationName, "namespace", replicationNamespace) + log.FromContext(ctx).V(1).Info("Successfully Execute the Get Request", "replication name", replicationName, "namespace", replicationNamespace) } // Marshal CustomObject to JSON replicationJSON, err := customObject.MarshalJSON() if err != nil { - logger.Error(err, "Failed To Load JSON") + log.FromContext(ctx).Error(err, "Failed To Load JSON") return "" } // Unmarshal The JSON on Object if err := json.Unmarshal(replicationJSON, &replicationInstance); err != nil { - logger.Error(err, "Failed To Unmarshal JSON over the Object") + log.FromContext(ctx).Error(err, "Failed To Unmarshal JSON over the Object") return "" } - masterPods := GetRedisNodesByRole(ctx, client, logger, &replicationInstance, "master") + masterPods := GetRedisNodesByRole(ctx, client, &replicationInstance, "master") if len(masterPods) == 0 { - logger.Error(errors.New("no master pods found"), "") + log.FromContext(ctx).Error(errors.New("no master pods found"), "") return "" } for _, podName := range masterPods { - redisClient := configureRedisReplicationClient(client, logger, &replicationInstance, podName) + redisClient := configureRedisReplicationClient(ctx, client, &replicationInstance, podName) defer redisClient.Close() - if checkAttachedSlave(ctx, redisClient, logger, podName) > 0 { + if checkAttachedSlave(ctx, redisClient, podName) > 0 { realMasterPod = podName break } } if realMasterPod == "" { - logger.Error(errors.New("no real master pod found"), "") + 
log.FromContext(ctx).Error(errors.New("no real master pod found"), "") return "" } @@ -354,5 +354,5 @@ func getRedisReplicationMasterIP(ctx context.Context, client kubernetes.Interfac PodName: realMasterPod, Namespace: replicationNamespace, } - return getRedisServerIP(client, logger, realMasterInfo) + return getRedisServerIP(ctx, client, realMasterInfo) } diff --git a/pkg/k8sutils/redis-sentinel_test.go b/pkg/k8sutils/redis-sentinel_test.go index 76b467c4c..5f2160748 100644 --- a/pkg/k8sutils/redis-sentinel_test.go +++ b/pkg/k8sutils/redis-sentinel_test.go @@ -9,7 +9,6 @@ import ( common "github.com/OT-CONTAINER-KIT/redis-operator/api" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" - "github.com/go-logr/logr" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" @@ -83,7 +82,7 @@ func Test_generateRedisSentinelParams(t *testing.T) { t.Fatalf("Failed to unmarshal file %s: %v", path, err) } - actual := generateRedisSentinelParams(input, *input.Spec.Size, nil, input.Spec.Affinity) + actual := generateRedisSentinelParams(context.TODO(), input, *input.Spec.Size, nil, input.Spec.Affinity) assert.EqualValues(t, expected, actual, "Expected %+v, got %+v", expected, actual) } @@ -208,7 +207,7 @@ func Test_generateRedisSentinelContainerParams(t *testing.T) { t.Fatalf("Failed to unmarshal file %s: %v", path, err) } - actual := generateRedisSentinelContainerParams(context.TODO(), nil, logr.Logger{}, input, nil, nil, nil) + actual := generateRedisSentinelContainerParams(context.TODO(), nil, input, nil, nil, nil) assert.EqualValues(t, expected, actual, "Expected %+v, got %+v", expected, actual) } @@ -292,7 +291,6 @@ func Test_generateRedisSentinelInitContainerParams(t *testing.T) { func Test_getSentinelEnvVariable(t *testing.T) { type args struct { client kubernetes.Interface - logger logr.Logger cr *redisv1beta2.RedisSentinel } tests := []struct { @@ -304,7 +302,6 @@ func Test_getSentinelEnvVariable(t *testing.T) { name: "When RedisSentinelConfig is nil", args: args{ client: nil, - logger: logr.Logger{}, cr: &redisv1beta2.RedisSentinel{}, }, want: &[]corev1.EnvVar{}, @@ -313,7 +310,6 @@ func Test_getSentinelEnvVariable(t *testing.T) { name: "When RedisSentinelConfig is not nil", args: args{ client: nil, - logger: logr.Logger{}, cr: &redisv1beta2.RedisSentinel{ Spec: redisv1beta2.RedisSentinelSpec{ RedisSentinelConfig: &redisv1beta2.RedisSentinelConfig{ @@ -364,7 +360,7 @@ func Test_getSentinelEnvVariable(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctx := context.TODO() - if got := getSentinelEnvVariable(ctx, tt.args.client, tt.args.logger, tt.args.cr, fake.NewSimpleDynamicClient(&runtime.Scheme{})); !reflect.DeepEqual(got, tt.want) { + if got := getSentinelEnvVariable(ctx, tt.args.client, tt.args.cr, fake.NewSimpleDynamicClient(&runtime.Scheme{})); !reflect.DeepEqual(got, tt.want) { t.Errorf("getSentinelEnvVariable() = %v, want %v", got, tt.want) } }) diff --git a/pkg/k8sutils/redis-standalone.go b/pkg/k8sutils/redis-standalone.go index 05a911760..11bf31ac1 100644 --- a/pkg/k8sutils/redis-standalone.go +++ b/pkg/k8sutils/redis-standalone.go @@ -1,15 +1,17 @@ package k8sutils import ( + "context" + redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/util" "k8s.io/client-go/kubernetes" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/log" ) // CreateStandaloneService method will create standalone service for Redis -func 
CreateStandaloneService(cr *redisv1beta2.Redis, cl kubernetes.Interface) error { - logger := serviceLogger(cr.Namespace, cr.ObjectMeta.Name) +func CreateStandaloneService(ctx context.Context, cr *redisv1beta2.Redis, cl kubernetes.Interface) error { labels := getRedisLabels(cr.ObjectMeta.Name, standalone, "standalone", cr.ObjectMeta.Labels) var epp exporterPortProvider if cr.Spec.RedisExporter != nil { @@ -24,17 +26,18 @@ func CreateStandaloneService(cr *redisv1beta2.Redis, cl kubernetes.Interface) er objectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name, cr.Namespace, labels, annotations) headlessObjectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name+"-headless", cr.Namespace, labels, annotations) additionalObjectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name+"-additional", cr.Namespace, labels, generateServiceAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.GetServiceAnnotations(), epp)) - err := CreateOrUpdateService(cr.Namespace, headlessObjectMetaInfo, redisAsOwner(cr), disableMetrics, true, "ClusterIP", redisPort, cl) + err := CreateOrUpdateService(ctx, cr.Namespace, headlessObjectMetaInfo, redisAsOwner(cr), disableMetrics, true, "ClusterIP", redisPort, cl) if err != nil { - logger.Error(err, "Cannot create standalone headless service for Redis") + log.FromContext(ctx).Error(err, "Cannot create standalone headless service for Redis") return err } - err = CreateOrUpdateService(cr.Namespace, objectMetaInfo, redisAsOwner(cr), epp, false, "ClusterIP", redisPort, cl) + err = CreateOrUpdateService(ctx, cr.Namespace, objectMetaInfo, redisAsOwner(cr), epp, false, "ClusterIP", redisPort, cl) if err != nil { - logger.Error(err, "Cannot create standalone service for Redis") + log.FromContext(ctx).Error(err, "Cannot create standalone service for Redis") return err } err = CreateOrUpdateService( + ctx, cr.Namespace, additionalObjectMetaInfo, redisAsOwner(cr), @@ -45,21 +48,20 @@ func CreateStandaloneService(cr *redisv1beta2.Redis, cl kubernetes.Interface) er cl, ) if err != nil { - logger.Error(err, "Cannot create additional service for Redis") + log.FromContext(ctx).Error(err, "Cannot create additional service for Redis") return err } return nil } // CreateStandaloneRedis will create a standalone redis setup -func CreateStandaloneRedis(cr *redisv1beta2.Redis, cl kubernetes.Interface) error { - logger := statefulSetLogger(cr.Namespace, cr.ObjectMeta.Name) +func CreateStandaloneRedis(ctx context.Context, cr *redisv1beta2.Redis, cl kubernetes.Interface) error { labels := getRedisLabels(cr.ObjectMeta.Name, standalone, "standalone", cr.ObjectMeta.Labels) annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations) objectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name, cr.Namespace, labels, annotations) err := CreateOrUpdateStateFul( + ctx, cl, - logger, cr.GetNamespace(), objectMetaInfo, generateRedisStandaloneParams(cr), @@ -69,7 +71,7 @@ func CreateStandaloneRedis(cr *redisv1beta2.Redis, cl kubernetes.Interface) erro cr.Spec.Sidecars, ) if err != nil { - logger.Error(err, "Cannot create standalone statefulset for Redis") + log.FromContext(ctx).Error(err, "Cannot create standalone statefulset for Redis") return err } return nil diff --git a/pkg/k8sutils/redis.go b/pkg/k8sutils/redis.go index 5f65e5db3..ea561e634 100644 --- a/pkg/k8sutils/redis.go +++ b/pkg/k8sutils/redis.go @@ -10,13 +10,13 @@ import ( "strings" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" - "github.com/go-logr/logr" redis 
"github.com/redis/go-redis/v9" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/remotecommand" + "sigs.k8s.io/controller-runtime/pkg/log" ) // RedisDetails will hold the information for Redis Pod @@ -26,35 +26,35 @@ type RedisDetails struct { } // getRedisServerIP will return the IP of redis service -func getRedisServerIP(client kubernetes.Interface, logger logr.Logger, redisInfo RedisDetails) string { - logger.V(1).Info("Fetching Redis pod", "namespace", redisInfo.Namespace, "podName", redisInfo.PodName) +func getRedisServerIP(ctx context.Context, client kubernetes.Interface, redisInfo RedisDetails) string { + log.FromContext(ctx).V(1).Info("Fetching Redis pod", "namespace", redisInfo.Namespace, "podName", redisInfo.PodName) redisPod, err := client.CoreV1().Pods(redisInfo.Namespace).Get(context.TODO(), redisInfo.PodName, metav1.GetOptions{}) if err != nil { - logger.Error(err, "Error in getting Redis pod IP", "namespace", redisInfo.Namespace, "podName", redisInfo.PodName) + log.FromContext(ctx).Error(err, "Error in getting Redis pod IP", "namespace", redisInfo.Namespace, "podName", redisInfo.PodName) return "" } redisIP := redisPod.Status.PodIP - logger.V(1).Info("Fetched Redis pod IP", "ip", redisIP) + log.FromContext(ctx).V(1).Info("Fetched Redis pod IP", "ip", redisIP) // Check if IP is empty if redisIP == "" { - logger.V(1).Info("Redis pod IP is empty", "namespace", redisInfo.Namespace, "podName", redisInfo.PodName) + log.FromContext(ctx).V(1).Info("Redis pod IP is empty", "namespace", redisInfo.Namespace, "podName", redisInfo.PodName) return "" } // If we're NOT IPv4, assume we're IPv6.. if net.ParseIP(redisIP).To4() == nil { - logger.V(1).Info("Redis is using IPv6", "ip", redisIP) + log.FromContext(ctx).V(1).Info("Redis is using IPv6", "ip", redisIP) } - logger.V(1).Info("Successfully got the IP for Redis", "ip", redisIP) + log.FromContext(ctx).V(1).Info("Successfully got the IP for Redis", "ip", redisIP) return redisIP } -func getRedisServerAddress(client kubernetes.Interface, logger logr.Logger, rd RedisDetails, port int) string { - ip := getRedisServerIP(client, logger, rd) +func getRedisServerAddress(ctx context.Context, client kubernetes.Interface, rd RedisDetails, port int) string { + ip := getRedisServerIP(ctx, client, rd) format := "%s:%d" // if ip is IPv6, wrap it in brackets @@ -72,12 +72,12 @@ func getRedisHostname(redisInfo RedisDetails, cr *redisv1beta2.RedisCluster, rol } // CreateSingleLeaderRedisCommand will create command for single leader cluster creation -func CreateSingleLeaderRedisCommand(logger logr.Logger, cr *redisv1beta2.RedisCluster) []string { +func CreateSingleLeaderRedisCommand(ctx context.Context, cr *redisv1beta2.RedisCluster) []string { cmd := []string{"redis-cli", "CLUSTER", "ADDSLOTS"} for i := 0; i < 16384; i++ { cmd = append(cmd, strconv.Itoa(i)) } - logger.V(1).Info("Generating Redis Add Slots command for single node cluster", + log.FromContext(ctx).V(1).Info("Generating Redis Add Slots command for single node cluster", "BaseCommand", cmd[:3], "SlotsRange", "0-16383", "TotalSlots", 16384) @@ -87,14 +87,14 @@ func CreateSingleLeaderRedisCommand(logger logr.Logger, cr *redisv1beta2.RedisCl // RepairDisconnectedMasters attempts to repair disconnected/failed masters by issuing // a CLUSTER MEET with the updated address of the host -func RepairDisconnectedMasters(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr 
*redisv1beta2.RedisCluster) error { - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") +func RepairDisconnectedMasters(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) error { + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() - return repairDisconnectedMasters(ctx, client, logger, cr, redisClient) + return repairDisconnectedMasters(ctx, client, cr, redisClient) } -func repairDisconnectedMasters(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, redisClient *redis.Client) error { - nodes, err := clusterNodes(ctx, redisClient, logger) +func repairDisconnectedMasters(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, redisClient *redis.Client) error { + nodes, err := clusterNodes(ctx, redisClient) if err != nil { return err } @@ -106,12 +106,11 @@ func repairDisconnectedMasters(ctx context.Context, client kubernetes.Interface, if !nodeFailedOrDisconnected(node) { continue } - log.V(1).Info("found disconnected master node", "node", node) podName, err := getMasterHostFromClusterNode(node) if err != nil { return err } - ip := getRedisServerIP(client, logger, RedisDetails{ + ip := getRedisServerIP(ctx, client, RedisDetails{ PodName: podName, Namespace: cr.Namespace, }) @@ -133,7 +132,7 @@ func getMasterHostFromClusterNode(node clusterNodesResponse) (string, error) { } // CreateMultipleLeaderRedisCommand will create command for single leader cluster creation -func CreateMultipleLeaderRedisCommand(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) []string { +func CreateMultipleLeaderRedisCommand(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) []string { cmd := []string{"redis-cli", "--cluster", "create"} replicas := cr.Spec.GetReplicaCounts("leader") @@ -143,42 +142,42 @@ func CreateMultipleLeaderRedisCommand(client kubernetes.Interface, logger logr.L if cr.Spec.ClusterVersion != nil && *cr.Spec.ClusterVersion == "v7" { address = getRedisHostname(RedisDetails{PodName: podName, Namespace: cr.Namespace}, cr, "leader") + fmt.Sprintf(":%d", *cr.Spec.Port) } else { - address = getRedisServerAddress(client, logger, RedisDetails{PodName: podName, Namespace: cr.Namespace}, *cr.Spec.Port) + address = getRedisServerAddress(ctx, client, RedisDetails{PodName: podName, Namespace: cr.Namespace}, *cr.Spec.Port) } cmd = append(cmd, address) } cmd = append(cmd, "--cluster-yes") - logger.V(1).Info("Redis cluster creation command", "CommandBase", cmd[:3], "Replicas", replicas) + log.FromContext(ctx).V(1).Info("Redis cluster creation command", "CommandBase", cmd[:3], "Replicas", replicas) return cmd } // ExecuteRedisClusterCommand will execute redis cluster creation command -func ExecuteRedisClusterCommand(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { +func ExecuteRedisClusterCommand(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { var cmd []string replicas := cr.Spec.GetReplicaCounts("leader") switch int(replicas) { case 1: - err := executeFailoverCommand(ctx, client, logger, cr, "leader") + err := executeFailoverCommand(ctx, client, cr, "leader") if err != nil { - logger.Error(err, "error executing failover command") + log.FromContext(ctx).Error(err, "error executing failover command") } - cmd = CreateSingleLeaderRedisCommand(logger, cr) + cmd = 
CreateSingleLeaderRedisCommand(ctx, cr) default: - cmd = CreateMultipleLeaderRedisCommand(client, logger, cr) + cmd = CreateMultipleLeaderRedisCommand(ctx, client, cr) } if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) } cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) - logger.V(1).Info("Redis cluster creation command is", "Command", cmd) - executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + log.FromContext(ctx).V(1).Info("Redis cluster creation command is", "Command", cmd) + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") } func getRedisTLSArgs(tlsConfig *redisv1beta2.TLSConfig, clientHost string) []string { @@ -194,7 +193,7 @@ func getRedisTLSArgs(tlsConfig *redisv1beta2.TLSConfig, clientHost string) []str } // createRedisReplicationCommand will create redis replication creation command -func createRedisReplicationCommand(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, leaderPod RedisDetails, followerPod RedisDetails) []string { +func createRedisReplicationCommand(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, leaderPod RedisDetails, followerPod RedisDetails) []string { cmd := []string{"redis-cli", "--cluster", "add-node"} var followerAddress, leaderAddress string @@ -202,16 +201,16 @@ func createRedisReplicationCommand(client kubernetes.Interface, logger logr.Logg followerAddress = getRedisHostname(followerPod, cr, "follower") + fmt.Sprintf(":%d", *cr.Spec.Port) leaderAddress = getRedisHostname(leaderPod, cr, "leader") + fmt.Sprintf(":%d", *cr.Spec.Port) } else { - followerAddress = getRedisServerAddress(client, logger, followerPod, *cr.Spec.Port) - leaderAddress = getRedisServerAddress(client, logger, leaderPod, *cr.Spec.Port) + followerAddress = getRedisServerAddress(ctx, client, followerPod, *cr.Spec.Port) + leaderAddress = getRedisServerAddress(ctx, client, leaderPod, *cr.Spec.Port) } cmd = append(cmd, followerAddress, leaderAddress, "--cluster-slave") if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Failed to retrieve Redis password", "Secret", *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name) + log.FromContext(ctx).Error(err, "Failed to retrieve Redis password", "Secret", *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name) } else { cmd = append(cmd, "-a", pass) } @@ -219,7 +218,7 @@ func createRedisReplicationCommand(client kubernetes.Interface, logger logr.Logg cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, leaderPod.PodName)...) 
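// Illustrative sketch, not part of this patch: the hunks above drop the injected
// logr.Logger and resolve a logger from the request context instead. Assuming
// controller-runtime's log package (which this patch imports), a caller attaches
// a scoped logger once with log.IntoContext and any callee recovers it with
// log.FromContext; controller-runtime's controller wrapper normally does the
// IntoContext step for the ctx handed to Reconcile. The names setupExample and
// doWork below are hypothetical.
package example

import (
	"context"

	"github.com/go-logr/logr"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

func setupExample(ctx context.Context, base logr.Logger) {
	// Attach a request-scoped logger near the top of the call chain.
	ctx = log.IntoContext(ctx, base.WithValues("Request.Namespace", "default"))
	doWork(ctx)
}

func doWork(ctx context.Context) {
	// Deeper helpers no longer take a logger parameter; they pull it from ctx.
	log.FromContext(ctx).V(1).Info("doing work")
}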
- logger.V(1).Info("Generated Redis replication command", + log.FromContext(ctx).V(1).Info("Generated Redis replication command", "FollowerAddress", followerAddress, "LeaderAddress", leaderAddress, "Command", cmd) @@ -227,18 +226,18 @@ func createRedisReplicationCommand(client kubernetes.Interface, logger logr.Logg } // ExecuteRedisReplicationCommand will execute the replication command -func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { +func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { var podIP string followerCounts := cr.Spec.GetReplicaCounts("follower") leaderCounts := cr.Spec.GetReplicaCounts("leader") followerPerLeader := followerCounts / leaderCounts - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() - nodes, err := clusterNodes(ctx, redisClient, logger) + nodes, err := clusterNodes(ctx, redisClient) if err != nil { - logger.Error(err, "failed to get cluster nodes") + log.FromContext(ctx).Error(err, "failed to get cluster nodes") } for followerIdx := 0; followerIdx <= int(followerCounts)-1; { for i := 0; i < int(followerPerLeader) && followerIdx <= int(followerCounts)-1; i++ { @@ -250,24 +249,24 @@ func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Inter PodName: cr.ObjectMeta.Name + "-leader-" + strconv.Itoa((followerIdx)%int(leaderCounts)), Namespace: cr.Namespace, } - podIP = getRedisServerIP(client, logger, followerPod) - if !checkRedisNodePresence(cr, nodes, podIP) { - logger.V(1).Info("Adding node to cluster.", "Node.IP", podIP, "Follower.Pod", followerPod) - cmd := createRedisReplicationCommand(client, logger, cr, leaderPod, followerPod) - redisClient := configureRedisClient(client, logger, cr, followerPod.PodName) + podIP = getRedisServerIP(ctx, client, followerPod) + if !checkRedisNodePresence(ctx, cr, nodes, podIP) { + log.FromContext(ctx).V(1).Info("Adding node to cluster.", "Node.IP", podIP, "Follower.Pod", followerPod) + cmd := createRedisReplicationCommand(ctx, client, cr, leaderPod, followerPod) + redisClient := configureRedisClient(ctx, client, cr, followerPod.PodName) pong, err := redisClient.Ping(ctx).Result() redisClient.Close() if err != nil { - logger.Error(err, "Failed to ping Redis server", "Follower.Pod", followerPod) + log.FromContext(ctx).Error(err, "Failed to ping Redis server", "Follower.Pod", followerPod) continue } if pong == "PONG" { - executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") } else { - logger.V(1).Info("Skipping execution of command due to failed Redis ping", "Follower.Pod", followerPod) + log.FromContext(ctx).V(1).Info("Skipping execution of command due to failed Redis ping", "Follower.Pod", followerPod) } } else { - logger.V(1).Info("Skipping Adding node to cluster, already present.", "Follower.Pod", followerPod) + log.FromContext(ctx).V(1).Info("Skipping Adding node to cluster, already present.", "Follower.Pod", followerPod) } followerIdx++ @@ -278,7 +277,7 @@ func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Inter type clusterNodesResponse []string // clusterNodes will returns the response of CLUSTER NODES -func clusterNodes(ctx context.Context, redisClient *redis.Client, logger logr.Logger) 
gger)
([]clusterNodesResponse, error) { +func clusterNodes(ctx context.Context, redisClient *redis.Client) ([]clusterNodesResponse, error) { output, err := redisClient.ClusterNodes(ctx).Result() if err != nil { return nil, err @@ -299,62 +298,60 @@ func clusterNodes(ctx context.Context, redisClient *redis.Client, logger logr.Lo } // ExecuteFailoverOperation will execute redis failover operations -func ExecuteFailoverOperation(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) error { - err := executeFailoverCommand(ctx, client, logger, cr, "leader") +func ExecuteFailoverOperation(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) error { + err := executeFailoverCommand(ctx, client, cr, "leader") if err != nil { - logger.Error(err, "Redis command failed for leader nodes") return err } - err = executeFailoverCommand(ctx, client, logger, cr, "follower") + err = executeFailoverCommand(ctx, client, cr, "follower") if err != nil { - logger.Error(err, "Redis command failed for follower nodes") return err } return nil } // executeFailoverCommand will execute failover command -func executeFailoverCommand(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, role string) error { +func executeFailoverCommand(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, role string) error { replicas := cr.Spec.GetReplicaCounts(role) podName := fmt.Sprintf("%s-%s-", cr.ObjectMeta.Name, role) for podCount := 0; podCount <= int(replicas)-1; podCount++ { - logger.V(1).Info("Executing redis failover operations", "Redis Node", podName+strconv.Itoa(podCount)) - client := configureRedisClient(client, logger, cr, podName+strconv.Itoa(podCount)) + log.FromContext(ctx).V(1).Info("Executing redis failover operations", "Redis Node", podName+strconv.Itoa(podCount)) + client := configureRedisClient(ctx, client, cr, podName+strconv.Itoa(podCount)) defer client.Close() cmd := redis.NewStringCmd(ctx, "cluster", "reset") err := client.Process(ctx, cmd) if err != nil { - logger.Error(err, "Redis command failed with this error") + log.FromContext(ctx).Error(err, "Redis command failed with this error") flushcommand := redis.NewStringCmd(ctx, "flushall") err = client.Process(ctx, flushcommand) if err != nil { - logger.Error(err, "Redis flush command failed with this error") + log.FromContext(ctx).Error(err, "Redis flush command failed with this error") return err } } err = client.Process(ctx, cmd) if err != nil { - logger.Error(err, "Redis command failed with this error") + log.FromContext(ctx).Error(err, "Redis command failed with this error") return err } output, err := cmd.Result() if err != nil { - logger.Error(err, "Redis command failed with this error") + log.FromContext(ctx).Error(err, "Redis command failed with this error") return err } - logger.V(1).Info("Redis cluster failover executed", "Output", output) + log.FromContext(ctx).V(1).Info("Redis cluster failover executed", "Output", output) } return nil } // CheckRedisNodeCount will check the count of redis nodes -func CheckRedisNodeCount(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, nodeType string) int32 { - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") +func CheckRedisNodeCount(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, nodeType string) int32 { + redisClient := configureRedisClient(ctx, 
client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() var redisNodeType string - clusterNodes, err := clusterNodes(ctx, redisClient, logger) + clusterNodes, err := clusterNodes(ctx, redisClient) if err != nil { - logger.Error(err, "failed to get cluster nodes") + log.FromContext(ctx).Error(err, "failed to get cluster nodes") } count := len(clusterNodes) @@ -373,29 +370,29 @@ func CheckRedisNodeCount(ctx context.Context, client kubernetes.Interface, logge count++ } } - logger.V(1).Info("Number of redis nodes are", "Nodes", strconv.Itoa(count), "Type", nodeType) + log.FromContext(ctx).V(1).Info("Number of redis nodes are", "Nodes", strconv.Itoa(count), "Type", nodeType) } else { - logger.V(1).Info("Total number of redis nodes are", "Nodes", strconv.Itoa(count)) + log.FromContext(ctx).V(1).Info("Total number of redis nodes are", "Nodes", strconv.Itoa(count)) } return int32(count) } // RedisClusterStatusHealth use `redis-cli --cluster check 127.0.0.1:6379` -func RedisClusterStatusHealth(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) bool { - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") +func RedisClusterStatusHealth(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) bool { + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() cmd := []string{"redis-cli", "--cluster", "check", "127.0.0.1:6379"} if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) } cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) 
- out, err := executeCommand1(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + out, err := executeCommand1(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") if err != nil { return false } @@ -409,10 +406,10 @@ func RedisClusterStatusHealth(ctx context.Context, client kubernetes.Interface, } // UnhealthyNodesInCluster returns the number of unhealthy nodes in the cluster cr -func UnhealthyNodesInCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) (int, error) { - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") +func UnhealthyNodesInCluster(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) (int, error) { + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() - clusterNodes, err := clusterNodes(ctx, redisClient, logger) + clusterNodes, err := clusterNodes(ctx, redisClient) if err != nil { return 0, err } @@ -422,7 +419,7 @@ func UnhealthyNodesInCluster(ctx context.Context, client kubernetes.Interface, l count++ } } - logger.V(1).Info("Number of failed nodes in cluster", "Failed Node Count", count) + log.FromContext(ctx).V(1).Info("Number of failed nodes in cluster", "Failed Node Count", count) return count, nil } @@ -435,7 +432,7 @@ func nodeFailedOrDisconnected(node clusterNodesResponse) bool { } // configureRedisClient will configure the Redis Client -func configureRedisClient(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, podName string) *redis.Client { +func configureRedisClient(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, podName string) *redis.Client { redisInfo := RedisDetails{ PodName: podName, Namespace: cr.Namespace, @@ -443,45 +440,45 @@ func configureRedisClient(client kubernetes.Interface, logger logr.Logger, cr *r var err error var pass string if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err = getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err = getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } } opts := &redis.Options{ - Addr: getRedisServerAddress(client, logger, redisInfo, *cr.Spec.Port), + Addr: getRedisServerAddress(ctx, client, redisInfo, *cr.Spec.Port), Password: pass, DB: 0, } if cr.Spec.TLS != nil { - opts.TLSConfig = getRedisTLSConfig(client, logger, cr.Namespace, cr.Spec.TLS.Secret.SecretName, redisInfo.PodName) + opts.TLSConfig = getRedisTLSConfig(ctx, client, cr.Namespace, cr.Spec.TLS.Secret.SecretName, redisInfo.PodName) } return redis.NewClient(opts) } // executeCommand will execute the commands in pod -func executeCommand(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, cmd []string, podName string) { - execOut, execErr := executeCommand1(client, logger, cr, cmd, podName) +func executeCommand(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, cmd []string, podName string) { + execOut, execErr := executeCommand1(ctx, client, cr, cmd, podName) if execErr != nil { - logger.Error(execErr, "Could not execute command", "Command", cmd, "Output", execOut) + 
log.FromContext(ctx).Error(execErr, "Could not execute command", "Command", cmd, "Output", execOut) return } - logger.V(1).Info("Successfully executed the command", "Command", cmd, "Output", execOut) + log.FromContext(ctx).V(1).Info("Successfully executed the command", "Command", cmd, "Output", execOut) } -func executeCommand1(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, cmd []string, podName string) (stdout string, stderr error) { +func executeCommand1(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, cmd []string, podName string) (stdout string, stderr error) { var ( execOut bytes.Buffer execErr bytes.Buffer ) config, err := GenerateK8sConfig()() if err != nil { - logger.Error(err, "Could not find pod to execute") + log.FromContext(ctx).Error(err, "Could not find pod to execute") return "", err } - targetContainer, pod := getContainerID(client, logger, cr, podName) + targetContainer, pod := getContainerID(ctx, client, cr, podName) if targetContainer < 0 { - logger.Error(err, "Could not find pod to execute") + log.FromContext(ctx).Error(err, "Could not find pod to execute") return "", err } @@ -494,7 +491,7 @@ func executeCommand1(client kubernetes.Interface, logger logr.Logger, cr *redisv }, scheme.ParameterCodec) exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL()) if err != nil { - logger.Error(err, "Failed to init executor") + log.FromContext(ctx).Error(err, "Failed to init executor") return "", err } @@ -510,27 +507,27 @@ func executeCommand1(client kubernetes.Interface, logger logr.Logger, cr *redisv } // getContainerID will return the id of container from pod -func getContainerID(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, podName string) (int, *corev1.Pod) { +func getContainerID(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, podName string) (int, *corev1.Pod) { pod, err := client.CoreV1().Pods(cr.Namespace).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { - logger.Error(err, "Could not get pod info", "Pod Name", podName, "Namespace", cr.Namespace) + log.FromContext(ctx).Error(err, "Could not get pod info", "Pod Name", podName, "Namespace", cr.Namespace) return -1, nil } - logger.V(1).Info("Pod info retrieved successfully", "Pod Name", podName, "Namespace", cr.Namespace) + log.FromContext(ctx).V(1).Info("Pod info retrieved successfully", "Pod Name", podName, "Namespace", cr.Namespace) targetContainer := -1 for containerID, tr := range pod.Spec.Containers { - logger.V(1).Info("Inspecting container", "Pod Name", podName, "Container ID", containerID, "Container Name", tr.Name) + log.FromContext(ctx).V(1).Info("Inspecting container", "Pod Name", podName, "Container ID", containerID, "Container Name", tr.Name) if tr.Name == cr.ObjectMeta.Name+"-leader" { targetContainer = containerID - logger.V(1).Info("Leader container found", "Container ID", containerID, "Container Name", tr.Name) + log.FromContext(ctx).V(1).Info("Leader container found", "Container ID", containerID, "Container Name", tr.Name) break } } if targetContainer == -1 { - logger.V(1).Info("Leader container not found in pod", "Pod Name", podName) + log.FromContext(ctx).V(1).Info("Leader container not found in pod", "Pod Name", podName) return -1, nil } @@ -538,9 +535,8 @@ func getContainerID(client kubernetes.Interface, logger logr.Logger, cr *redisv1 } // checkRedisNodePresence will check if the redis node exist in cluster or not -func 
checkRedisNodePresence(cr *redisv1beta2.RedisCluster, nodeList []clusterNodesResponse, nodeName string) bool { - logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name) - logger.V(1).Info("Checking if Node is in cluster", "Node", nodeName) +func checkRedisNodePresence(ctx context.Context, cr *redisv1beta2.RedisCluster, nodeList []clusterNodesResponse, nodeName string) bool { + log.FromContext(ctx).V(1).Info("Checking if Node is in cluster", "Node", nodeName) for _, node := range nodeList { s := strings.Split(node[1], ":") if s[0] == nodeName { @@ -550,14 +546,8 @@ func checkRedisNodePresence(cr *redisv1beta2.RedisCluster, nodeList []clusterNod return false } -// generateRedisManagerLogger will generate logging interface for Redis operations -func generateRedisManagerLogger(namespace, name string) logr.Logger { - reqLogger := log.WithValues("Request.RedisManager.Namespace", namespace, "Request.RedisManager.Name", name) - return reqLogger -} - // configureRedisClient will configure the Redis Client -func configureRedisReplicationClient(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, podName string) *redis.Client { +func configureRedisReplicationClient(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisReplication, podName string) *redis.Client { redisInfo := RedisDetails{ PodName: podName, Namespace: cr.Namespace, @@ -565,27 +555,27 @@ func configureRedisReplicationClient(client kubernetes.Interface, logger logr.Lo var err error var pass string if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err = getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err = getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } } opts := &redis.Options{ - Addr: getRedisServerAddress(client, logger, redisInfo, 6379), + Addr: getRedisServerAddress(ctx, client, redisInfo, 6379), Password: pass, DB: 0, } if cr.Spec.TLS != nil { - opts.TLSConfig = getRedisTLSConfig(client, logger, cr.Namespace, cr.Spec.TLS.Secret.SecretName, podName) + opts.TLSConfig = getRedisTLSConfig(ctx, client, cr.Namespace, cr.Spec.TLS.Secret.SecretName, podName) } return redis.NewClient(opts) } // Get Redis nodes by it's role i.e. 
master, slave and sentinel -func GetRedisNodesByRole(ctx context.Context, cl kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, redisRole string) []string { - statefulset, err := GetStatefulSet(cl, logger, cr.GetNamespace(), cr.GetName()) +func GetRedisNodesByRole(ctx context.Context, cl kubernetes.Interface, cr *redisv1beta2.RedisReplication, redisRole string) []string { + statefulset, err := GetStatefulSet(ctx, cl, cr.GetNamespace(), cr.GetName()) if err != nil { - logger.Error(err, "Failed to Get the Statefulset of the", "custom resource", cr.Name, "in namespace", cr.Namespace) + log.FromContext(ctx).Error(err, "Failed to Get the Statefulset of the", "custom resource", cr.Name, "in namespace", cr.Namespace) } var pods []string @@ -593,9 +583,9 @@ func GetRedisNodesByRole(ctx context.Context, cl kubernetes.Interface, logger lo for i := 0; i < int(replicas); i++ { podName := statefulset.Name + "-" + strconv.Itoa(i) - redisClient := configureRedisReplicationClient(cl, logger, cr, podName) + redisClient := configureRedisReplicationClient(ctx, cl, cr, podName) defer redisClient.Close() - podRole := checkRedisServerRole(ctx, redisClient, logger, podName) + podRole := checkRedisServerRole(ctx, redisClient, podName) if podRole == redisRole { pods = append(pods, podName) } @@ -605,29 +595,29 @@ func GetRedisNodesByRole(ctx context.Context, cl kubernetes.Interface, logger lo } // Check the Redis Server Role i.e. master, slave and sentinel -func checkRedisServerRole(ctx context.Context, redisClient *redis.Client, logger logr.Logger, podName string) string { +func checkRedisServerRole(ctx context.Context, redisClient *redis.Client, podName string) string { info, err := redisClient.Info(ctx, "Replication").Result() if err != nil { - logger.Error(err, "Failed to Get the role Info of the", "redis pod", podName) + log.FromContext(ctx).Error(err, "Failed to Get the role Info of the", "redis pod", podName) return "" } lines := strings.Split(info, "\r\n") for _, line := range lines { if strings.HasPrefix(line, "role:") { role := strings.TrimPrefix(line, "role:") - logger.V(1).Info("Role of the Redis Pod", "pod", podName, "role", role) + log.FromContext(ctx).V(1).Info("Role of the Redis Pod", "pod", podName, "role", role) return role } } - logger.Error(err, "Failed to find role from Info # Replication in", "redis pod", podName) + log.FromContext(ctx).Error(err, "Failed to find role from Info # Replication in", "redis pod", podName) return "" } // checkAttachedSlave would return redis pod name which has slave -func checkAttachedSlave(ctx context.Context, redisClient *redis.Client, logger logr.Logger, podName string) int { +func checkAttachedSlave(ctx context.Context, redisClient *redis.Client, podName string) int { info, err := redisClient.Info(ctx, "Replication").Result() if err != nil { - logger.Error(err, "Failed to get the connected slaves count of the", "redis pod", podName) + log.FromContext(ctx).Error(err, "Failed to get the connected slaves count of the", "redis pod", podName) return -1 // return -1 if failed to get the connected slaves count } @@ -637,35 +627,35 @@ func checkAttachedSlave(ctx context.Context, redisClient *redis.Client, logger l var connected_slaves int connected_slaves, err = strconv.Atoi(strings.TrimPrefix(line, "connected_slaves:")) if err != nil { - logger.Error(err, "Failed to convert the connected slaves count of the", "redis pod", podName) + log.FromContext(ctx).Error(err, "Failed to convert the connected slaves count of the", "redis pod", podName) 
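// Illustrative sketch, not part of this patch: checkAttachedSlave above parses the
// connected_slaves field out of INFO Replication. Assuming the redismock/v9 client
// already used by this repo's tests, a minimal in-package test could stub that
// reply; the test name and payload below are hypothetical examples.
package k8sutils

import (
	"context"
	"testing"

	"github.com/go-redis/redismock/v9"
	"github.com/stretchr/testify/assert"
)

func Test_checkAttachedSlave_sketch(t *testing.T) {
	client, mock := redismock.NewClientMock()
	// Pretend this pod is a master with two attached replicas.
	mock.ExpectInfo("Replication").SetVal("# Replication\r\nrole:master\r\nconnected_slaves:2\r\n")
	assert.Equal(t, 2, checkAttachedSlave(context.TODO(), client, "redis-replication-0"))
}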
return -1 } - logger.V(1).Info("Connected Slaves of the Redis Pod", "pod", podName, "connected_slaves", connected_slaves) + log.FromContext(ctx).V(1).Info("Connected Slaves of the Redis Pod", "pod", podName, "connected_slaves", connected_slaves) return connected_slaves } } - logger.Error(nil, "Failed to find connected_slaves from Info # Replication in", "redis pod", podName) + log.FromContext(ctx).Error(nil, "Failed to find connected_slaves from Info # Replication in", "redis pod", podName) return 0 } -func CreateMasterSlaveReplication(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, masterPods []string, realMasterPod string) error { - logger.V(1).Info("Redis Master Node is set to", "pod", realMasterPod) +func CreateMasterSlaveReplication(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisReplication, masterPods []string, realMasterPod string) error { + log.FromContext(ctx).V(1).Info("Redis Master Node is set to", "pod", realMasterPod) realMasterInfo := RedisDetails{ PodName: realMasterPod, Namespace: cr.Namespace, } - realMasterPodIP := getRedisServerIP(client, logger, realMasterInfo) + realMasterPodIP := getRedisServerIP(ctx, client, realMasterInfo) for i := 0; i < len(masterPods); i++ { if masterPods[i] != realMasterPod { - redisClient := configureRedisReplicationClient(client, logger, cr, masterPods[i]) + redisClient := configureRedisReplicationClient(ctx, client, cr, masterPods[i]) defer redisClient.Close() - logger.V(1).Info("Setting the", "pod", masterPods[i], "to slave of", realMasterPod) + log.FromContext(ctx).V(1).Info("Setting the", "pod", masterPods[i], "to slave of", realMasterPod) err := redisClient.SlaveOf(ctx, realMasterPodIP, "6379").Err() if err != nil { - logger.Error(err, "Failed to set", "pod", masterPods[i], "to slave of", realMasterPod) + log.FromContext(ctx).Error(err, "Failed to set", "pod", masterPods[i], "to slave of", realMasterPod) return err } } @@ -674,12 +664,12 @@ func CreateMasterSlaveReplication(ctx context.Context, client kubernetes.Interfa return nil } -func GetRedisReplicationRealMaster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, masterPods []string) string { +func GetRedisReplicationRealMaster(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisReplication, masterPods []string) string { for _, podName := range masterPods { - redisClient := configureRedisReplicationClient(client, logger, cr, podName) + redisClient := configureRedisReplicationClient(ctx, client, cr, podName) defer redisClient.Close() - if checkAttachedSlave(ctx, redisClient, logger, podName) > 0 { + if checkAttachedSlave(ctx, redisClient, podName) > 0 { return podName } } diff --git a/pkg/k8sutils/redis_test.go b/pkg/k8sutils/redis_test.go index 79314e02d..8f6035f3c 100644 --- a/pkg/k8sutils/redis_test.go +++ b/pkg/k8sutils/redis_test.go @@ -10,8 +10,6 @@ import ( "github.com/OT-CONTAINER-KIT/redis-operator/api" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" mock_utils "github.com/OT-CONTAINER-KIT/redis-operator/mocks/utils" - "github.com/go-logr/logr" - "github.com/go-logr/logr/testr" "github.com/go-redis/redismock/v9" redis "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" @@ -49,7 +47,7 @@ func TestCheckRedisNodePresence(t *testing.T) { for _, tt := range tests { testname := fmt.Sprintf("%s,%s", tt.nodes, tt.ip) t.Run(testname, func(t *testing.T) { - ans := checkRedisNodePresence(cr, 
tt.nodes, tt.ip) + ans := checkRedisNodePresence(context.TODO(), cr, tt.nodes, tt.ip) if ans != tt.want { t.Errorf("got %t, want %t", ans, tt.want) } @@ -80,7 +78,7 @@ e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:30001@31001,redis-cluster-lea }) mock.ExpectClusterMeet(newPodIP, "6379").SetVal("OK") port := 6379 - err := repairDisconnectedMasters(ctx, k8sClient, logr.Discard(), &redisv1beta2.RedisCluster{ + err := repairDisconnectedMasters(ctx, k8sClient, &redisv1beta2.RedisCluster{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, }, @@ -175,8 +173,8 @@ func TestGetRedisServerIP(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { client := tt.setup() - logger := testr.New(t) - redisIP := getRedisServerIP(client, logger, tt.redisInfo) + + redisIP := getRedisServerIP(context.TODO(), client, tt.redisInfo) if tt.expectEmpty { assert.Empty(t, redisIP, "Expected an empty IP address") @@ -240,8 +238,8 @@ func TestGetRedisServerAddress(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { client := tt.setup() - logger := testr.New(t) - redisIP := getRedisServerAddress(client, logger, tt.redisInfo, 6379) + + redisIP := getRedisServerAddress(context.TODO(), client, tt.redisInfo, 6379) if tt.expectEmpty { assert.Empty(t, redisIP, "Expected an empty address") @@ -286,9 +284,8 @@ func TestGetRedisHostname(t *testing.T) { } func TestCreateSingleLeaderRedisCommand(t *testing.T) { - logger := testr.New(t) cr := &redisv1beta2.RedisCluster{} - cmd := CreateSingleLeaderRedisCommand(logger, cr) + cmd := CreateSingleLeaderRedisCommand(context.TODO(), cr) assert.Equal(t, "redis-cli", cmd[0]) assert.Equal(t, "CLUSTER", cmd[1]) @@ -353,9 +350,8 @@ func TestCreateMultipleLeaderRedisCommand(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { client := mock_utils.CreateFakeClientWithPodIPs_LeaderPods(tt.redisCluster) - logger := testr.New(t) - cmd := CreateMultipleLeaderRedisCommand(client, logger, tt.redisCluster) + cmd := CreateMultipleLeaderRedisCommand(context.TODO(), client, tt.redisCluster) assert.Equal(t, tt.expectedCommands, cmd) }) } @@ -391,7 +387,6 @@ func TestGetRedisTLSArgs(t *testing.T) { } func TestCreateRedisReplicationCommand(t *testing.T) { - logger := logr.Discard() type secret struct { name string namespace string @@ -530,7 +525,7 @@ func TestCreateRedisReplicationCommand(t *testing.T) { objects = append(objects, secret...) client := fake.NewSimpleClientset(objects...) 
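// Illustrative sketch, not part of this patch: the test hunks above replace the
// explicit testr / logr.Discard loggers with a bare context.TODO(), so
// log.FromContext falls back to the package-level default logger. Assuming a test
// still wants its own log output, it can attach a testr logger to the context;
// the helper name newTestContext is hypothetical.
package k8sutils

import (
	"context"
	"testing"

	"github.com/go-logr/logr/testr"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

func newTestContext(t *testing.T) context.Context {
	// Anything logged via log.FromContext(ctx) is routed to t.Log for this test,
	// e.g. getRedisPassword(newTestContext(t), client, ns, secretName, secretKey).
	return log.IntoContext(context.TODO(), testr.New(t))
}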
-      cmd := createRedisReplicationCommand(client, logger, tt.redisCluster, tt.leaderPod, tt.followerPod)
+      cmd := createRedisReplicationCommand(context.TODO(), client, tt.redisCluster, tt.leaderPod, tt.followerPod)
 
       // Assert the command is as expected using testify
       assert.Equal(t, tt.expectedCommand, cmd)
@@ -614,8 +609,7 @@ func TestGetContainerID(t *testing.T) {
   for _, test := range tests {
     t.Run(test.name, func(t *testing.T) {
       client := k8sClientFake.NewSimpleClientset(test.setupPod)
-      logger := testr.New(t)
-      id, pod := getContainerID(client, logger, test.redisCluster, test.setupPod.Name)
+      id, pod := getContainerID(context.TODO(), client, test.redisCluster, test.setupPod.Name)
       if test.expectError {
         assert.Nil(t, pod, "Expected no pod but got one")
         assert.Equal(t, test.expectedID, id, "Expected ID does not match")
@@ -630,8 +624,6 @@ func TestGetContainerID(t *testing.T) {
 }
 
 func Test_checkAttachedSlave(t *testing.T) {
-  logger := logr.Discard()
-
   tests := []struct {
     name string
     podName string
@@ -709,7 +701,7 @@ func Test_checkAttachedSlave(t *testing.T) {
         mock.ExpectInfo("Replication").SetVal(tt.infoReturn)
       }
 
-      slaveCount := checkAttachedSlave(ctx, client, logger, tt.podName)
+      slaveCount := checkAttachedSlave(ctx, client, tt.podName)
       assert.Equal(t, tt.expectedSlaveCount, slaveCount, "Test case: "+tt.name)
 
       if err := mock.ExpectationsWereMet(); err != nil {
         t.Errorf("there were unmet expectations: %s", err)
@@ -719,8 +711,6 @@ func Test_checkAttachedSlave(t *testing.T) {
 }
 
 func Test_checkRedisServerRole(t *testing.T) {
-  logger := logr.Discard()
-
   tests := []struct {
     name string
     podName string
@@ -798,7 +788,7 @@ func Test_checkRedisServerRole(t *testing.T) {
         mock.ExpectInfo("Replication").SetVal(tt.infoReturn)
       }
 
-      role := checkRedisServerRole(ctx, client, logger, tt.podName)
+      role := checkRedisServerRole(ctx, client, tt.podName)
       if tt.shouldFail {
         assert.Empty(t, role, "Test case: "+tt.name)
       } else {
@@ -812,7 +802,7 @@ func Test_checkRedisServerRole(t *testing.T) {
 }
 
 func TestClusterNodes(t *testing.T) {
-  logger := logr.Discard() // Discard logs
+  // Discard logs
 
   tests := []struct {
     name string
@@ -853,7 +843,7 @@ e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:30001@31001,hostname1 myself,
       } else {
        mock.ExpectClusterNodes().SetVal(tc.clusterNodesOutput)
       }
-      result, err := clusterNodes(context.TODO(), db, logger)
+      result, err := clusterNodes(context.TODO(), db)
 
       if tc.expectError != nil {
         assert.Nil(t, result)
diff --git a/pkg/k8sutils/secrets.go b/pkg/k8sutils/secrets.go
index 91cc852c2..8f095bc1e 100644
--- a/pkg/k8sutils/secrets.go
+++ b/pkg/k8sutils/secrets.go
@@ -7,35 +7,32 @@ import (
   "errors"
   "strings"
 
-  "github.com/go-logr/logr"
   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
   "k8s.io/client-go/kubernetes"
   logf "sigs.k8s.io/controller-runtime/pkg/log"
 )
 
-var log = logf.Log.WithName("controller_redis")
-
 // getRedisPassword method will return the redis password from the secret
-func getRedisPassword(client kubernetes.Interface, logger logr.Logger, namespace, name, secretKey string) (string, error) {
+func getRedisPassword(ctx context.Context, client kubernetes.Interface, namespace, name, secretKey string) (string, error) {
   secretName, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
   if err != nil {
-    logger.Error(err, "Failed in getting existing secret for redis")
+    logf.FromContext(ctx).Error(err, "Failed in getting existing secret for redis")
     return "", err
   }
 
   for key, value := range secretName.Data {
     if key == secretKey {
-      logger.V(1).Info("Secret key found in the secret", "secretKey", secretKey)
+      logf.FromContext(ctx).Info("Secret key found in the secret", "secretKey", secretKey)
       return strings.TrimSpace(string(value)), nil
     }
   }
-  logger.Error(errors.New("secret key not found"), "Secret key not found in the secret")
+  logf.FromContext(ctx).Error(errors.New("secret key not found"), "Secret key not found in the secret")
   return "", nil
 }
 
-func getRedisTLSConfig(client kubernetes.Interface, logger logr.Logger, namespace, tlsSecretName, podName string) *tls.Config {
+func getRedisTLSConfig(ctx context.Context, client kubernetes.Interface, namespace, tlsSecretName, podName string) *tls.Config {
   secret, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), tlsSecretName, metav1.GetOptions{})
   if err != nil {
-    logger.V(1).Error(err, "Failed in getting TLS secret", "secretName", tlsSecretName, "namespace", namespace)
+    logf.FromContext(ctx).Error(err, "Failed in getting TLS secret", "secretName", tlsSecretName, "namespace", namespace)
     return nil
   }
 
@@ -44,20 +41,20 @@ func getRedisTLSConfig(client kubernetes.Interface, logger logr.Logger, namespac
   tlsCaCertificate, caExists := secret.Data["ca.crt"]
 
   if !certExists || !keyExists || !caExists {
-    logger.Error(errors.New("required TLS keys are missing in the secret"), "Missing TLS keys in the secret")
+    logf.FromContext(ctx).Error(errors.New("required TLS keys are missing in the secret"), "Missing TLS keys in the secret")
     return nil
   }
 
   cert, err := tls.X509KeyPair(tlsClientCert, tlsClientKey)
   if err != nil {
-    logger.V(1).Error(err, "Couldn't load TLS client key pair", "secretName", tlsSecretName, "namespace", namespace)
+    logf.FromContext(ctx).Error(err, "Couldn't load TLS client key pair", "secretName", tlsSecretName, "namespace", namespace)
     return nil
   }
 
   tlsCaCertificates := x509.NewCertPool()
   ok := tlsCaCertificates.AppendCertsFromPEM(tlsCaCertificate)
   if !ok {
-    logger.V(1).Error(err, "Invalid CA Certificates", "secretName", tlsSecretName, "namespace", namespace)
+    logf.FromContext(ctx).Error(err, "Invalid CA Certificates", "secretName", tlsSecretName, "namespace", namespace)
     return nil
   }
 
diff --git a/pkg/k8sutils/secrets_test.go b/pkg/k8sutils/secrets_test.go
index cb1734f6f..10c42d35c 100644
--- a/pkg/k8sutils/secrets_test.go
+++ b/pkg/k8sutils/secrets_test.go
@@ -1,13 +1,13 @@
 package k8sutils
 
 import (
+  "context"
   "os"
   "path/filepath"
   "testing"
 
   common "github.com/OT-CONTAINER-KIT/redis-operator/api"
   redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2"
-  "github.com/go-logr/logr/testr"
   "github.com/stretchr/testify/assert"
   "github.com/stretchr/testify/require"
   corev1 "k8s.io/api/core/v1"
@@ -84,8 +84,8 @@ func Test_getRedisPassword(t *testing.T) {
   for _, tt := range tests {
     t.Run(tt.name, func(t *testing.T) {
       client := tt.setup()
-      logger := testr.New(t)
-      got, err := getRedisPassword(client, logger, tt.namespace, tt.secretName, tt.secretKey)
+
+      got, err := getRedisPassword(context.TODO(), client, tt.namespace, tt.secretName, tt.secretKey)
 
       if tt.expectedErr {
         require.Error(t, err, "Expected an error but didn't get one")
@@ -221,8 +221,8 @@ func Test_getRedisTLSConfig(t *testing.T) {
   for _, tt := range tests {
     t.Run(tt.name, func(t *testing.T) {
       client := tt.setup()
-      logger := testr.New(t)
-      tlsConfig := getRedisTLSConfig(client, logger, tt.redisCluster.Namespace, tt.redisCluster.Spec.TLS.Secret.SecretName, tt.redisInfo.PodName)
+
+      tlsConfig := getRedisTLSConfig(context.TODO(), client, tt.redisCluster.Namespace, tt.redisCluster.Spec.TLS.Secret.SecretName, tt.redisInfo.PodName)
 
       if tt.expectTLS {
         require.NotNil(t, tlsConfig, "Expected TLS configuration but got nil")
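A quick illustration of the test-side pattern the hunks above imply (the package name and the fetchSecretValue helper are stand-ins, not code from this repository): with the logr.Logger parameter removed, a test that used to pass testr.New(t) can keep per-test log output by wrapping the test logger into the context it passes instead of context.TODO().

package k8sutils_sketch

import (
  "context"
  "testing"

  "github.com/go-logr/logr/testr"
  logf "sigs.k8s.io/controller-runtime/pkg/log"
)

// fetchSecretValue stands in for any refactored helper (for example getRedisPassword)
// that now resolves its logger from the context instead of a parameter.
func fetchSecretValue(ctx context.Context, key string) string {
  logf.FromContext(ctx).Info("looking up secret key", "secretKey", key)
  return "dummy-value"
}

func TestFetchSecretValue(t *testing.T) {
  // Instead of context.TODO(), carry the test logger in the context so the
  // helper's log lines show up in `go test -v` output.
  ctx := logf.IntoContext(context.Background(), testr.New(t))
  if got := fetchSecretValue(ctx, "password"); got == "" {
    t.Fatal("expected a non-empty value")
  }
}

The same wrapping works for any helper touched by this diff, since logf.FromContext falls back to the package-level default logger when the context carries none.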
diff --git a/pkg/k8sutils/services.go b/pkg/k8sutils/services.go
index 627117277..1ad9aa63d 100644
--- a/pkg/k8sutils/services.go
+++ b/pkg/k8sutils/services.go
@@ -4,12 +4,12 @@ import (
   "context"
 
   "github.com/banzaicloud/k8s-objectmatcher/patch"
-  "github.com/go-logr/logr"
   corev1 "k8s.io/api/core/v1"
   "k8s.io/apimachinery/pkg/api/errors"
   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
   "k8s.io/apimachinery/pkg/util/intstr"
   "k8s.io/client-go/kubernetes"
+  "sigs.k8s.io/controller-runtime/pkg/log"
 )
 
 const (
@@ -94,66 +94,59 @@ func generateServiceType(k8sServiceType string) corev1.ServiceType {
 }
 
 // createService is a method to create service is Kubernetes
-func createService(kusClient kubernetes.Interface, logger logr.Logger, namespace string, service *corev1.Service) error {
+func createService(ctx context.Context, kusClient kubernetes.Interface, namespace string, service *corev1.Service) error {
   _, err := kusClient.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{})
   if err != nil {
-    logger.Error(err, "Redis service creation is failed")
+    log.FromContext(ctx).Error(err, "Redis service creation is failed")
     return err
   }
-  logger.V(1).Info("Redis service creation is successful")
+  log.FromContext(ctx).V(1).Info("Redis service creation is successful")
   return nil
 }
 
 // updateService is a method to update service is Kubernetes
-func updateService(k8sClient kubernetes.Interface, logger logr.Logger, namespace string, service *corev1.Service) error {
+func updateService(ctx context.Context, k8sClient kubernetes.Interface, namespace string, service *corev1.Service) error {
   _, err := k8sClient.CoreV1().Services(namespace).Update(context.TODO(), service, metav1.UpdateOptions{})
   if err != nil {
-    logger.Error(err, "Redis service update failed")
+    log.FromContext(ctx).Error(err, "Redis service update failed")
     return err
   }
-  logger.V(1).Info("Redis service updated successfully")
+  log.FromContext(ctx).V(1).Info("Redis service updated successfully")
   return nil
 }
 
 // getService is a method to get service is Kubernetes
-func getService(k8sClient kubernetes.Interface, logger logr.Logger, namespace string, name string) (*corev1.Service, error) {
+func getService(ctx context.Context, k8sClient kubernetes.Interface, namespace string, name string) (*corev1.Service, error) {
   getOpts := metav1.GetOptions{
     TypeMeta: generateMetaInformation("Service", "v1"),
   }
   serviceInfo, err := k8sClient.CoreV1().Services(namespace).Get(context.TODO(), name, getOpts)
   if err != nil {
-    logger.V(1).Info("Redis service get action is failed")
+    log.FromContext(ctx).V(1).Info("Redis service get action is failed")
     return nil, err
   }
-  logger.V(1).Info("Redis service get action is successful")
+  log.FromContext(ctx).V(1).Info("Redis service get action is successful")
   return serviceInfo, nil
 }
 
-func serviceLogger(namespace string, name string) logr.Logger {
-  reqLogger := log.WithValues("Request.Service.Namespace", namespace, "Request.Service.Name", name)
-  return reqLogger
-}
-
 // CreateOrUpdateService method will create or update Redis service
-func CreateOrUpdateService(namespace string, serviceMeta metav1.ObjectMeta, ownerDef metav1.OwnerReference, epp exporterPortProvider, headless bool, serviceType string, port int, cl kubernetes.Interface, extra ...corev1.ServicePort) error {
-  logger := serviceLogger(namespace, serviceMeta.Name)
+func CreateOrUpdateService(ctx context.Context, namespace string, serviceMeta metav1.ObjectMeta, ownerDef metav1.OwnerReference, epp exporterPortProvider, headless bool, serviceType string, port int, cl kubernetes.Interface, extra ...corev1.ServicePort) error {
   serviceDef := generateServiceDef(serviceMeta, epp, ownerDef, headless, serviceType, port, extra...)
-  storedService, err := getService(cl, logger, namespace, serviceMeta.GetName())
+  storedService, err := getService(ctx, cl, namespace, serviceMeta.GetName())
   if err != nil {
     if errors.IsNotFound(err) {
       if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(serviceDef); err != nil { //nolint
-        logger.Error(err, "Unable to patch redis service with compare annotations")
+        log.FromContext(ctx).Error(err, "Unable to patch redis service with compare annotations")
       }
-      return createService(cl, logger, namespace, serviceDef)
+      return createService(ctx, cl, namespace, serviceDef)
     }
     return err
   }
-  return patchService(storedService, serviceDef, namespace, cl)
+  return patchService(ctx, storedService, serviceDef, namespace, cl)
 }
 
 // patchService will patch Redis Kubernetes service
-func patchService(storedService *corev1.Service, newService *corev1.Service, namespace string, cl kubernetes.Interface) error {
-  logger := serviceLogger(namespace, storedService.Name)
+func patchService(ctx context.Context, storedService *corev1.Service, newService *corev1.Service, namespace string, cl kubernetes.Interface) error {
   // We want to try and keep this atomic as possible.
   newService.ResourceVersion = storedService.ResourceVersion
   newService.CreationTimestamp = storedService.CreationTimestamp
@@ -169,11 +162,11 @@ func patchService(storedService *corev1.Service, newService *corev1.Service, nam
     patch.IgnoreField("apiVersion"),
   )
   if err != nil {
-    logger.Error(err, "Unable to patch redis service with comparison object")
+    log.FromContext(ctx).Error(err, "Unable to patch redis service with comparison object")
     return err
   }
   if !patchResult.IsEmpty() {
-    logger.V(1).Info("Changes in service Detected, Updating...", "patch", string(patchResult.Patch))
+    log.FromContext(ctx).V(1).Info("Changes in service Detected, Updating...", "patch", string(patchResult.Patch))
 
     for key, value := range storedService.Annotations {
       if _, present := newService.Annotations[key]; !present {
@@ -181,12 +174,12 @@ func patchService(storedService *corev1.Service, newService *corev1.Service, nam
       }
     }
     if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(newService); err != nil {
-      logger.Error(err, "Unable to patch redis service with comparison object")
+      log.FromContext(ctx).Error(err, "Unable to patch redis service with comparison object")
       return err
     }
-    logger.V(1).Info("Syncing Redis service with defined properties")
-    return updateService(cl, logger, namespace, newService)
+    log.FromContext(ctx).V(1).Info("Syncing Redis service with defined properties")
+    return updateService(ctx, cl, namespace, newService)
   }
-  logger.V(1).Info("Redis service is already in-sync")
+  log.FromContext(ctx).V(1).Info("Redis service is already in-sync")
   return nil
 }
diff --git a/pkg/k8sutils/services_test.go b/pkg/k8sutils/services_test.go
index 5906d61db..96e2fc1c5 100644
--- a/pkg/k8sutils/services_test.go
+++ b/pkg/k8sutils/services_test.go
@@ -4,7 +4,6 @@ import (
   "context"
   "testing"
 
-  "github.com/go-logr/logr/testr"
   "github.com/stretchr/testify/assert"
   corev1 "k8s.io/api/core/v1"
   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -311,7 +310,6 @@ func Test_createService(t *testing.T) {
   }
   for _, tt := range tests {
     t.Run(tt.name, func(t *testing.T) {
-      logger := testr.New(t)
      var k8sClient *k8sClientFake.Clientset
      if tt.exist {
        k8sClient = k8sClientFake.NewSimpleClientset(tt.service.DeepCopyObject())
@@ -319,7 +317,7 @@ func Test_createService(t *testing.T) {
        k8sClient = k8sClientFake.NewSimpleClientset()
      }
 
-      err := createService(k8sClient, logger, tt.service.GetNamespace(), tt.service)
+      err := createService(context.TODO(), k8sClient, tt.service.GetNamespace(), tt.service)
      if tt.wantErr {
        assert.Error(t, err)
      } else {
@@ -407,10 +405,9 @@ func Test_updateService(t *testing.T) {
 
   for _, tt := range tests {
     t.Run(tt.name, func(t *testing.T) {
-      logger := testr.New(t)
      k8sClient := k8sClientFake.NewSimpleClientset(tt.current.DeepCopyObject())
 
-      err := updateService(k8sClient, logger, tt.servinceNamespace, tt.updated)
+      err := updateService(context.TODO(), k8sClient, tt.servinceNamespace, tt.updated)
      if tt.wantErr {
        assert.Error(t, err)
      } else {
@@ -460,7 +457,6 @@ func Test_getService(t *testing.T) {
   }
   for _, tt := range tests {
     t.Run(tt.name, func(t *testing.T) {
-      logger := testr.New(t)
      var k8sClient *k8sClientFake.Clientset
      if tt.have != nil {
        k8sClient = k8sClientFake.NewSimpleClientset(tt.have.DeepCopyObject())
@@ -468,7 +464,7 @@ func Test_getService(t *testing.T) {
        k8sClient = k8sClientFake.NewSimpleClientset()
      }
 
-      got, err := getService(k8sClient, logger, tt.want.GetNamespace(), tt.want.GetName())
+      got, err := getService(context.TODO(), k8sClient, tt.want.GetNamespace(), tt.want.GetName())
      if tt.wantErr {
        assert.Error(t, err)
      } else {
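The deleted serviceLogger helper used to stamp Request.Service.Namespace and Request.Service.Name onto every message. A caller of the context-aware helpers can recover those fields by enriching the context logger before the call; this is a minimal sketch under the assumption that the caller controls the context it passes, and the withServiceValues wrapper is illustrative rather than part of this patch.

package main

import (
  "context"

  "sigs.k8s.io/controller-runtime/pkg/log"
  "sigs.k8s.io/controller-runtime/pkg/log/zap"
)

// withServiceValues plays the role of the deleted serviceLogger: the key/value
// pairs now travel inside the context rather than in a logr.Logger argument.
func withServiceValues(ctx context.Context, namespace, name string) context.Context {
  l := log.FromContext(ctx).WithValues(
    "Request.Service.Namespace", namespace,
    "Request.Service.Name", name,
  )
  return log.IntoContext(ctx, l)
}

func main() {
  // Seed a root logger; in the operator this is normally done by the manager.
  log.SetLogger(zap.New(zap.UseDevMode(true)))

  ctx := withServiceValues(context.Background(), "default", "redis-headless")
  // Anything downstream that calls log.FromContext(ctx), for example the
  // ctx-based CreateOrUpdateService or createService in this diff, now emits
  // both fields automatically.
  log.FromContext(ctx).Info("creating or updating service")
}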
diff --git a/pkg/k8sutils/statefulset.go b/pkg/k8sutils/statefulset.go
index bc31d4975..63060e1fb 100644
--- a/pkg/k8sutils/statefulset.go
+++ b/pkg/k8sutils/statefulset.go
@@ -11,7 +11,6 @@ import (
   redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2"
   "github.com/OT-CONTAINER-KIT/redis-operator/pkg/util"
   "github.com/banzaicloud/k8s-objectmatcher/patch"
-  "github.com/go-logr/logr"
   "github.com/pkg/errors"
   appsv1 "k8s.io/api/apps/v1"
   corev1 "k8s.io/api/core/v1"
@@ -22,6 +21,7 @@ import (
   "k8s.io/client-go/kubernetes"
   "k8s.io/utils/env"
   "k8s.io/utils/ptr"
+  "sigs.k8s.io/controller-runtime/pkg/log"
 )
 
 type StatefulSet interface {
@@ -30,14 +30,11 @@ type StatefulSet interface {
 
 type StatefulSetService struct {
   kubeClient kubernetes.Interface
-  log logr.Logger
 }
 
-func NewStatefulSetService(kubeClient kubernetes.Interface, log logr.Logger) *StatefulSetService {
-  log = log.WithValues("service", "k8s.statefulset")
+func NewStatefulSetService(kubeClient kubernetes.Interface) *StatefulSetService {
   return &StatefulSetService{
     kubeClient: kubeClient,
-    log: log,
   }
 }
 
@@ -45,13 +42,11 @@ func (s *StatefulSetService) IsStatefulSetReady(ctx context.Context, namespace,
   var (
     partition = 0
     replicas = 1
-
-    logger = s.log.WithValues("namespace", namespace, "name", name)
   )
 
   sts, err := s.kubeClient.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{})
   if err != nil {
-    logger.Error(err, "failed to get statefulset")
+    log.FromContext(ctx).Error(err, "failed to get statefulset")
     return false
   }
 
@@ -63,19 +58,19 @@ func (s *StatefulSetService) IsStatefulSetReady(ctx context.Context, namespace,
   }
 
   if expectedUpdateReplicas := replicas - partition; sts.Status.UpdatedReplicas < int32(expectedUpdateReplicas) {
-    logger.V(1).Info("StatefulSet is not ready", "Status.UpdatedReplicas", sts.Status.UpdatedReplicas, "ExpectedUpdateReplicas", expectedUpdateReplicas)
+    log.FromContext(ctx).V(1).Info("StatefulSet is not ready", "Status.UpdatedReplicas", sts.Status.UpdatedReplicas, "ExpectedUpdateReplicas", expectedUpdateReplicas)
     return false
   }
   if partition == 0 && sts.Status.CurrentRevision != sts.Status.UpdateRevision {
-    logger.V(1).Info("StatefulSet is not ready", "Status.CurrentRevision", sts.Status.CurrentRevision, "Status.UpdateRevision", sts.Status.UpdateRevision)
+    log.FromContext(ctx).V(1).Info("StatefulSet is not ready", "Status.CurrentRevision", sts.Status.CurrentRevision, "Status.UpdateRevision", sts.Status.UpdateRevision)
     return false
   }
   if sts.Status.ObservedGeneration != sts.ObjectMeta.Generation {
-    logger.V(1).Info("StatefulSet is not ready", "Status.ObservedGeneration", sts.Status.ObservedGeneration, "ObjectMeta.Generation", sts.ObjectMeta.Generation)
+    log.FromContext(ctx).V(1).Info("StatefulSet is not ready", "Status.ObservedGeneration", sts.Status.ObservedGeneration, "ObjectMeta.Generation", sts.ObjectMeta.Generation)
     return false
   }
   if int(sts.Status.ReadyReplicas) != replicas {
-    logger.V(1).Info("StatefulSet is not ready", "Status.ReadyReplicas", sts.Status.ReadyReplicas, "Replicas", replicas)
+    log.FromContext(ctx).V(1).Info("StatefulSet is not ready", "Status.ReadyReplicas", sts.Status.ReadyReplicas, "Replicas", replicas)
     return false
   }
   return true
@@ -154,25 +149,24 @@ type initContainerParameters struct {
 }
 
 // CreateOrUpdateStateFul method will create or update Redis service
-func CreateOrUpdateStateFul(cl kubernetes.Interface, logger logr.Logger, namespace string, stsMeta metav1.ObjectMeta, params statefulSetParameters, ownerDef metav1.OwnerReference, initcontainerParams initContainerParameters, containerParams containerParameters, sidecars *[]redisv1beta2.Sidecar) error {
-  storedStateful, err := GetStatefulSet(cl, logger, namespace, stsMeta.Name)
+func CreateOrUpdateStateFul(ctx context.Context, cl kubernetes.Interface, namespace string, stsMeta metav1.ObjectMeta, params statefulSetParameters, ownerDef metav1.OwnerReference, initcontainerParams initContainerParameters, containerParams containerParameters, sidecars *[]redisv1beta2.Sidecar) error {
+  storedStateful, err := GetStatefulSet(ctx, cl, namespace, stsMeta.Name)
   statefulSetDef := generateStatefulSetsDef(stsMeta, params, ownerDef, initcontainerParams, containerParams, getSidecars(sidecars))
   if err != nil {
     if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(statefulSetDef); err != nil { //nolint
-      logger.Error(err, "Unable to patch redis statefulset with comparison object")
+      log.FromContext(ctx).Error(err, "Unable to patch redis statefulset with comparison object")
       return err
     }
     if apierrors.IsNotFound(err) {
-      return createStatefulSet(cl, logger, namespace, statefulSetDef)
+      return createStatefulSet(ctx, cl, namespace, statefulSetDef)
     }
     return err
   }
-  return patchStatefulSet(storedStateful, statefulSetDef, namespace, params.RecreateStatefulSet, cl)
+  return patchStatefulSet(ctx, storedStateful, statefulSetDef, namespace, params.RecreateStatefulSet, cl)
 }
 
 // patchStateFulSet will patch Redis Kubernetes StateFulSet
-func patchStatefulSet(storedStateful *appsv1.StatefulSet, newStateful *appsv1.StatefulSet, namespace string, recreateStateFulSet bool, cl kubernetes.Interface) error {
-  logger := statefulSetLogger(namespace, storedStateful.Name)
+func patchStatefulSet(ctx context.Context, storedStateful *appsv1.StatefulSet, newStateful *appsv1.StatefulSet, namespace string, recreateStateFulSet bool, cl kubernetes.Interface) error {
   // We want to try and keep this atomic as possible.
   newStateful.ResourceVersion = storedStateful.ResourceVersion
   newStateful.CreationTimestamp = storedStateful.CreationTimestamp
@@ -185,11 +179,11 @@ func patchStatefulSet(storedStateful *appsv1.StatefulSet, newStateful *appsv1.St
     patch.IgnoreField("apiVersion"),
   )
   if err != nil {
-    logger.Error(err, "Unable to patch redis statefulset with comparison object")
+    log.FromContext(ctx).Error(err, "Unable to patch redis statefulset with comparison object")
     return err
   }
   if !patchResult.IsEmpty() {
-    logger.V(1).Info("Changes in statefulset Detected, Updating...", "patch", string(patchResult.Patch))
+    log.FromContext(ctx).V(1).Info("Changes in statefulset Detected, Updating...", "patch", string(patchResult.Patch))
     if len(newStateful.Spec.VolumeClaimTemplates) >= 1 && len(newStateful.Spec.VolumeClaimTemplates) == len(storedStateful.Spec.VolumeClaimTemplates) {
       // Field is immutable therefore we MUST keep it as is.
       if !apiequality.Semantic.DeepEqual(newStateful.Spec.VolumeClaimTemplates[0].Spec, storedStateful.Spec.VolumeClaimTemplates[0].Spec) {
@@ -237,7 +231,7 @@ func patchStatefulSet(storedStateful *appsv1.StatefulSet, newStateful *appsv1.St
            if !updateFailed {
              updateFailed = true
            }
-            logger.Error(fmt.Errorf("redis:%s resize pvc failed:%s", storedStateful.Name, err.Error()), "")
+            log.FromContext(ctx).Error(fmt.Errorf("redis:%s resize pvc failed:%s", storedStateful.Name, err.Error()), "")
          }
        }
      }
@@ -246,9 +240,9 @@ func patchStatefulSet(storedStateful *appsv1.StatefulSet, newStateful *appsv1.St
        annotations["storageCapacity"] = fmt.Sprintf("%d", stateCapacity)
        storedStateful.Annotations = annotations
        if realUpdate {
-          logger.Info(fmt.Sprintf("redis:%s resize pvc from %d to %d", storedStateful.Name, storedCapacity, stateCapacity))
+          log.FromContext(ctx).V(1).Info(fmt.Sprintf("redis:%s resize pvc from %d to %d", storedStateful.Name, storedCapacity, stateCapacity))
        } else {
-          logger.Info(fmt.Sprintf("redis:%s resize noting,just set annotations", storedStateful.Name))
+          log.FromContext(ctx).V(1).Info(fmt.Sprintf("redis:%s resize noting,just set annotations", storedStateful.Name))
        }
      }
    }
@@ -265,12 +259,12 @@ func patchStatefulSet(storedStateful *appsv1.StatefulSet, newStateful *appsv1.St
      }
    }
    if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(newStateful); err != nil {
-      logger.Error(err, "Unable to patch redis statefulset with comparison object")
+      log.FromContext(ctx).Error(err, "Unable to patch redis statefulset with comparison object")
      return err
    }
-    return updateStatefulSet(cl, logger, namespace, newStateful, recreateStateFulSet)
+    return updateStatefulSet(ctx, cl, namespace, newStateful, recreateStateFulSet)
   }
-  logger.V(1).Info("Reconciliation Complete, no Changes required.")
+  log.FromContext(ctx).V(1).Info("Reconciliation Complete, no Changes required.")
   return nil
 }
@@ -767,18 +761,18 @@ func getEnvironmentVariables(role string, enabledPassword *bool, secretName *str
 }
 
 // createStatefulSet is a method to create statefulset in Kubernetes
-func createStatefulSet(cl kubernetes.Interface, logger logr.Logger, namespace string, stateful *appsv1.StatefulSet) error {
+func createStatefulSet(ctx context.Context, cl kubernetes.Interface, namespace string, stateful *appsv1.StatefulSet) error {
   _, err := cl.AppsV1().StatefulSets(namespace).Create(context.TODO(), stateful, metav1.CreateOptions{})
   if err != nil {
-    logger.Error(err, "Redis stateful creation failed")
+    log.FromContext(ctx).Error(err, "Redis stateful creation failed")
     return err
   }
-  logger.V(1).Info("Redis stateful successfully created")
+  log.FromContext(ctx).V(1).Info("Redis stateful successfully created")
   return nil
 }
 
 // updateStatefulSet is a method to update statefulset in Kubernetes
-func updateStatefulSet(cl kubernetes.Interface, logger logr.Logger, namespace string, stateful *appsv1.StatefulSet, recreateStateFulSet bool) error {
+func updateStatefulSet(ctx context.Context, cl kubernetes.Interface, namespace string, stateful *appsv1.StatefulSet, recreateStateFulSet bool) error {
   _, err := cl.AppsV1().StatefulSets(namespace).Update(context.TODO(), stateful, metav1.UpdateOptions{})
   if recreateStateFulSet {
     sErr, ok := err.(*apierrors.StatusError)
@@ -787,7 +781,7 @@ func updateStatefulSet(cl kubernetes.Interface, logger logr.Logger, namespace st
       for messageCount, cause := range sErr.ErrStatus.Details.Causes {
         failMsg[messageCount] = cause.Message
       }
-      logger.V(1).Info("recreating StatefulSet because the update operation wasn't possible", "reason", strings.Join(failMsg, ", "))
+      log.FromContext(ctx).V(1).Info("recreating StatefulSet because the update operation wasn't possible", "reason", strings.Join(failMsg, ", "))
       propagationPolicy := metav1.DeletePropagationForeground
       if err := cl.AppsV1().StatefulSets(namespace).Delete(context.TODO(), stateful.GetName(), metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil { //nolint
         return errors.Wrap(err, "failed to delete StatefulSet to avoid forbidden action")
@@ -795,33 +789,27 @@ func updateStatefulSet(cl kubernetes.Interface, logger logr.Logger, namespace st
     }
   }
   if err != nil {
-    logger.Error(err, "Redis statefulset update failed")
+    log.FromContext(ctx).Error(err, "Redis statefulset update failed")
     return err
   }
-  logger.V(1).Info("Redis statefulset successfully updated ")
+  log.FromContext(ctx).V(1).Info("Redis statefulset successfully updated ")
   return nil
 }
 
 // GetStateFulSet is a method to get statefulset in Kubernetes
-func GetStatefulSet(cl kubernetes.Interface, logger logr.Logger, namespace string, name string) (*appsv1.StatefulSet, error) {
+func GetStatefulSet(ctx context.Context, cl kubernetes.Interface, namespace string, name string) (*appsv1.StatefulSet, error) {
   getOpts := metav1.GetOptions{
     TypeMeta: generateMetaInformation("StatefulSet", "apps/v1"),
   }
   statefulInfo, err := cl.AppsV1().StatefulSets(namespace).Get(context.TODO(), name, getOpts)
   if err != nil {
-    logger.V(1).Info("Redis statefulset get action failed")
+    log.FromContext(ctx).V(1).Info("Redis statefulset get action failed")
     return nil, err
   }
-  logger.V(1).Info("Redis statefulset get action was successful")
+  log.FromContext(ctx).V(1).Info("Redis statefulset get action was successful")
   return statefulInfo, nil
 }
 
-// statefulSetLogger will generate logging interface for Statfulsets
-func statefulSetLogger(namespace string, name string) logr.Logger {
-  reqLogger := log.WithValues("Request.StatefulSet.Namespace", namespace, "Request.StatefulSet.Name", name)
-  return reqLogger
-}
-
 func getSidecars(sidecars *[]redisv1beta2.Sidecar) []redisv1beta2.Sidecar {
   if sidecars == nil {
     return []redisv1beta2.Sidecar{}
diff --git a/pkg/k8sutils/statefulset_test.go b/pkg/k8sutils/statefulset_test.go
index f7d8ea7ae..b412c7456 100644
--- a/pkg/k8sutils/statefulset_test.go
+++ b/pkg/k8sutils/statefulset_test.go
@@ -7,7 +7,6 @@ import (
 
   common "github.com/OT-CONTAINER-KIT/redis-operator/api"
   redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2"
-  "github.com/go-logr/logr"
   "github.com/stretchr/testify/assert"
   appsv1 "k8s.io/api/apps/v1"
   corev1 "k8s.io/api/core/v1"
@@ -196,8 +195,6 @@ func TestGetVolumeMount(t *testing.T) {
 }
 
 func Test_GetStatefulSet(t *testing.T) {
-  logger := logr.Discard()
-
   tests := []struct {
     name string
     sts appsv1.StatefulSet
@@ -229,7 +226,7 @@ func Test_GetStatefulSet(t *testing.T) {
   for _, test := range tests {
     t.Run(test.name, func(t *testing.T) {
       client := k8sClientFake.NewSimpleClientset(test.sts.DeepCopy())
-      _, err := GetStatefulSet(client, logger, test.stsNamespace, test.stsName)
+      _, err := GetStatefulSet(context.TODO(), client, test.stsNamespace, test.stsName)
      if test.present {
        assert.Nil(t, err)
      } else {
@@ -240,8 +237,6 @@ func Test_GetStatefulSet(t *testing.T) {
 }
 
 func Test_createStatefulSet(t *testing.T) {
-  logger := logr.Discard()
-
   tests := []struct {
     name string
     sts appsv1.StatefulSet
@@ -279,7 +274,7 @@ func Test_createStatefulSet(t *testing.T) {
      } else {
        client = k8sClientFake.NewSimpleClientset()
      }
-      err := createStatefulSet(client, logger, test.sts.GetNamespace(), &test.sts)
+      err := createStatefulSet(context.TODO(), client, test.sts.GetNamespace(), &test.sts)
      if test.present {
        assert.NotNil(t, err)
      } else {
@@ -290,7 +285,6 @@ func Test_createStatefulSet(t *testing.T) {
 }
 
 func TestUpdateStatefulSet(t *testing.T) {
-  logger := logr.Discard()
   tests := []struct {
     name string
     existingStsSpec appsv1.StatefulSetSpec
@@ -395,7 +389,7 @@ func TestUpdateStatefulSet(t *testing.T) {
      } else {
        client = k8sClientFake.NewSimpleClientset()
      }
-      err := updateStatefulSet(client, logger, updatedSts.GetNamespace(), &updatedSts, test.recreateSts)
+      err := updateStatefulSet(context.TODO(), client, updatedSts.GetNamespace(), &updatedSts, test.recreateSts)
      if test.expectErr != nil {
        assert.Error(err, "Expected Error while updating Statefulset")
        assert.Equal(test.expectErr, err)
@@ -412,7 +406,6 @@ func TestUpdateStatefulSet(t *testing.T) {
 }
 
 func TestCreateOrUpdateStateFul(t *testing.T) {
-  logger := logr.Discard()
   tests := []struct {
     name string
     stsParams statefulSetParameters
@@ -554,7 +547,7 @@ func TestCreateOrUpdateStateFul(t *testing.T) {
      } else {
        client = k8sClientFake.NewSimpleClientset()
      }
-      err := CreateOrUpdateStateFul(client, logger, updatedSts.GetNamespace(), updatedSts.ObjectMeta, test.stsParams, test.stsOwnerDef, test.initContainerParams, test.containerParams, test.sidecar)
+      err := CreateOrUpdateStateFul(context.TODO(), client, updatedSts.GetNamespace(), updatedSts.ObjectMeta, test.stsParams, test.stsOwnerDef, test.initContainerParams, test.containerParams, test.sidecar)
      if test.expectErr != nil {
        assert.Error(err, "Expected Error while updating Statefulset")
        assert.Equal(test.expectErr, err)
@@ -577,7 +570,7 @@ func TestCreateOrUpdateStateFul(t *testing.T) {
 
        client = k8sClientFake.NewSimpleClientset()
 
-        err := CreateOrUpdateStateFul(client, logger, updatedSts.GetNamespace(), updatedSts.ObjectMeta, test.stsParams, test.stsOwnerDef, test.initContainerParams, test.containerParams, test.sidecar)
+        err := CreateOrUpdateStateFul(context.TODO(), client, updatedSts.GetNamespace(), updatedSts.ObjectMeta, test.stsParams, test.stsOwnerDef, test.initContainerParams, test.containerParams, test.sidecar)
        assert.Nil(err)
      }
    })
diff --git a/pkg/k8sutils/status.go b/pkg/k8sutils/status.go
index aa19ea150..9f2f19216 100644
--- a/pkg/k8sutils/status.go
+++ b/pkg/k8sutils/status.go
@@ -6,23 +6,16 @@ import (
 
   "github.com/OT-CONTAINER-KIT/redis-operator/api/status"
   redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2"
-  "github.com/go-logr/logr"
   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
   "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
   "k8s.io/apimachinery/pkg/runtime"
   "k8s.io/apimachinery/pkg/runtime/schema"
   "k8s.io/client-go/dynamic"
+  "sigs.k8s.io/controller-runtime/pkg/log"
 )
 
-// statusLogger will generate logging interface for status
-func statusLogger(namespace string, name string) logr.Logger {
-  reqLogger := log.WithValues("Request.Namespace", namespace, "Request.Name", name)
-  return reqLogger
-}
-
 // UpdateRedisClusterStatus will update the status of the RedisCluster
-func UpdateRedisClusterStatus(cr *redisv1beta2.RedisCluster, state status.RedisClusterState, reason string, readyLeaderReplicas, readyFollowerReplicas int32, dcl dynamic.Interface) error {
-  logger := statusLogger(cr.Namespace, cr.Name)
+func UpdateRedisClusterStatus(ctx context.Context, cr *redisv1beta2.RedisCluster, state status.RedisClusterState, reason string, readyLeaderReplicas, readyFollowerReplicas int32, dcl dynamic.Interface) error {
   newStatus := redisv1beta2.RedisClusterStatus{
     State: state,
     Reason: reason,
@@ -40,14 +33,14 @@ func UpdateRedisClusterStatus(cr *redisv1beta2.RedisCluster, state status.RedisC
   }
   unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(cr)
   if err != nil {
-    logger.Error(err, "Failed to convert CR to unstructured object")
+    log.FromContext(ctx).Error(err, "Failed to convert CR to unstructured object")
     return err
   }
   unstructuredRedisCluster := &unstructured.Unstructured{Object: unstructuredObj}
 
   _, err = dcl.Resource(gvr).Namespace(cr.Namespace).UpdateStatus(context.TODO(), unstructuredRedisCluster, metav1.UpdateOptions{})
   if err != nil {
-    logger.Error(err, "Failed to update status")
+    log.FromContext(ctx).Error(err, "Failed to update status")
     return err
   }
   return nil