From efb96249a7f1883e27be38d31eec60ac787a85d9 Mon Sep 17 00:00:00 2001
From: Dariusch Ochlast
Date: Fri, 8 Nov 2024 03:46:25 +0100
Subject: [PATCH 01/28] feat: add PDB and probes, drop unspecified acl in
 sentinel helm (#1123)

Signed-off-by: Dariusch Ochlast
---
 charts/redis-sentinel/README.md               | 14 ++++++++++-
 .../templates/redis-sentinel.yaml             | 23 +++++++++++++------
 charts/redis-sentinel/values.yaml             | 23 +++++++++++++----
 3 files changed, 48 insertions(+), 12 deletions(-)

diff --git a/charts/redis-sentinel/README.md b/charts/redis-sentinel/README.md
index 8cb0ae420..5f6e82e81 100644
--- a/charts/redis-sentinel/README.md
+++ b/charts/redis-sentinel/README.md
@@ -48,7 +48,6 @@ helm delete --namespace
 | TLS.cert | string | `"tls.crt"` | |
 | TLS.key | string | `"tls.key"` | |
 | TLS.secret.secretName | string | `""` | |
-| acl.secret.secretName | string | `""` | |
 | affinity | object | `{}` | |
 | env | list | `[]` | |
 | externalConfig.data | string | `"tcp-keepalive 400\nslowlog-max-len 158\nstream-node-max-bytes 2048\n"` | |
@@ -64,10 +63,23 @@ helm delete --namespace
 | initContainer.imagePullPolicy | string | `"IfNotPresent"` | |
 | initContainer.resources | object | `{}` | |
 | labels | object | `{}` | |
+| livenessProbe.failureThreshold | int | `3` | |
+| livenessProbe.initialDelaySeconds | int | `1` | |
+| livenessProbe.periodSeconds | int | `10` | |
+| livenessProbe.successThreshold | int | `1` | |
+| livenessProbe.timeoutSeconds | int | `1` | |
 | nodeSelector | object | `{}` | |
+| pdb.enabled | bool | `false` | |
+| pdb.maxUnavailable | string | `nil` | |
+| pdb.minAvailable | int | `1` | |
 | podSecurityContext.fsGroup | int | `1000` | |
 | podSecurityContext.runAsUser | int | `1000` | |
 | priorityClassName | string | `""` | |
+| readinessProbe.failureThreshold | int | `3` | |
+| readinessProbe.initialDelaySeconds | int | `1` | |
+| readinessProbe.periodSeconds | int | `10` | |
+| readinessProbe.successThreshold | int | `1` | |
+| readinessProbe.timeoutSeconds | int | `1` | |
 | redisExporter.enabled | bool | `false` | |
 | redisExporter.env | list | `[]` | |
 | redisExporter.image | string | `"quay.io/opstree/redis-exporter"` | |
diff --git a/charts/redis-sentinel/templates/redis-sentinel.yaml b/charts/redis-sentinel/templates/redis-sentinel.yaml
index 7a4313362..4ad8ac315 100644
--- a/charts/redis-sentinel/templates/redis-sentinel.yaml
+++ b/charts/redis-sentinel/templates/redis-sentinel.yaml
@@ -6,7 +6,7 @@ metadata:
   labels: {{- include "common.labels" . | nindent 4 }}
 spec:
   clusterSize: {{ .Values.redisSentinel.clusterSize }}
-# Sentinel Config
+  # Sentinel Config
   redisSentinelConfig:
     redisReplicationName: {{ .Values.redisSentinelConfig.redisReplicationName}}
 {{- if and .Values.redisSentinelConfig.redisReplicationPassword.secretName .Values.redisSentinelConfig.redisReplicationPassword.secretKey }}
@@ -52,11 +52,11 @@ spec:
     imagePullPolicy: "{{ .Values.redisExporter.imagePullPolicy }}"
     {{- if .Values.redisExporter.resources}}
     resources: {{ toYaml .Values.redisExporter.resources | nindent 6 }}
-    {{- end }}
+    {{- end }}
     {{- if .Values.redisExporter.env }}
     env: {{ toYaml .Values.redisExporter.env | nindent 6 }}
     {{- end }}
-
+
 {{- if .Values.nodeSelector }}
   nodeSelector: {{ toYaml .Values.nodeSelector | nindent 4 }}
 {{- end }}
@@ -83,10 +83,19 @@ spec:
       secret:
         secretName: {{ .Values.TLS.secret.secretName | quote }}
   {{- end }}
-  {{- if and .Values.acl.secret (ne .Values.acl.secret.secretName "") }}
-  acl:
-    secret:
-      secretName: {{ .Values.acl.secret.secretName | quote }}
+  {{- if .Values.pdb.enabled }}
+  pdb:
+    enabled: {{ .Values.pdb.enabled }}
+    minAvailable: {{ .Values.pdb.minAvailable }}
+    maxUnavailable: {{ .Values.pdb.maxUnavailable }}
+  {{- end }}
+  {{- with .Values.livenessProbe }}
+  livenessProbe:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+  {{- with .Values.readinessProbe }}
+  readinessProbe:
+    {{- toYaml . | nindent 4 }}
   {{- end }}
   {{- if and .Values.initContainer .Values.initContainer.enabled (ne .Values.initContainer.image "") }}
   initContainer: {{ include "initContainer.properties" | nindent 4 }}
diff --git a/charts/redis-sentinel/values.yaml b/charts/redis-sentinel/values.yaml
index 61cff67c2..fc1ee723d 100644
--- a/charts/redis-sentinel/values.yaml
+++ b/charts/redis-sentinel/values.yaml
@@ -21,7 +21,7 @@ redisSentinel:
   ignoreAnnotations: []
   # - "redis.opstreelabs.in/ignore"
   minReadySeconds: 0
-
+
   # Overwite name for resources
   # name: ""

@@ -143,9 +143,24 @@ TLS:
   secret:
     secretName: ""

-acl:
-  secret:
-    secretName: ""
+pdb:
+  enabled: false
+  minAvailable: 1
+  maxUnavailable: null
+
+livenessProbe:
+  failureThreshold: 3
+  initialDelaySeconds: 1
+  periodSeconds: 10
+  successThreshold: 1
+  timeoutSeconds: 1
+
+readinessProbe:
+  failureThreshold: 3
+  initialDelaySeconds: 1
+  periodSeconds: 10
+  successThreshold: 1
+  timeoutSeconds: 1

 env: []
 # - name: VAR_NAME

From 147d7069f1c9f032859936dc0567c9f161adae73 Mon Sep 17 00:00:00 2001
From: yangw
Date: Mon, 11 Nov 2024 15:06:00 +0800
Subject: [PATCH 02/28] feat: add master/replica service to redis replication
 (#1124)

* refactor

Signed-off-by: drivebyer

* feat: add master/replica service to redis replication

Signed-off-by: drivebyer

* fix docs

Signed-off-by: drivebyer

* fix docs

Signed-off-by: drivebyer

---------

Signed-off-by: drivebyer
---
 api/common_types.go                           |  14 +++
 .../en/docs/Configuration/Redis/_index.md     |  91 +++++++-----
 .../docs/Configuration/RedisCluster/_index.md | 113 ++++++++++------
 .../Configuration/RedisReplication/_index.md  |  92 +++++++-----
 pkg/k8sutils/redis-cluster.go                 |  27 ++---
 pkg/k8sutils/redis-replication.go             |  43 ++++---
 pkg/k8sutils/redis-sentinel.go                |  22 ++--
 pkg/k8sutils/redis-standalone.go              |  21 ++--
 pkg/util/map.go                               |  13 ++
 .../setup/redis-replication/ready-svc.yaml    |  65 ++++++++++
 10 files changed, 351 insertions(+), 150 deletions(-)
 create mode 100644 pkg/util/map.go

diff --git a/api/common_types.go b/api/common_types.go
index c0212cf31..378bda1bc 100644
--- a/api/common_types.go
+++ b/api/common_types.go
@@ -19,6 +19,20 @@ type KubernetesConfig struct {
 	MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
 }

+func (in *KubernetesConfig) GetServiceType() string {
+	if in.Service == nil {
+		return "ClusterIP"
+	}
+	return in.Service.ServiceType
+}
+
+func (in *KubernetesConfig) GetServiceAnnotations() map[string]string {
+	if in.Service == nil {
+		return nil
+	}
+	return in.Service.ServiceAnnotations
+}
+
 // ServiceConfig define the type of service to be created and its annotations
 // +k8s:deepcopy-gen=true
 type ServiceConfig struct {
diff --git a/docs/content/en/docs/Configuration/Redis/_index.md b/docs/content/en/docs/Configuration/Redis/_index.md
index 460f7e007..a6748d514 100644
--- a/docs/content/en/docs/Configuration/Redis/_index.md
+++ b/docs/content/en/docs/Configuration/Redis/_index.md
@@ -7,36 +7,67 @@ description: >
   Configurations and parameters for Redis standalone
 ---

-Redis standalone configuration can be customized by [values.yaml](https://github.com/OT-CONTAINER-KIT/helm-charts/blob/main/charts/redis/values.yaml). The recommended way of managing the setup is using `helm` but if the setup is not maintained by it, `YAML` CRD parameters can be modified in the manifest.
+Redis standalone configuration can be customized by [values.yaml](https://github.com/OT-CONTAINER-KIT/redis-operator/blob/master/charts/redis/values.yaml). The recommended way of managing the setup is using `helm` but if the setup is not maintained by it, `YAML` CRD parameters can be modified in the manifest.

 ## Helm Configuration Parameters

-| **Name** | **Value** | **Description** |
-|-----------------------------------|--------------------------------|-----------------------------------------------------------------------------------------------|
-| `imagePullSecrets` | [] | List of image pull secrets, in case redis image is getting pull from private registry |
-| `redisStandalone.secretName` | redis-secret | Name of the existing secret in Kubernetes |
-| `redisStandalone.secretKey` | password | Name of the existing secret key in Kubernetes |
-| `redisStandalone.image` | quay.io/opstree/redis | Name of the redis image |
-| `redisStandalone.tag` | v7.0.15 | Tag of the redis image |
-| `redisStandalone.imagePullPolicy` | IfNotPresent | Image Pull Policy of the redis image |
-| `redisStandalone.resources` | {} | Request and limits for redis statefulset |
-| `externalService.enabled` | false | If redis service needs to be exposed using LoadBalancer or NodePort |
-| `externalService.annotations` | {} | Kubernetes service related annotations |
-| `externalService.serviceType` | NodePort | Kubernetes service type for exposing service, values - ClusterIP, NodePort, and LoadBalancer |
-| `externalService.port` | 6379 | Port number on which redis external service should be exposed |
-| `serviceMonitor.enabled` | false | Servicemonitor to monitor redis with Prometheus |
-| `serviceMonitor.interval` | 30s | Interval at which metrics should be scraped. |
-| `serviceMonitor.scrapeTimeout` | 10s | Timeout after which the scrape is ended |
-| `serviceMonitor.namespace` | monitoring | Namespace in which Prometheus operator is running |
-| `redisExporter.enabled` | true | Redis exporter should be deployed or not |
-| `redisExporter.image` | quay.io/opstree/redis-exporter | Name of the redis exporter image |
-| `redisExporter.tag` | v1.44.0 | Tag of the redis exporter image |
-| `redisExporter.imagePullPolicy` | IfNotPresent | Image Pull Policy of the redis exporter image |
-| `redisExporter.env` | [] | Extra environment variables which needs to be added in redis exporter |
-| `nodeSelector` | {} | NodeSelector for redis statefulset |
-| `priorityClassName` | "" | Priority class name for the redis statefulset |
-| `storageSpec` | {} | Storage configuration for redis setup |
-| `securityContext` | {} | Security Context for redis pods for changing system or kernel level parameters |
-| `affinity` | {} | Affinity for node and pod for redis statefulset |
-| `tolerations` | [] | Tolerations for redis statefulset |
-| `sidecars` | [] | Sidecar containers to run alongside Redis pods |
+| Key | Type | Default | Description |
+|-----------------------------------------------------------------|--------|--------------------------------------------------------------------------|-------------|
+| TLS.ca | string | `"ca.key"` | |
+| TLS.cert | string | `"tls.crt"` | |
+| TLS.key | string | `"tls.key"` | |
+| TLS.secret.secretName | string | `""` | |
+| acl.secret.secretName | string | `""` | |
+| affinity | object | `{}` | |
+| env | list | `[]` | |
+| externalConfig.data | string | `"tcp-keepalive 400\nslowlog-max-len 158\nstream-node-max-bytes 2048\n"` | |
+| externalConfig.enabled | bool | `false` | |
+| externalService.enabled | bool | `false` | |
+| externalService.port | int | `6379` | |
+| externalService.serviceType | string | `"NodePort"` | |
+| initContainer.args | list | `[]` | |
+| initContainer.command | list | `[]` | |
+| initContainer.enabled | bool | `false` | |
+| initContainer.env | list | `[]` | |
+| initContainer.image | string | `""` | |
+| initContainer.imagePullPolicy | string | `"IfNotPresent"` | |
+| initContainer.resources | object | `{}` | |
+| labels | object | `{}` | |
+| nodeSelector | object | `{}` | |
+| podSecurityContext.fsGroup | int | `1000` | |
+| podSecurityContext.runAsUser | int | `1000` | |
+| priorityClassName | string | `""` | |
+| redisExporter.enabled | bool | `false` | |
+| redisExporter.env | list | `[]` | |
+| redisExporter.image | string | `"quay.io/opstree/redis-exporter"` | |
+| redisExporter.imagePullPolicy | string | `"IfNotPresent"` | |
+| redisExporter.resources | object | `{}` | |
+| redisExporter.tag | string | `"v1.44.0"` | |
+| redisStandalone.ignoreAnnotations | list | `[]` | |
+| redisStandalone.image | string | `"quay.io/opstree/redis"` | |
+| redisStandalone.imagePullPolicy | string | `"IfNotPresent"` | |
+| redisStandalone.imagePullSecrets | list | `[]` | |
+| redisStandalone.minReadySeconds | int | `0` | |
+| redisStandalone.name | string | `""` | |
+| redisStandalone.redisSecret.secretKey | string | `""` | |
+| redisStandalone.redisSecret.secretName | string | `""` | |
+| redisStandalone.resources | object | `{}` | |
+| redisStandalone.serviceType | string | `"ClusterIP"` | |
+| redisStandalone.tag | string | `"v7.0.15"` | |
+| securityContext | object | `{}` | |
+| serviceAccountName | string | `""` | |
+| serviceMonitor.enabled | bool | `false` | |
+| serviceMonitor.interval | string | `"30s"` | |
`"30s"` | | +| serviceMonitor.namespace | string | `"monitoring"` | | +| serviceMonitor.scrapeTimeout | string | `"10s"` | | +| sidecars.env | list | `[]` | | +| sidecars.image | string | `""` | | +| sidecars.imagePullPolicy | string | `"IfNotPresent"` | | +| sidecars.name | string | `""` | | +| sidecars.resources.limits.cpu | string | `"100m"` | | +| sidecars.resources.limits.memory | string | `"128Mi"` | | +| sidecars.resources.requests.cpu | string | `"50m"` | | +| sidecars.resources.requests.memory | string | `"64Mi"` | | +| storageSpec.volumeClaimTemplate.spec.accessModes[0] | string | `"ReadWriteOnce"` | | +| storageSpec.volumeClaimTemplate.spec.resources.requests.storage | string | `"1Gi"` | | +| tolerations | list | `[]` | | \ No newline at end of file diff --git a/docs/content/en/docs/Configuration/RedisCluster/_index.md b/docs/content/en/docs/Configuration/RedisCluster/_index.md index a4a85b1ec..86bbc73a7 100644 --- a/docs/content/en/docs/Configuration/RedisCluster/_index.md +++ b/docs/content/en/docs/Configuration/RedisCluster/_index.md @@ -7,40 +7,85 @@ description: > Configurations and parameters for Redis cluster --- -Redis cluster can be customized by [values.yaml](https://github.com/OT-CONTAINER-KIT/helm-charts/blob/main/charts/redis-cluster/values.yaml). The recommended way of managing the setup is using `helm` but if the setup is not maintained by it, `YAML` CRD parameters can be modified in the manifest. +Redis cluster can be customized by [values.yaml](https://github.com/OT-CONTAINER-KIT/redis-operator/blob/master/charts/redis-cluster/values.yaml). The recommended way of managing the setup is using `helm` but if the setup is not maintained by it, `YAML` CRD parameters can be modified in the manifest. ## Helm Configuration Parameters -| **Name** | **Default Value** | **Description** | -|------------------------------------|--------------------------------|----------------------------------------------------------------------------------------------| -| `imagePullSecrets` | [] | List of image pull secrets, in case redis image is getting pull from private registry | -| `redisCluster.clusterSize` | 3 | Size of the redis cluster leader and follower nodes | -| `redisCluster.clusterVersion` | v7 | Major version of Redis setup, values can be v6 or v7 | -| `redisCluster.persistenceEnabled` | true | Persistence should be enabled or not in the Redis cluster setup | -| `redisCluster.secretName` | redis-secret | Name of the existing secret in Kubernetes | -| `redisCluster.secretKey` | password | Name of the existing secret key in Kubernetes | -| `redisCluster.image` | quay.io/opstree/redis | Name of the redis image | -| `redisCluster.tag` | v7.0.15 | Tag of the redis image | -| `redisCluster.imagePullPolicy` | IfNotPresent | Image Pull Policy of the redis image | -| `redisCluster.leaderServiceType` | ClusterIP | Kubernetes service type for Redis Leader | -| `redisCluster.followerServiceType` | ClusterIP | Kubernetes service type for Redis Follower | -| `externalService.enabled` | false | If redis service needs to be exposed using LoadBalancer or NodePort | -| `externalService.annotations` | {} | Kubernetes service related annotations | -| `externalService.serviceType` | NodePort | Kubernetes service type for exposing service, values - ClusterIP, NodePort, and LoadBalancer | -| `externalService.port` | 6379 | Port number on which redis external service should be exposed | -| `serviceMonitor.enabled` | false | Servicemonitor to monitor redis with Prometheus | -| 
-| `serviceMonitor.interval` | 30s | Interval at which metrics should be scraped. |
-| `serviceMonitor.scrapeTimeout` | 10s | Timeout after which the scrape is ended |
-| `serviceMonitor.namespace` | monitoring | Namespace in which Prometheus operator is running |
-| `redisExporter.enabled` | true | Redis exporter should be deployed or not |
-| `redisExporter.image` | quay.io/opstree/redis-exporter | Name of the redis exporter image |
-| `redisExporter.tag` | v1.44.0 | Tag of the redis exporter image |
-| `redisExporter.imagePullPolicy` | IfNotPresent | Image Pull Policy of the redis exporter image |
-| `redisExporter.env` | [] | Extra environment variables which needs to be added in redis exporter |
-| `sidecars` | [] | Sidecar container to run alongside Redis pods |
-| `nodeSelector` | {} | NodeSelector for redis statefulset |
-| `priorityClassName` | "" | Priority class name for the redis statefulset |
-| `storageSpec` | {} | Storage configuration for redis setup |
-| `securityContext` | {} | Security Context for redis pods for changing system or kernel level parameters |
-| `affinity` | {} | Affinity for node and pods for redis statefulset |
-| `tolerations` | [] | Tolerations for redis statefulset management |
+| Key | Type | Default | Description |
+|-------------------------------------------------------------------------|--------|--------------------------------------------------------------------------|-------------|
+| TLS.ca | string | `"ca.key"` | |
+| TLS.cert | string | `"tls.crt"` | |
+| TLS.key | string | `"tls.key"` | |
+| TLS.secret.secretName | string | `""` | |
+| acl.secret.secretName | string | `""` | |
+| env | list | `[]` | |
+| externalConfig.data | string | `"tcp-keepalive 400\nslowlog-max-len 158\nstream-node-max-bytes 2048\n"` | |
+| externalConfig.enabled | bool | `false` | |
+| externalService.enabled | bool | `false` | |
+| externalService.port | int | `6379` | |
+| externalService.serviceType | string | `"LoadBalancer"` | |
+| initContainer.args | list | `[]` | |
+| initContainer.command | list | `[]` | |
+| initContainer.enabled | bool | `false` | |
+| initContainer.env | list | `[]` | |
+| initContainer.image | string | `""` | |
+| initContainer.imagePullPolicy | string | `"IfNotPresent"` | |
+| initContainer.resources | object | `{}` | |
+| labels | object | `{}` | |
+| podSecurityContext.fsGroup | int | `1000` | |
+| podSecurityContext.runAsUser | int | `1000` | |
+| priorityClassName | string | `""` | |
+| redisCluster.clusterSize | int | `3` | |
+| redisCluster.clusterVersion | string | `"v7"` | |
+| redisCluster.follower.affinity | string | `nil` | |
+| redisCluster.follower.nodeSelector | string | `nil` | |
+| redisCluster.follower.pdb.enabled | bool | `false` | |
+| redisCluster.follower.pdb.maxUnavailable | int | `1` | |
+| redisCluster.follower.pdb.minAvailable | int | `1` | |
+| redisCluster.follower.replicas | int | `3` | |
+| redisCluster.follower.securityContext | object | `{}` | |
+| redisCluster.follower.serviceType | string | `"ClusterIP"` | |
+| redisCluster.follower.tolerations | list | `[]` | |
+| redisCluster.image | string | `"quay.io/opstree/redis"` | |
+| redisCluster.imagePullPolicy | string | `"IfNotPresent"` | |
+| redisCluster.imagePullSecrets | object | `{}` | |
+| redisCluster.leader.affinity | object | `{}` | |
+| redisCluster.leader.nodeSelector | string | `nil` | |
+| redisCluster.leader.pdb.enabled | bool | `false` | |
+| redisCluster.leader.pdb.maxUnavailable | int | `1` | |
+| redisCluster.leader.pdb.minAvailable | int | `1` | |
+| redisCluster.leader.replicas | int | `3` | |
+| redisCluster.leader.securityContext | object | `{}` | |
+| redisCluster.leader.serviceType | string | `"ClusterIP"` | |
+| redisCluster.leader.tolerations | list | `[]` | |
+| redisCluster.minReadySeconds | int | `0` | |
+| redisCluster.name | string | `""` | |
+| redisCluster.persistenceEnabled | bool | `true` | |
+| redisCluster.redisSecret.secretKey | string | `""` | |
+| redisCluster.redisSecret.secretName | string | `""` | |
+| redisCluster.resources | object | `{}` | |
+| redisCluster.tag | string | `"v7.0.15"` | |
+| redisExporter.enabled | bool | `false` | |
+| redisExporter.env | list | `[]` | |
+| redisExporter.image | string | `"quay.io/opstree/redis-exporter"` | |
+| redisExporter.imagePullPolicy | string | `"IfNotPresent"` | |
+| redisExporter.resources | object | `{}` | |
+| redisExporter.tag | string | `"v1.44.0"` | |
+| serviceAccountName | string | `""` | |
+| serviceMonitor.enabled | bool | `false` | |
+| serviceMonitor.interval | string | `"30s"` | |
+| serviceMonitor.namespace | string | `"monitoring"` | |
+| serviceMonitor.scrapeTimeout | string | `"10s"` | |
+| sidecars.env | object | `{}` | |
+| sidecars.image | string | `""` | |
+| sidecars.imagePullPolicy | string | `"IfNotPresent"` | |
+| sidecars.name | string | `""` | |
+| sidecars.resources.limits.cpu | string | `"100m"` | |
+| sidecars.resources.limits.memory | string | `"128Mi"` | |
+| sidecars.resources.requests.cpu | string | `"50m"` | |
+| sidecars.resources.requests.memory | string | `"64Mi"` | |
+| storageSpec.nodeConfVolume | bool | `true` | |
+| storageSpec.nodeConfVolumeClaimTemplate.spec.accessModes[0] | string | `"ReadWriteOnce"` | |
+| storageSpec.nodeConfVolumeClaimTemplate.spec.resources.requests.storage | string | `"1Gi"` | |
+| storageSpec.volumeClaimTemplate.spec.accessModes[0] | string | `"ReadWriteOnce"` | |
+| storageSpec.volumeClaimTemplate.spec.resources.requests.storage | string | `"1Gi"` | |
\ No newline at end of file
diff --git a/docs/content/en/docs/Configuration/RedisReplication/_index.md b/docs/content/en/docs/Configuration/RedisReplication/_index.md
index a51fc3da1..d9e8ab1a0 100644
--- a/docs/content/en/docs/Configuration/RedisReplication/_index.md
+++ b/docs/content/en/docs/Configuration/RedisReplication/_index.md
@@ -7,36 +7,68 @@ description: >
   Configurations and parameters for Redis replication
 ---

-Redis replication configuration can be customized by [values.yaml](https://github.com/OT-CONTAINER-KIT/helm-charts/blob/main/charts/redis-replication/values.yaml). The recommended way of managing the setup is using `helm` but if the setup is not maintained by it, `YAML` CRD parameters can be modified in the manifest.
+Redis replication configuration can be customized by [values.yaml](https://github.com/OT-CONTAINER-KIT/redis-operator/blob/master/charts/redis-replication/values.yaml). The recommended way of managing the setup is using `helm` but if the setup is not maintained by it, `YAML` CRD parameters can be modified in the manifest.
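+
+As a quick illustration (a minimal sketch: the release name, the `ot-helm` repository alias, and the override values below are illustrative assumptions rather than chart defaults), the parameters in the table that follows can be overridden from a custom values file:
+
+```yaml
+# my-values.yaml -- hypothetical override file; keys mirror the parameter table below
+redisReplication:
+  clusterSize: 3
+  image: quay.io/opstree/redis
+  tag: v7.0.15
+redisExporter:
+  enabled: true
+```
+
+and applied with `helm upgrade --install my-replication ot-helm/redis-replication -f my-values.yaml`.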

 ## Helm Configuration Parameters

-| **Name** | **Value** | **Description** |
-|-----------------------------------|--------------------------------|-----------------------------------------------------------------------------------------------|
-| `imagePullSecrets` | [] | List of image pull secrets, in case redis image is getting pull from private registry |
-| `redisReplication.secretName` | redis-secret | Name of the existing secret in Kubernetes |
-| `redisReplication.secretKey` | password | Name of the existing secret key in Kubernetes |
-| `redisReplication.image` | quay.io/opstree/redis | Name of the redis image |
-| `redisReplication.tag` | v7.0.15 | Tag of the redis image |
-| `redisReplication.imagePullPolicy` | IfNotPresent | Image Pull Policy of the redis image |
-| `redisReplication.resources` | {} | Request and limits for redis statefulset |
-| `externalService.enabled` | false | If redis service needs to be exposed using LoadBalancer or NodePort |
-| `externalService.annotations` | {} | Kubernetes service related annotations |
-| `externalService.serviceType` | NodePort | Kubernetes service type for exposing service, values - ClusterIP, NodePort, and LoadBalancer |
-| `externalService.port` | 6379 | Port number on which redis external service should be exposed |
-| `serviceMonitor.enabled` | false | Servicemonitor to monitor redis with Prometheus |
-| `serviceMonitor.interval` | 30s | Interval at which metrics should be scraped. |
-| `serviceMonitor.scrapeTimeout` | 10s | Timeout after which the scrape is ended |
-| `serviceMonitor.namespace` | monitoring | Namespace in which Prometheus operator is running |
-| `redisExporter.enabled` | true | Redis exporter should be deployed or not |
-| `redisExporter.image` | quay.io/opstree/redis-exporter | Name of the redis exporter image |
-| `redisExporter.tag` | v1.44.0 | Tag of the redis exporter image |
-| `redisExporter.imagePullPolicy` | IfNotPresent | Image Pull Policy of the redis exporter image |
-| `redisExporter.env` | [] | Extra environment variables which needs to be added in redis exporter |
-| `nodeSelector` | {} | NodeSelector for redis statefulset |
-| `priorityClassName` | "" | Priority class name for the redis statefulset |
-| `storageSpec` | {} | Storage configuration for redis setup |
-| `securityContext` | {} | Security Context for redis pods for changing system or kernel level parameters |
-| `affinity` | {} | Affinity for node and pod for redis statefulset |
-| `tolerations` | [] | Tolerations for redis statefulset |
-| `sidecars` | [] | Sidecar containers to run alongside Redis pods |
+| Key | Type | Default | Description |
+|-----------------------------------------------------------------|--------|--------------------------------------------------------------------------|-------------|
+| TLS.ca | string | `"ca.key"` | |
+| TLS.cert | string | `"tls.crt"` | |
+| TLS.key | string | `"tls.key"` | |
+| TLS.secret.secretName | string | `""` | |
+| acl.secret.secretName | string | `""` | |
+| affinity | object | `{}` | |
+| env | list | `[]` | |
+| externalConfig.data | string | `"tcp-keepalive 400\nslowlog-max-len 158\nstream-node-max-bytes 2048\n"` | |
+| externalConfig.enabled | bool | `false` | |
+| externalService.enabled | bool | `false` | |
+| externalService.port | int | `6379` | |
+| externalService.serviceType | string | `"NodePort"` | |
+| initContainer.args | list | `[]` | |
+| initContainer.command | list | `[]` | |
+| initContainer.enabled | bool | `false` | |
+| initContainer.env | list | `[]` | |
+| initContainer.image | string | `""` | |
+| initContainer.imagePullPolicy | string | `"IfNotPresent"` | |
+| initContainer.resources | object | `{}` | |
+| labels | object | `{}` | |
+| nodeSelector | object | `{}` | |
+| podSecurityContext.fsGroup | int | `1000` | |
+| podSecurityContext.runAsUser | int | `1000` | |
+| priorityClassName | string | `""` | |
+| redisExporter.enabled | bool | `false` | |
+| redisExporter.env | list | `[]` | |
+| redisExporter.image | string | `"quay.io/opstree/redis-exporter"` | |
+| redisExporter.imagePullPolicy | string | `"IfNotPresent"` | |
+| redisExporter.resources | object | `{}` | |
+| redisExporter.tag | string | `"v1.44.0"` | |
+| redisReplication.clusterSize | int | `3` | |
+| redisReplication.ignoreAnnotations | list | `[]` | |
+| redisReplication.image | string | `"quay.io/opstree/redis"` | |
+| redisReplication.imagePullPolicy | string | `"IfNotPresent"` | |
+| redisReplication.imagePullSecrets | list | `[]` | |
+| redisReplication.minReadySeconds | int | `0` | |
+| redisReplication.name | string | `""` | |
+| redisReplication.redisSecret.secretKey | string | `""` | |
+| redisReplication.redisSecret.secretName | string | `""` | |
+| redisReplication.resources | object | `{}` | |
+| redisReplication.serviceType | string | `"ClusterIP"` | |
+| redisReplication.tag | string | `"v7.0.15"` | |
+| securityContext | object | `{}` | |
+| serviceAccountName | string | `""` | |
+| serviceMonitor.enabled | bool | `false` | |
+| serviceMonitor.interval | string | `"30s"` | |
+| serviceMonitor.namespace | string | `"monitoring"` | |
+| serviceMonitor.scrapeTimeout | string | `"10s"` | |
+| sidecars.env | list | `[]` | |
+| sidecars.image | string | `""` | |
+| sidecars.imagePullPolicy | string | `"IfNotPresent"` | |
+| sidecars.name | string | `""` | |
+| sidecars.resources.limits.cpu | string | `"100m"` | |
+| sidecars.resources.limits.memory | string | `"128Mi"` | |
+| sidecars.resources.requests.cpu | string | `"50m"` | |
+| sidecars.resources.requests.memory | string | `"64Mi"` | |
+| storageSpec.volumeClaimTemplate.spec.accessModes[0] | string | `"ReadWriteOnce"` | |
+| storageSpec.volumeClaimTemplate.spec.resources.requests.storage | string | `"1Gi"` | |
+| tolerations | list | `[]` | |
\ No newline at end of file
diff --git a/pkg/k8sutils/redis-cluster.go b/pkg/k8sutils/redis-cluster.go
index 9cc63ecd3..7fe9515dd 100644
--- a/pkg/k8sutils/redis-cluster.go
+++ b/pkg/k8sutils/redis-cluster.go
@@ -119,7 +119,7 @@ func generateRedisClusterContainerParams(cl kubernetes.Interface, logger logr.Lo
 	if cr.Spec.EnvVars != nil {
 		containerProp.EnvVars = cr.Spec.EnvVars
 	}
-	if cr.Spec.KubernetesConfig.Service != nil && cr.Spec.KubernetesConfig.Service.ServiceType == "NodePort" {
+	if cr.Spec.KubernetesConfig.GetServiceType() == "NodePort" {
 		envVars := util.Coalesce(containerProp.EnvVars, &[]corev1.EnvVar{})
 		*envVars = append(*envVars, corev1.EnvVar{
 			Name: "NODEPORT",
@@ -306,13 +306,9 @@ func (service RedisClusterService) CreateRedisClusterService(cr *redisv1beta2.Re
 		epp = disableMetrics
 	}
 	annotations := generateServiceAnots(cr.ObjectMeta, nil, epp)
-	additionalServiceAnnotations := map[string]string{}
-	if cr.Spec.KubernetesConfig.Service != nil {
-		additionalServiceAnnotations = cr.Spec.KubernetesConfig.Service.ServiceAnnotations
-	}
 	objectMetaInfo := generateObjectMetaInformation(serviceName, cr.Namespace, labels, annotations)
 	headlessObjectMetaInfo := generateObjectMetaInformation(serviceName+"-headless", cr.Namespace, labels, annotations)
-	additionalObjectMetaInfo := generateObjectMetaInformation(serviceName+"-additional", cr.Namespace, labels, generateServiceAnots(cr.ObjectMeta, additionalServiceAnnotations, epp))
+	additionalObjectMetaInfo := generateObjectMetaInformation(serviceName+"-additional", cr.Namespace, labels, generateServiceAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.GetServiceAnnotations(), epp))
 	err := CreateOrUpdateService(cr.Namespace, headlessObjectMetaInfo, redisClusterAsOwner(cr), disableMetrics, true, "ClusterIP", *cr.Spec.Port, cl)
 	if err != nil {
 		logger.Error(err, "Cannot create headless service for Redis", "Setup.Type", service.RedisServiceRole)
@@ -323,17 +319,14 @@ func (service RedisClusterService) CreateRedisClusterService(cr *redisv1beta2.Re
 		logger.Error(err, "Cannot create service for Redis", "Setup.Type", service.RedisServiceRole)
 		return err
 	}
-	additionalServiceType := "ClusterIP"
-	if cr.Spec.KubernetesConfig.Service != nil {
-		additionalServiceType = cr.Spec.KubernetesConfig.Service.ServiceType
-		if additionalServiceType == "NodePort" {
-			// If NodePort is enabled, we need to create a service for every redis pod.
-			// Then use --cluster-announce-ip --cluster-announce-port --cluster-announce-bus-port to make cluster.
-			err = service.createOrUpdateClusterNodePortService(cr, cl)
-			if err != nil {
-				logger.Error(err, "Cannot create nodeport service for Redis", "Setup.Type", service.RedisServiceRole)
-				return err
-			}
+	additionalServiceType := cr.Spec.KubernetesConfig.GetServiceType()
+	if additionalServiceType == "NodePort" {
+		// If NodePort is enabled, we need to create a service for every redis pod.
+		// Then use --cluster-announce-ip --cluster-announce-port --cluster-announce-bus-port to make cluster.
+		err = service.createOrUpdateClusterNodePortService(cr, cl)
+		if err != nil {
+			logger.Error(err, "Cannot create nodeport service for Redis", "Setup.Type", service.RedisServiceRole)
+			return err
 		}
 	}
 	err = CreateOrUpdateService(cr.Namespace, additionalObjectMetaInfo, redisClusterAsOwner(cr), disableMetrics, false, additionalServiceType, *cr.Spec.Port, cl)
diff --git a/pkg/k8sutils/redis-replication.go b/pkg/k8sutils/redis-replication.go
index 66c066492..d1287e4f6 100644
--- a/pkg/k8sutils/redis-replication.go
+++ b/pkg/k8sutils/redis-replication.go
@@ -15,42 +15,49 @@ import (
 func CreateReplicationService(cr *redisv1beta2.RedisReplication, cl kubernetes.Interface) error {
 	logger := serviceLogger(cr.Namespace, cr.ObjectMeta.Name)
 	labels := getRedisLabels(cr.ObjectMeta.Name, replication, "replication", cr.ObjectMeta.Labels)
-	var epp exporterPortProvider
+
+	epp := disableMetrics
 	if cr.Spec.RedisExporter != nil {
 		epp = func() (port int, enable bool) {
 			defaultP := ptr.To(redisExporterPort)
 			return *util.Coalesce(cr.Spec.RedisExporter.Port, defaultP), cr.Spec.RedisExporter.Enabled
 		}
-	} else {
-		epp = disableMetrics
 	}
+
 	annotations := generateServiceAnots(cr.ObjectMeta, nil, epp)
-	additionalServiceAnnotations := map[string]string{}
-	if cr.Spec.KubernetesConfig.Service != nil {
-		additionalServiceAnnotations = cr.Spec.KubernetesConfig.Service.ServiceAnnotations
-	}
 	objectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name, cr.Namespace, labels, annotations)
 	headlessObjectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name+"-headless", cr.Namespace, labels, annotations)
-	additionalObjectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name+"-additional", cr.Namespace, labels, generateServiceAnots(cr.ObjectMeta, additionalServiceAnnotations, epp))
+	additionalObjectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name+"-additional", cr.Namespace, labels, generateServiceAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.GetServiceAnnotations(), epp))
+	masterLabels := util.MergeMap(
+		labels, map[string]string{RedisRoleLabelKey: RedisRoleLabelMaster},
+	)
+	replicaLabels := util.MergeMap(
+		labels, map[string]string{RedisRoleLabelKey: RedisRoleLabelSlave},
+	)
+	masterObjectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name+"-master", cr.Namespace, masterLabels, annotations)
+	replicaObjectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name+"-replica", cr.Namespace, replicaLabels, annotations)
+
-	err := CreateOrUpdateService(cr.Namespace, headlessObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, true, "ClusterIP", redisPort, cl)
-	if err != nil {
+	if err := CreateOrUpdateService(cr.Namespace, headlessObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, true, "ClusterIP", redisPort, cl); err != nil {
 		logger.Error(err, "Cannot create replication headless service for Redis")
 		return err
 	}
-	err = CreateOrUpdateService(cr.Namespace, objectMetaInfo, redisReplicationAsOwner(cr), epp, false, "ClusterIP", redisPort, cl)
-	if err != nil {
+	if err := CreateOrUpdateService(cr.Namespace, objectMetaInfo, redisReplicationAsOwner(cr), epp, false, "ClusterIP", redisPort, cl); err != nil {
 		logger.Error(err, "Cannot create replication service for Redis")
 		return err
 	}
-	additionalServiceType := "ClusterIP"
-	if cr.Spec.KubernetesConfig.Service != nil {
-		additionalServiceType = cr.Spec.KubernetesConfig.Service.ServiceType
-	}
-	err = CreateOrUpdateService(cr.Namespace, additionalObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, false, additionalServiceType, redisPort, cl)
-	if err != nil {
+	if err := CreateOrUpdateService(cr.Namespace, additionalObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, false, cr.Spec.KubernetesConfig.GetServiceType(), redisPort, cl); err != nil {
 		logger.Error(err, "Cannot create additional service for Redis Replication")
 		return err
 	}
+	if err := CreateOrUpdateService(cr.Namespace, masterObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, false, "ClusterIP", redisPort, cl); err != nil {
+		logger.Error(err, "Cannot create master service for Redis")
+		return err
+	}
+	if err := CreateOrUpdateService(cr.Namespace, replicaObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, false, "ClusterIP", redisPort, cl); err != nil {
+		logger.Error(err, "Cannot create replica service for Redis")
+		return err
+	}
+
 	return nil
 }
diff --git a/pkg/k8sutils/redis-sentinel.go b/pkg/k8sutils/redis-sentinel.go
index 873b05e1c..3d2f4ba6b 100644
--- a/pkg/k8sutils/redis-sentinel.go
+++ b/pkg/k8sutils/redis-sentinel.go
@@ -220,14 +220,9 @@ func (service RedisSentinelService) CreateRedisSentinelService(cr *redisv1beta2.
 		epp = disableMetrics
 	}
 	annotations := generateServiceAnots(cr.ObjectMeta, nil, epp)
-	additionalServiceAnnotations := map[string]string{}
-	if cr.Spec.KubernetesConfig.Service != nil {
-		additionalServiceAnnotations = cr.Spec.KubernetesConfig.Service.ServiceAnnotations
-	}
-
 	objectMetaInfo := generateObjectMetaInformation(serviceName, cr.Namespace, labels, annotations)
 	headlessObjectMetaInfo := generateObjectMetaInformation(serviceName+"-headless", cr.Namespace, labels, annotations)
-	additionalObjectMetaInfo := generateObjectMetaInformation(serviceName+"-additional", cr.Namespace, labels, generateServiceAnots(cr.ObjectMeta, additionalServiceAnnotations, epp))
+	additionalObjectMetaInfo := generateObjectMetaInformation(serviceName+"-additional", cr.Namespace, labels, generateServiceAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.GetServiceAnnotations(), epp))

 	err := CreateOrUpdateService(cr.Namespace, headlessObjectMetaInfo, redisSentinelAsOwner(cr), disableMetrics, true, "ClusterIP", sentinelPort, cl)
 	if err != nil {
@@ -240,11 +235,16 @@ func (service RedisSentinelService) CreateRedisSentinelService(cr *redisv1beta2.
 		return err
 	}

-	additionalServiceType := "ClusterIP"
-	if cr.Spec.KubernetesConfig.Service != nil {
-		additionalServiceType = cr.Spec.KubernetesConfig.Service.ServiceType
-	}
-	err = CreateOrUpdateService(cr.Namespace, additionalObjectMetaInfo, redisSentinelAsOwner(cr), disableMetrics, false, additionalServiceType, sentinelPort, cl)
+	err = CreateOrUpdateService(
+		cr.Namespace,
+		additionalObjectMetaInfo,
+		redisSentinelAsOwner(cr),
+		disableMetrics,
+		false,
+		cr.Spec.KubernetesConfig.GetServiceType(),
+		sentinelPort,
+		cl,
+	)
 	if err != nil {
 		logger.Error(err, "Cannot create additional service for Redis", "Setup.Type", service.RedisServiceRole)
 		return err
diff --git a/pkg/k8sutils/redis-standalone.go b/pkg/k8sutils/redis-standalone.go
index 4fed7d135..05a911760 100644
--- a/pkg/k8sutils/redis-standalone.go
+++ b/pkg/k8sutils/redis-standalone.go
@@ -21,13 +21,9 @@ func CreateStandaloneService(cr *redisv1beta2.Redis, cl kubernetes.Interface) er
 		epp = disableMetrics
 	}
 	annotations := generateServiceAnots(cr.ObjectMeta, nil, epp)
-	additionalServiceAnnotations := map[string]string{}
-	if cr.Spec.KubernetesConfig.Service != nil {
-		additionalServiceAnnotations = cr.Spec.KubernetesConfig.Service.ServiceAnnotations
-	}
 	objectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name, cr.Namespace, labels, annotations)
 	headlessObjectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name+"-headless", cr.Namespace, labels, annotations)
-	additionalObjectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name+"-additional", cr.Namespace, labels, generateServiceAnots(cr.ObjectMeta, additionalServiceAnnotations, epp))
+	additionalObjectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name+"-additional", cr.Namespace, labels, generateServiceAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.GetServiceAnnotations(), epp))
 	err := CreateOrUpdateService(cr.Namespace, headlessObjectMetaInfo, redisAsOwner(cr), disableMetrics, true, "ClusterIP", redisPort, cl)
 	if err != nil {
 		logger.Error(err, "Cannot create standalone headless service for Redis")
@@ -38,11 +34,16 @@ func CreateStandaloneService(cr *redisv1beta2.Redis, cl kubernetes.Interface) er
 		logger.Error(err, "Cannot create standalone service for Redis")
 		return err
 	}
-	additionalServiceType := "ClusterIP"
-	if cr.Spec.KubernetesConfig.Service != nil {
-		additionalServiceType = cr.Spec.KubernetesConfig.Service.ServiceType
-	}
-	err = CreateOrUpdateService(cr.Namespace, additionalObjectMetaInfo, redisAsOwner(cr), disableMetrics, false, additionalServiceType, redisPort, cl)
+	err = CreateOrUpdateService(
+		cr.Namespace,
+		additionalObjectMetaInfo,
+		redisAsOwner(cr),
+		disableMetrics,
+		false,
+		cr.Spec.KubernetesConfig.GetServiceType(),
+		redisPort,
+		cl,
+	)
 	if err != nil {
 		logger.Error(err, "Cannot create additional service for Redis")
 		return err
diff --git a/pkg/util/map.go b/pkg/util/map.go
new file mode 100644
index 000000000..0add4b4af
--- /dev/null
+++ b/pkg/util/map.go
@@ -0,0 +1,13 @@
+package util
+
+// MergeMap merges all the label maps received as argument into a single new label map.
+func MergeMap(all ...map[string]string) map[string]string {
+	res := map[string]string{}
+
+	for _, labels := range all {
+		for k, v := range labels {
+			res[k] = v
+		}
+	}
+	return res
+}
diff --git a/tests/e2e-chainsaw/v1beta2/setup/redis-replication/ready-svc.yaml b/tests/e2e-chainsaw/v1beta2/setup/redis-replication/ready-svc.yaml
index 3e2e7ccbb..3bb1ba962 100644
--- a/tests/e2e-chainsaw/v1beta2/setup/redis-replication/ready-svc.yaml
+++ b/tests/e2e-chainsaw/v1beta2/setup/redis-replication/ready-svc.yaml
@@ -89,3 +89,68 @@ spec:
     redis_setup_type: replication
     role: replication
   type: ClusterIP
+---
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    prometheus.io/port: "9121"
+    prometheus.io/scrape: "true"
+    redis.opstreelabs.in: "true"
+    redis.opstreelabs.instance: redis-replication
+  labels:
+    app: redis-replication
+    redis-role: master
+    redis_setup_type: replication
+    role: replication
+  name: redis-replication-master
+  ownerReferences:
+  - apiVersion: redis.redis.opstreelabs.in/v1beta2
+    controller: true
+    kind: RedisReplication
+    name: redis-replication
+spec:
+  ports:
+  - name: redis-client
+    port: 6379
+    protocol: TCP
+    targetPort: 6379
+  selector:
+    app: redis-replication
+    redis-role: master
+    redis_setup_type: replication
+    role: replication
+  type: ClusterIP
+---
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    prometheus.io/port: "9121"
+    prometheus.io/scrape: "true"
+    redis.opstreelabs.in: "true"
+    redis.opstreelabs.instance: redis-replication
+  labels:
+    app: redis-replication
+    redis-role: slave
+    redis_setup_type: replication
+    role: replication
+  name: redis-replication-replica
+  ownerReferences:
+  - apiVersion: redis.redis.opstreelabs.in/v1beta2
+    controller: true
+    kind: RedisReplication
+    name: redis-replication
+spec:
+  ports:
+  - name: redis-client
+    port: 6379
+    protocol: TCP
+    targetPort: 6379
+  selector:
+    app: redis-replication
+    redis-role: slave
+    redis_setup_type: replication
+    role: replication
+  sessionAffinity: None
+  type: ClusterIP

From 5a8a3452d7de82ada24305ec40aa4d63baa201b1 Mon Sep 17 00:00:00 2001
From: yangw
Date: Wed, 13 Nov 2024 15:55:06 +0800
Subject: [PATCH 03/28] feat: add recreateStatefulSetOnUpdateInvalid helm chart
 value (#1127)

Signed-off-by: drivebyer
---
 charts/redis-cluster/Chart.yaml               |   4 +-
 charts/redis-cluster/README.md                |   1 +
 .../templates/redis-cluster.yaml              |   5 +-
 charts/redis-cluster/values.yaml              |   3 +
 charts/redis-replication/Chart.yaml           |   4 +-
 charts/redis-replication/README.md            |   1 +
 .../templates/redis-replication.yaml          |   4 +
 charts/redis-replication/values.yaml          |   5 +-
 charts/redis-sentinel/Chart.yaml              |   4 +-
 charts/redis-sentinel/README.md               |   1 +
 .../templates/redis-sentinel.yaml             |   4 +
 charts/redis-sentinel/values.yaml             |   3 +
 charts/redis/Chart.yaml                       |   4 +-
 charts/redis/README.md                        |   1 +
 charts/redis/templates/redis-standalone.yaml  |   4 +
 charts/redis/values.yaml                      |   3 +
 .../en/docs/Configuration/Redis/_index.md     | 121 +++++++-------
 .../docs/Configuration/RedisCluster/_index.md | 157 +++++++++---------
 .../Configuration/RedisReplication/_index.md  | 142 +++++++++-------
 19 files changed, 262 insertions(+), 209 deletions(-)

diff --git a/charts/redis-cluster/Chart.yaml b/charts/redis-cluster/Chart.yaml
index 6188d6b50..81bf93921 100644
--- a/charts/redis-cluster/Chart.yaml
+++ b/charts/redis-cluster/Chart.yaml
@@ -1,8 +1,8 @@
 apiVersion: v2
 name: redis-cluster
 description: Provides easy redis setup definitions for Kubernetes services, and deployment.
-version: 0.16.1
-appVersion: "0.16.1"
+version: 0.16.2
+appVersion: "0.16.2"
 home: https://github.com/ot-container-kit/redis-operator
 sources:
   - https://github.com/ot-container-kit/redis-operator
diff --git a/charts/redis-cluster/README.md b/charts/redis-cluster/README.md
index 6792c63ca..b5a0ce1d0 100644
--- a/charts/redis-cluster/README.md
+++ b/charts/redis-cluster/README.md
@@ -94,6 +94,7 @@ helm delete --namespace
 | redisCluster.minReadySeconds | int | `0` | |
 | redisCluster.name | string | `""` | |
 | redisCluster.persistenceEnabled | bool | `true` | |
+| redisCluster.recreateStatefulSetOnUpdateInvalid | bool | `false` | Some fields of statefulset are immutable, such as volumeClaimTemplates. When set to true, the operator will delete the statefulset and recreate it. Default is false. |
 | redisCluster.redisSecret.secretKey | string | `""` | |
 | redisCluster.redisSecret.secretName | string | `""` | |
 | redisCluster.resources | object | `{}` | |
diff --git a/charts/redis-cluster/templates/redis-cluster.yaml b/charts/redis-cluster/templates/redis-cluster.yaml
index e19349c9b..fd12b409e 100644
--- a/charts/redis-cluster/templates/redis-cluster.yaml
+++ b/charts/redis-cluster/templates/redis-cluster.yaml
@@ -4,11 +4,14 @@ kind: RedisCluster
 metadata:
   name: {{ .Values.redisCluster.name | default .Release.Name }}
   labels: {{- include "common.labels" . | nindent 4 }}
+  annotations:
+    {{ if .Values.redisCluster.recreateStatefulSetOnUpdateInvalid }}
+    redis.opstreelabs.in/recreate-statefulset: "true"
+    {{ end }}
 spec:
   clusterSize: {{ .Values.redisCluster.clusterSize }}
   persistenceEnabled: {{ .Values.redisCluster.persistenceEnabled }}
   clusterVersion: {{ .Values.redisCluster.clusterVersion }}
-
   redisLeader: {{- include "redis.role" .Values.redisCluster.leader | nindent 4 }}
     replicas: {{ .Values.redisCluster.leader.replicas }}
     {{- if .Values.externalConfig.enabled }}
diff --git a/charts/redis-cluster/values.yaml b/charts/redis-cluster/values.yaml
index 1e8dfd782..2510b0b85 100644
--- a/charts/redis-cluster/values.yaml
+++ b/charts/redis-cluster/values.yaml
@@ -20,6 +20,9 @@ redisCluster:
   #   cpu: 100m
   #   memory: 128Mi
   minReadySeconds: 0
+  # -- Some fields of statefulset are immutable, such as volumeClaimTemplates.
+  # When set to true, the operator will delete the statefulset and recreate it. Default is false.
+  recreateStatefulSetOnUpdateInvalid: false
   leader:
     replicas: 3
     serviceType: ClusterIP
diff --git a/charts/redis-replication/Chart.yaml b/charts/redis-replication/Chart.yaml
index c1e90dde6..dbc2fde62 100644
--- a/charts/redis-replication/Chart.yaml
+++ b/charts/redis-replication/Chart.yaml
@@ -1,8 +1,8 @@
 apiVersion: v2
 name: redis-replication
 description: Provides easy redis setup definitions for Kubernetes services, and deployment.
-version: 0.16.3
-appVersion: "0.16.3"
+version: 0.16.4
+appVersion: "0.16.4"
 type: application
 engine: gotpl
 maintainers:
diff --git a/charts/redis-replication/README.md b/charts/redis-replication/README.md
index 88151ca7c..14597c8b6 100644
--- a/charts/redis-replication/README.md
+++ b/charts/redis-replication/README.md
@@ -81,6 +81,7 @@ helm delete --namespace
 | redisReplication.imagePullSecrets | list | `[]` | |
 | redisReplication.minReadySeconds | int | `0` | |
 | redisReplication.name | string | `""` | |
+| redisReplication.recreateStatefulSetOnUpdateInvalid | bool | `false` | Some fields of statefulset are immutable, such as volumeClaimTemplates. When set to true, the operator will delete the statefulset and recreate it. Default is false. |
 | redisReplication.redisSecret.secretKey | string | `""` | |
 | redisReplication.redisSecret.secretName | string | `""` | |
 | redisReplication.resources | object | `{}` | |
diff --git a/charts/redis-replication/templates/redis-replication.yaml b/charts/redis-replication/templates/redis-replication.yaml
index 3c40f3405..49021123e 100644
--- a/charts/redis-replication/templates/redis-replication.yaml
+++ b/charts/redis-replication/templates/redis-replication.yaml
@@ -4,6 +4,10 @@ kind: RedisReplication
 metadata:
   name: {{ .Values.redisReplication.name | default .Release.Name }}
   labels: {{- include "common.labels" . | nindent 4 }}
+  annotations:
+    {{ if .Values.redisReplication.recreateStatefulSetOnUpdateInvalid }}
+    redis.opstreelabs.in/recreate-statefulset: "true"
+    {{ end }}
 spec:
   clusterSize: {{ .Values.redisReplication.clusterSize }}
   kubernetesConfig:
diff --git a/charts/redis-replication/values.yaml b/charts/redis-replication/values.yaml
index 02066ddf8..73916bbe3 100644
--- a/charts/redis-replication/values.yaml
+++ b/charts/redis-replication/values.yaml
@@ -21,7 +21,10 @@ redisReplication:
   ignoreAnnotations: []
   # - "redis.opstreelabs.in/ignore"
   minReadySeconds: 0
-
+  # -- Some fields of statefulset are immutable, such as volumeClaimTemplates.
+  # When set to true, the operator will delete the statefulset and recreate it. Default is false.
+  recreateStatefulSetOnUpdateInvalid: false
+
   # Overwite name for resources
   # name: ""

diff --git a/charts/redis-sentinel/Chart.yaml b/charts/redis-sentinel/Chart.yaml
index 7ca231b94..883873ff0 100644
--- a/charts/redis-sentinel/Chart.yaml
+++ b/charts/redis-sentinel/Chart.yaml
@@ -1,8 +1,8 @@
 apiVersion: v2
 name: redis-sentinel
 description: Provides easy redis setup definitions for Kubernetes services, and deployment.
-version: 0.16.5
-appVersion: "0.16.5"
+version: 0.16.6
+appVersion: "0.16.6"
 home: https://github.com/ot-container-kit/redis-operator
 sources:
   - https://github.com/ot-container-kit/redis-operator
diff --git a/charts/redis-sentinel/README.md b/charts/redis-sentinel/README.md
index 5f6e82e81..258da9c5a 100644
--- a/charts/redis-sentinel/README.md
+++ b/charts/redis-sentinel/README.md
@@ -93,6 +93,7 @@ helm delete --namespace
 | redisSentinel.imagePullSecrets | list | `[]` | |
 | redisSentinel.minReadySeconds | int | `0` | |
 | redisSentinel.name | string | `""` | |
+| redisSentinel.recreateStatefulSetOnUpdateInvalid | bool | `false` | Some fields of statefulset are immutable, such as volumeClaimTemplates. When set to true, the operator will delete the statefulset and recreate it. Default is false. |
 | redisSentinel.redisSecret.secretKey | string | `""` | |
 | redisSentinel.redisSecret.secretName | string | `""` | |
 | redisSentinel.resources | object | `{}` | |
diff --git a/charts/redis-sentinel/templates/redis-sentinel.yaml b/charts/redis-sentinel/templates/redis-sentinel.yaml
index 4ad8ac315..16581b49f 100644
--- a/charts/redis-sentinel/templates/redis-sentinel.yaml
+++ b/charts/redis-sentinel/templates/redis-sentinel.yaml
@@ -4,6 +4,10 @@ kind: RedisSentinel
 metadata:
   name: {{ .Values.redisSentinel.name | default .Release.Name }}
   labels: {{- include "common.labels" . | nindent 4 }}
+  annotations:
+    {{ if .Values.redisSentinel.recreateStatefulSetOnUpdateInvalid }}
+    redis.opstreelabs.in/recreate-statefulset: "true"
+    {{ end }}
 spec:
   clusterSize: {{ .Values.redisSentinel.clusterSize }}
   # Sentinel Config
diff --git a/charts/redis-sentinel/values.yaml b/charts/redis-sentinel/values.yaml
index fc1ee723d..640920ede 100644
--- a/charts/redis-sentinel/values.yaml
+++ b/charts/redis-sentinel/values.yaml
@@ -21,6 +21,9 @@ redisSentinel:
   ignoreAnnotations: []
   # - "redis.opstreelabs.in/ignore"
   minReadySeconds: 0
+  # -- Some fields of statefulset are immutable, such as volumeClaimTemplates.
+  # When set to true, the operator will delete the statefulset and recreate it. Default is false.
+  recreateStatefulSetOnUpdateInvalid: false

   # Overwite name for resources
   # name: ""
diff --git a/charts/redis/Chart.yaml b/charts/redis/Chart.yaml
index f9c55d384..c8e772101 100644
--- a/charts/redis/Chart.yaml
+++ b/charts/redis/Chart.yaml
@@ -2,8 +2,8 @@
 apiVersion: v2
 name: redis
 description: Provides easy redis setup definitions for Kubernetes services, and deployment.
-version: 0.16.1
-appVersion: "0.16.1"
+version: 0.16.2
+appVersion: "0.16.2"
 home: https://github.com/ot-container-kit/redis-operator
 sources:
   - https://github.com/ot-container-kit/redis-operator
diff --git a/charts/redis/README.md b/charts/redis/README.md
index 3dce65800..acae504cd 100644
--- a/charts/redis/README.md
+++ b/charts/redis/README.md
@@ -79,6 +79,7 @@ helm delete --namespace
 | redisStandalone.imagePullSecrets | list | `[]` | |
 | redisStandalone.minReadySeconds | int | `0` | |
 | redisStandalone.name | string | `""` | |
+| redisStandalone.recreateStatefulSetOnUpdateInvalid | bool | `false` | Some fields of statefulset are immutable, such as volumeClaimTemplates. When set to true, the operator will delete the statefulset and recreate it. Default is false. |
 | redisStandalone.redisSecret.secretKey | string | `""` | |
 | redisStandalone.redisSecret.secretName | string | `""` | |
 | redisStandalone.resources | object | `{}` | |
diff --git a/charts/redis/templates/redis-standalone.yaml b/charts/redis/templates/redis-standalone.yaml
index be0fb51e2..ebfce6408 100644
--- a/charts/redis/templates/redis-standalone.yaml
+++ b/charts/redis/templates/redis-standalone.yaml
@@ -4,6 +4,10 @@ kind: Redis
 metadata:
   name: {{ .Values.redisStandalone.name | default .Release.Name }}
   labels: {{- include "common.labels" . | nindent 4 }}
+  annotations:
+    {{ if .Values.redisStandalone.recreateStatefulSetOnUpdateInvalid }}
+    redis.opstreelabs.in/recreate-statefulset: "true"
+    {{ end }}
 spec:
   kubernetesConfig:
     image: "{{ .Values.redisStandalone.image }}:{{ .Values.redisStandalone.tag }}"
diff --git a/charts/redis/values.yaml b/charts/redis/values.yaml
index 30b901a60..e628c4c45 100644
--- a/charts/redis/values.yaml
+++ b/charts/redis/values.yaml
@@ -20,6 +20,9 @@ redisStandalone:
   ignoreAnnotations: []
   # - "redis.opstreelabs.in/ignore"
   minReadySeconds: 0
+  # -- Some fields of statefulset are immutable, such as volumeClaimTemplates.
+  # When set to true, the operator will delete the statefulset and recreate it. Default is false.
+  recreateStatefulSetOnUpdateInvalid: false

 labels: {}
 # foo: bar
diff --git a/docs/content/en/docs/Configuration/Redis/_index.md b/docs/content/en/docs/Configuration/Redis/_index.md
index a6748d514..25e278811 100644
--- a/docs/content/en/docs/Configuration/Redis/_index.md
+++ b/docs/content/en/docs/Configuration/Redis/_index.md
@@ -11,63 +11,64 @@ Redis standalone configuration can be customized by [values.yaml](https://github

 ## Helm Configuration Parameters

-| Key | Type | Default | Description |
-|-----------------------------------------------------------------|--------|--------------------------------------------------------------------------|-------------|
-| TLS.ca | string | `"ca.key"` | |
-| TLS.cert | string | `"tls.crt"` | |
-| TLS.key | string | `"tls.key"` | |
-| TLS.secret.secretName | string | `""` | |
-| acl.secret.secretName | string | `""` | |
-| affinity | object | `{}` | |
-| env | list | `[]` | |
-| externalConfig.data | string | `"tcp-keepalive 400\nslowlog-max-len 158\nstream-node-max-bytes 2048\n"` | |
-| externalConfig.enabled | bool | `false` | |
-| externalService.enabled | bool | `false` | |
-| externalService.port | int | `6379` | |
-| externalService.serviceType | string | `"NodePort"` | |
-| initContainer.args | list | `[]` | |
-| initContainer.command | list | `[]` | |
-| initContainer.enabled | bool | `false` | |
-| initContainer.env | list | `[]` | |
-| initContainer.image | string | `""` | |
-| initContainer.imagePullPolicy | string | `"IfNotPresent"` | |
-| initContainer.resources | object | `{}` | |
-| labels | object | `{}` | |
-| nodeSelector | object | `{}` | |
-| podSecurityContext.fsGroup | int | `1000` | |
-| podSecurityContext.runAsUser | int | `1000` | |
-| priorityClassName | string | `""` | |
-| redisExporter.enabled | bool | `false` | |
-| redisExporter.env | list | `[]` | |
-| redisExporter.image | string | `"quay.io/opstree/redis-exporter"` | |
-| redisExporter.imagePullPolicy | string | `"IfNotPresent"` | |
-| redisExporter.resources | object | `{}` | |
-| redisExporter.tag | string | `"v1.44.0"` | |
-| redisStandalone.ignoreAnnotations | list | `[]` | |
-| redisStandalone.image | string | `"quay.io/opstree/redis"` | |
-| redisStandalone.imagePullPolicy | string | `"IfNotPresent"` | |
-| redisStandalone.imagePullSecrets | list | `[]` | |
-| redisStandalone.minReadySeconds | int | `0` | |
-| redisStandalone.name | string | `""` | |
-| redisStandalone.redisSecret.secretKey | string | `""` | |
-| redisStandalone.redisSecret.secretName | string | `""` | |
-| redisStandalone.resources | object | `{}` | |
-| redisStandalone.serviceType | string | `"ClusterIP"` | |
-| redisStandalone.tag | string | `"v7.0.15"` | |
-| securityContext | object | `{}` | |
-| serviceAccountName | string | `""` | |
-| serviceMonitor.enabled | bool | `false` | |
-| serviceMonitor.interval | string | `"30s"` | |
-| serviceMonitor.namespace | string | `"monitoring"` | |
-| serviceMonitor.scrapeTimeout | string | `"10s"` | |
-| sidecars.env | list | `[]` | |
-| sidecars.image | string | `""` | |
-| sidecars.imagePullPolicy | string | `"IfNotPresent"` | |
-| sidecars.name | string | `""` | |
-| sidecars.resources.limits.cpu | string | `"100m"` | |
-| sidecars.resources.limits.memory | string | `"128Mi"` | |
-| sidecars.resources.requests.cpu | string | `"50m"` | |
-| sidecars.resources.requests.memory | string | `"64Mi"` | |
-| storageSpec.volumeClaimTemplate.spec.accessModes[0] | string | `"ReadWriteOnce"` | |
-| storageSpec.volumeClaimTemplate.spec.resources.requests.storage | string | `"1Gi"` | |
-| tolerations | list | `[]` | |
\ No newline at end of file
+| Key | Type | Default | Description |
+|-----------------------------------------------------------------|--------|--------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| TLS.ca | string | `"ca.key"` | |
+| TLS.cert | string | `"tls.crt"` | |
+| TLS.key | string | `"tls.key"` | |
+| TLS.secret.secretName | string | `""` | |
+| acl.secret.secretName | string | `""` | |
+| affinity | object | `{}` | |
+| env | list | `[]` | |
+| externalConfig.data | string | `"tcp-keepalive 400\nslowlog-max-len 158\nstream-node-max-bytes 2048\n"` | |
+| externalConfig.enabled | bool | `false` | |
+| externalService.enabled | bool | `false` | |
+| externalService.port | int | `6379` | |
+| externalService.serviceType | string | `"NodePort"` | |
+| initContainer.args | list | `[]` | |
+| initContainer.command | list | `[]` | |
+| initContainer.enabled | bool | `false` | |
+| initContainer.env | list | `[]` | |
+| initContainer.image | string | `""` | |
+| initContainer.imagePullPolicy | string | `"IfNotPresent"` | |
+| initContainer.resources | object | `{}` | |
+| labels | object | `{}` | |
+| nodeSelector | object | `{}` | |
+| podSecurityContext.fsGroup | int | `1000` | |
+| podSecurityContext.runAsUser | int | `1000` | |
+| priorityClassName | string | `""` | |
+| redisExporter.enabled | bool | `false` | |
+| redisExporter.env | list | `[]` | |
+| redisExporter.image | string | `"quay.io/opstree/redis-exporter"` | |
+| redisExporter.imagePullPolicy | string | `"IfNotPresent"` | |
+| redisExporter.resources | object | `{}` | |
+| redisExporter.tag | string | `"v1.44.0"` | |
+| redisStandalone.ignoreAnnotations | list | `[]` | |
+| redisStandalone.image | string | `"quay.io/opstree/redis"` | |
+| redisStandalone.imagePullPolicy | string | `"IfNotPresent"` | |
+| redisStandalone.imagePullSecrets | list | `[]` | |
+| redisStandalone.minReadySeconds | int | `0` | |
+| redisStandalone.name | string | `""` | |
+| redisStandalone.recreateStatefulSetOnUpdateInvalid | bool | `false` | Some fields of statefulset are immutable, such as volumeClaimTemplates. When set to true, the operator will delete the statefulset and recreate it. Default is false. |
| +| redisStandalone.redisSecret.secretKey | string | `""` | | +| redisStandalone.redisSecret.secretName | string | `""` | | +| redisStandalone.resources | object | `{}` | | +| redisStandalone.serviceType | string | `"ClusterIP"` | | +| redisStandalone.tag | string | `"v7.0.15"` | | +| securityContext | object | `{}` | | +| serviceAccountName | string | `""` | | +| serviceMonitor.enabled | bool | `false` | | +| serviceMonitor.interval | string | `"30s"` | | +| serviceMonitor.namespace | string | `"monitoring"` | | +| serviceMonitor.scrapeTimeout | string | `"10s"` | | +| sidecars.env | list | `[]` | | +| sidecars.image | string | `""` | | +| sidecars.imagePullPolicy | string | `"IfNotPresent"` | | +| sidecars.name | string | `""` | | +| sidecars.resources.limits.cpu | string | `"100m"` | | +| sidecars.resources.limits.memory | string | `"128Mi"` | | +| sidecars.resources.requests.cpu | string | `"50m"` | | +| sidecars.resources.requests.memory | string | `"64Mi"` | | +| storageSpec.volumeClaimTemplate.spec.accessModes[0] | string | `"ReadWriteOnce"` | | +| storageSpec.volumeClaimTemplate.spec.resources.requests.storage | string | `"1Gi"` | | +| tolerations | list | `[]` | | \ No newline at end of file diff --git a/docs/content/en/docs/Configuration/RedisCluster/_index.md b/docs/content/en/docs/Configuration/RedisCluster/_index.md index 86bbc73a7..111be1497 100644 --- a/docs/content/en/docs/Configuration/RedisCluster/_index.md +++ b/docs/content/en/docs/Configuration/RedisCluster/_index.md @@ -11,81 +11,82 @@ Redis cluster can be customized by [values.yaml](https://github.com/OT-CONTAINER ## Helm Configuration Parameters -| Key | Type | Default | Description | -|-------------------------------------------------------------------------|--------|--------------------------------------------------------------------------|-------------| -| TLS.ca | string | `"ca.key"` | | -| TLS.cert | string | `"tls.crt"` | | -| TLS.key | string | `"tls.key"` | | -| TLS.secret.secretName | string | `""` | | -| acl.secret.secretName | string | `""` | | -| env | list | `[]` | | -| externalConfig.data | string | `"tcp-keepalive 400\nslowlog-max-len 158\nstream-node-max-bytes 2048\n"` | | -| externalConfig.enabled | bool | `false` | | -| externalService.enabled | bool | `false` | | -| externalService.port | int | `6379` | | -| externalService.serviceType | string | `"LoadBalancer"` | | -| initContainer.args | list | `[]` | | -| initContainer.command | list | `[]` | | -| initContainer.enabled | bool | `false` | | -| initContainer.env | list | `[]` | | -| initContainer.image | string | `""` | | -| initContainer.imagePullPolicy | string | `"IfNotPresent"` | | -| initContainer.resources | object | `{}` | | -| labels | object | `{}` | | -| podSecurityContext.fsGroup | int | `1000` | | -| podSecurityContext.runAsUser | int | `1000` | | -| priorityClassName | string | `""` | | -| redisCluster.clusterSize | int | `3` | | -| redisCluster.clusterVersion | string | `"v7"` | | -| redisCluster.follower.affinity | string | `nil` | | -| redisCluster.follower.nodeSelector | string | `nil` | | -| redisCluster.follower.pdb.enabled | bool | `false` | | -| redisCluster.follower.pdb.maxUnavailable | int | `1` | | -| redisCluster.follower.pdb.minAvailable | int | `1` | | -| redisCluster.follower.replicas | int | `3` | | -| redisCluster.follower.securityContext | object | `{}` | | -| redisCluster.follower.serviceType | string | `"ClusterIP"` | | -| redisCluster.follower.tolerations | list | `[]` | | -| redisCluster.image | string | 
`"quay.io/opstree/redis"` | | -| redisCluster.imagePullPolicy | string | `"IfNotPresent"` | | -| redisCluster.imagePullSecrets | object | `{}` | | -| redisCluster.leader.affinity | object | `{}` | | -| redisCluster.leader.nodeSelector | string | `nil` | | -| redisCluster.leader.pdb.enabled | bool | `false` | | -| redisCluster.leader.pdb.maxUnavailable | int | `1` | | -| redisCluster.leader.pdb.minAvailable | int | `1` | | -| redisCluster.leader.replicas | int | `3` | | -| redisCluster.leader.securityContext | object | `{}` | | -| redisCluster.leader.serviceType | string | `"ClusterIP"` | | -| redisCluster.leader.tolerations | list | `[]` | | -| redisCluster.minReadySeconds | int | `0` | | -| redisCluster.name | string | `""` | | -| redisCluster.persistenceEnabled | bool | `true` | | -| redisCluster.redisSecret.secretKey | string | `""` | | -| redisCluster.redisSecret.secretName | string | `""` | | -| redisCluster.resources | object | `{}` | | -| redisCluster.tag | string | `"v7.0.15"` | | -| redisExporter.enabled | bool | `false` | | -| redisExporter.env | list | `[]` | | -| redisExporter.image | string | `"quay.io/opstree/redis-exporter"` | | -| redisExporter.imagePullPolicy | string | `"IfNotPresent"` | | -| redisExporter.resources | object | `{}` | | -| redisExporter.tag | string | `"v1.44.0"` | | -| serviceAccountName | string | `""` | | -| serviceMonitor.enabled | bool | `false` | | -| serviceMonitor.interval | string | `"30s"` | | -| serviceMonitor.namespace | string | `"monitoring"` | | -| serviceMonitor.scrapeTimeout | string | `"10s"` | | -| sidecars.env | object | `{}` | | -| sidecars.image | string | `""` | | -| sidecars.imagePullPolicy | string | `"IfNotPresent"` | | -| sidecars.name | string | `""` | | -| sidecars.resources.limits.cpu | string | `"100m"` | | -| sidecars.resources.limits.memory | string | `"128Mi"` | | -| sidecars.resources.requests.cpu | string | `"50m"` | | -| sidecars.resources.requests.memory | string | `"64Mi"` | | -| storageSpec.nodeConfVolume | bool | `true` | | -| storageSpec.nodeConfVolumeClaimTemplate.spec.accessModes[0] | string | `"ReadWriteOnce"` | | -| storageSpec.nodeConfVolumeClaimTemplate.spec.resources.requests.storage | string | `"1Gi"` | | -| storageSpec.volumeClaimTemplate.spec.accessModes[0] | string | `"ReadWriteOnce"` | | -| storageSpec.volumeClaimTemplate.spec.resources.requests.storage | string | `"1Gi"` | | \ No newline at end of file +| Key | Type | Default | Description | +|-------------------------------------------------------------------------|--------|--------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| TLS.ca | string | `"ca.key"` | | +| TLS.cert | string | `"tls.crt"` | | +| TLS.key | string | `"tls.key"` | | +| TLS.secret.secretName | string | `""` | | +| acl.secret.secretName | string | `""` | | +| env | list | `[]` | | +| externalConfig.data | string | `"tcp-keepalive 400\nslowlog-max-len 158\nstream-node-max-bytes 2048\n"` | | +| externalConfig.enabled | bool | `false` | | +| externalService.enabled | bool | `false` | | +| externalService.port | int | `6379` | | +| externalService.serviceType | string | `"LoadBalancer"` | | +| initContainer.args | list | `[]` | | +| initContainer.command | list | `[]` | | +| initContainer.enabled | bool | `false` | | +| initContainer.env | list | `[]` | | +| initContainer.image | string | `""` | | 
+| initContainer.imagePullPolicy | string | `"IfNotPresent"` | | +| initContainer.resources | object | `{}` | | +| labels | object | `{}` | | +| podSecurityContext.fsGroup | int | `1000` | | +| podSecurityContext.runAsUser | int | `1000` | | +| priorityClassName | string | `""` | | +| redisCluster.clusterSize | int | `3` | | +| redisCluster.clusterVersion | string | `"v7"` | | +| redisCluster.follower.affinity | string | `nil` | | +| redisCluster.follower.nodeSelector | string | `nil` | | +| redisCluster.follower.pdb.enabled | bool | `false` | | +| redisCluster.follower.pdb.maxUnavailable | int | `1` | | +| redisCluster.follower.pdb.minAvailable | int | `1` | | +| redisCluster.follower.replicas | int | `3` | | +| redisCluster.follower.securityContext | object | `{}` | | +| redisCluster.follower.serviceType | string | `"ClusterIP"` | | +| redisCluster.follower.tolerations | list | `[]` | | +| redisCluster.image | string | `"quay.io/opstree/redis"` | | +| redisCluster.imagePullPolicy | string | `"IfNotPresent"` | | +| redisCluster.imagePullSecrets | object | `{}` | | +| redisCluster.leader.affinity | object | `{}` | | +| redisCluster.leader.nodeSelector | string | `nil` | | +| redisCluster.leader.pdb.enabled | bool | `false` | | +| redisCluster.leader.pdb.maxUnavailable | int | `1` | | +| redisCluster.leader.pdb.minAvailable | int | `1` | | +| redisCluster.leader.replicas | int | `3` | | +| redisCluster.leader.securityContext | object | `{}` | | +| redisCluster.leader.serviceType | string | `"ClusterIP"` | | +| redisCluster.leader.tolerations | list | `[]` | | +| redisCluster.minReadySeconds | int | `0` | | +| redisCluster.name | string | `""` | | +| redisCluster.persistenceEnabled | bool | `true` | | +| redisCluster.recreateStatefulSetOnUpdateInvalid | bool | `false` | Some fields of statefulset are immutable, such as volumeClaimTemplates. When set to true, the operator will delete the statefulset and recreate it. Default is false. 
| +| redisCluster.redisSecret.secretKey | string | `""` | | +| redisCluster.redisSecret.secretName | string | `""` | | +| redisCluster.resources | object | `{}` | | +| redisCluster.tag | string | `"v7.0.15"` | | +| redisExporter.enabled | bool | `false` | | +| redisExporter.env | list | `[]` | | +| redisExporter.image | string | `"quay.io/opstree/redis-exporter"` | | +| redisExporter.imagePullPolicy | string | `"IfNotPresent"` | | +| redisExporter.resources | object | `{}` | | +| redisExporter.tag | string | `"v1.44.0"` | | +| serviceAccountName | string | `""` | | +| serviceMonitor.enabled | bool | `false` | | +| serviceMonitor.interval | string | `"30s"` | | +| serviceMonitor.namespace | string | `"monitoring"` | | +| serviceMonitor.scrapeTimeout | string | `"10s"` | | +| sidecars.env | object | `{}` | | +| sidecars.image | string | `""` | | +| sidecars.imagePullPolicy | string | `"IfNotPresent"` | | +| sidecars.name | string | `""` | | +| sidecars.resources.limits.cpu | string | `"100m"` | | +| sidecars.resources.limits.memory | string | `"128Mi"` | | +| sidecars.resources.requests.cpu | string | `"50m"` | | +| sidecars.resources.requests.memory | string | `"64Mi"` | | +| storageSpec.nodeConfVolume | bool | `true` | | +| storageSpec.nodeConfVolumeClaimTemplate.spec.accessModes[0] | string | `"ReadWriteOnce"` | | +| storageSpec.nodeConfVolumeClaimTemplate.spec.resources.requests.storage | string | `"1Gi"` | | +| storageSpec.volumeClaimTemplate.spec.accessModes[0] | string | `"ReadWriteOnce"` | | +| storageSpec.volumeClaimTemplate.spec.resources.requests.storage | string | `"1Gi"` | | \ No newline at end of file diff --git a/docs/content/en/docs/Configuration/RedisReplication/_index.md b/docs/content/en/docs/Configuration/RedisReplication/_index.md index d9e8ab1a0..785f6128d 100644 --- a/docs/content/en/docs/Configuration/RedisReplication/_index.md +++ b/docs/content/en/docs/Configuration/RedisReplication/_index.md @@ -11,64 +11,84 @@ Redis replication configuration can be customized by [values.yaml](https://githu ## Helm Configuration Parameters -| Key | Type | Default | Description | -|-----------------------------------------------------------------|--------|--------------------------------------------------------------------------|-------------| -| TLS.ca | string | `"ca.key"` | | -| TLS.cert | string | `"tls.crt"` | | -| TLS.key | string | `"tls.key"` | | -| TLS.secret.secretName | string | `""` | | -| acl.secret.secretName | string | `""` | | -| affinity | object | `{}` | | -| env | list | `[]` | | -| externalConfig.data | string | `"tcp-keepalive 400\nslowlog-max-len 158\nstream-node-max-bytes 2048\n"` | | -| externalConfig.enabled | bool | `false` | | -| externalService.enabled | bool | `false` | | -| externalService.port | int | `6379` | | -| externalService.serviceType | string | `"NodePort"` | | -| initContainer.args | list | `[]` | | -| initContainer.command | list | `[]` | | -| initContainer.enabled | bool | `false` | | -| initContainer.env | list | `[]` | | -| initContainer.image | string | `""` | | -| initContainer.imagePullPolicy | string | `"IfNotPresent"` | | -| initContainer.resources | object | `{}` | | -| labels | object | `{}` | | -| nodeSelector | object | `{}` | | -| podSecurityContext.fsGroup | int | `1000` | | -| podSecurityContext.runAsUser | int | `1000` | | -| priorityClassName | string | `""` | | -| redisExporter.enabled | bool | `false` | | -| redisExporter.env | list | `[]` | | -| redisExporter.image | string | `"quay.io/opstree/redis-exporter"` | | -| 
redisExporter.imagePullPolicy | string | `"IfNotPresent"` | |
-| redisExporter.resources | object | `{}` | |
-| redisExporter.tag | string | `"v1.44.0"` | |
-| redisReplication.clusterSize | int | `3` | |
-| redisReplication.ignoreAnnotations | list | `[]` | |
-| redisReplication.image | string | `"quay.io/opstree/redis"` | |
-| redisReplication.imagePullPolicy | string | `"IfNotPresent"` | |
-| redisReplication.imagePullSecrets | list | `[]` | |
-| redisReplication.minReadySeconds | int | `0` | |
-| redisReplication.name | string | `""` | |
-| redisReplication.redisSecret.secretKey | string | `""` | |
-| redisReplication.redisSecret.secretName | string | `""` | |
-| redisReplication.resources | object | `{}` | |
-| redisReplication.serviceType | string | `"ClusterIP"` | |
-| redisReplication.tag | string | `"v7.0.15"` | |
-| securityContext | object | `{}` | |
-| serviceAccountName | string | `""` | |
-| serviceMonitor.enabled | bool | `false` | |
-| serviceMonitor.interval | string | `"30s"` | |
-| serviceMonitor.namespace | string | `"monitoring"` | |
-| serviceMonitor.scrapeTimeout | string | `"10s"` | |
-| sidecars.env | list | `[]` | |
-| sidecars.image | string | `""` | |
-| sidecars.imagePullPolicy | string | `"IfNotPresent"` | |
-| sidecars.name | string | `""` | |
-| sidecars.resources.limits.cpu | string | `"100m"` | |
-| sidecars.resources.limits.memory | string | `"128Mi"` | |
-| sidecars.resources.requests.cpu | string | `"50m"` | |
-| sidecars.resources.requests.memory | string | `"64Mi"` | |
-| storageSpec.volumeClaimTemplate.spec.accessModes[0] | string | `"ReadWriteOnce"` | |
-| storageSpec.volumeClaimTemplate.spec.resources.requests.storage | string | `"1Gi"` | |
-| tolerations | list | `[]` | |
\ No newline at end of file
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| TLS.ca | string | `"ca.key"` | |
+| TLS.cert | string | `"tls.crt"` | |
+| TLS.key | string | `"tls.key"` | |
+| TLS.secret.secretName | string | `""` | |
+| acl.secret.secretName | string | `""` | |
+| affinity | object | `{}` | |
+| env | list | `[]` | |
+| externalConfig.data | string | `"tcp-keepalive 400\nslowlog-max-len 158\nstream-node-max-bytes 2048\n"` | |
+| externalConfig.enabled | bool | `false` | |
+| externalService.enabled | bool | `false` | |
+| externalService.port | int | `6379` | |
+| externalService.serviceType | string | `"NodePort"` | |
+| initContainer.args | list | `[]` | |
+| initContainer.command | list | `[]` | |
+| initContainer.enabled | bool | `false` | |
+| initContainer.env | list | `[]` | |
+| initContainer.image | string | `""` | |
+| initContainer.imagePullPolicy | string | `"IfNotPresent"` | |
+| initContainer.resources | object | `{}` | |
+| labels | object | `{}` | |
+| nodeSelector | object | `{}` | |
+| podSecurityContext.fsGroup | int | `1000` | |
+| podSecurityContext.runAsUser | int | `1000` | |
+| priorityClassName | string | `""` | |
+| redisExporter.enabled | bool | `false` | |
+| redisExporter.env | list | `[]` | |
+| redisExporter.image | string | `"quay.io/opstree/redis-exporter"` | |
+| redisExporter.imagePullPolicy | string | `"IfNotPresent"` | |
+| redisExporter.resources | object | `{}` | |
+| redisExporter.tag | string | `"v1.44.0"` | |
+| redisReplication.clusterSize | int | `3` | |
+| redisReplication.ignoreAnnotations | list | `[]` | |
+| redisReplication.image | string | `"quay.io/opstree/redis"` | |
+| redisReplication.imagePullPolicy | string | `"IfNotPresent"` | |
+| redisReplication.imagePullSecrets | list | `[]` | |
+| redisReplication.minReadySeconds | int | `0` | |
+| redisReplication.name | string | `""` | |
+| redisReplication.recreateStatefulSetOnUpdateInvalid | bool | `false` | Some fields of statefulset are immutable, such as volumeClaimTemplates. When set to true, the operator will delete the statefulset and recreate it. Default is false. |
+| redisReplication.redisSecret.secretKey | string | `""` | |
+| redisReplication.redisSecret.secretName | string | `""` | |
+| redisReplication.resources | object | `{}` | |
+| redisReplication.serviceType | string | `"ClusterIP"` | |
+| redisReplication.tag | string | `"v7.0.15"` | |
+| securityContext | object | `{}` | |
+| serviceAccountName | string | `""` | |
+| serviceMonitor.enabled | bool | `false` | |
+| serviceMonitor.interval | string | `"30s"` | |
+| serviceMonitor.namespace | string | `"monitoring"` | |
+| serviceMonitor.scrapeTimeout | string | `"10s"` | |
+| sidecars.env | list | `[]` | |
+| sidecars.image | string | `""` | |
+| sidecars.imagePullPolicy | string | `"IfNotPresent"` | |
+| sidecars.name | string | `""` | |
+| sidecars.resources.limits.cpu | string | `"100m"` | |
+| sidecars.resources.limits.memory | string | `"128Mi"` | |
+| sidecars.resources.requests.cpu | string | `"50m"` | |
+| sidecars.resources.requests.memory | string | `"64Mi"` | |
+| storageSpec.volumeClaimTemplate.spec.accessModes[0] | string | `"ReadWriteOnce"` | |
+| storageSpec.volumeClaimTemplate.spec.resources.requests.storage | string | `"1Gi"` | |
+| tolerations | list | `[]` | |
\ No newline at end of file

From 0eac383fe2fa3367d90a8d39b4bcfc1e03301270 Mon Sep 17 00:00:00 2001
From: Nash Luffman
Date: Wed, 13 Nov 2024 09:19:16 +0000
Subject: [PATCH 04/28] test: remove unnecessary waits from e2e tests (#1106)

* remove unnecessary waits from e2e tests

Signed-off-by: mluffman

* small refactor of platforms in Makefile

Signed-off-by: mluffman

* fix

* update

* fix

---------

Signed-off-by: mluffman
Co-authored-by: mluffman
Co-authored-by: drivebyer
---
 Makefile                                             | 9 +++++----
 .../v1beta2/redis-cluster-restart/ready-cluster.yaml | 2 ++
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/Makefile b/Makefile
index 9dc6ebe84..6aa892c05 100644
--- a/Makefile
+++ b/Makefile
@@ -25,6 +25,7 @@ GOBIN=$(shell go env GOBIN)
 endif
 
 CONTAINER_ENGINE ?= docker
+PLATFORMS="linux/arm64,linux/amd64"
 
 all: manager
 
@@ -78,15 +79,15 @@ generate: controller-gen
 	$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
 
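# A usage sketch (an assumption, not a line of this patch): with the platform
# list factored into the PLATFORMS variable above, a one-off single-arch build
# can override it from the command line. The targets are from this Makefile;
# the image tag below is hypothetical.
#
#   make docker-build PLATFORMS=linux/amd64 IMG=opstree/redis-operator:dev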
 docker-create:
-	${CONTAINER_ENGINE} buildx create --platform "linux/amd64,linux/arm64" --use
+	${CONTAINER_ENGINE} buildx create --platform $(PLATFORMS) --use
 
 # Build the docker image
 docker-build:
-	${CONTAINER_ENGINE} buildx build --platform="linux/arm64,linux/amd64" -t ${IMG} .
+	${CONTAINER_ENGINE} buildx build --platform=$(PLATFORMS) -t ${IMG} .
 
 # Push the docker image
 docker-push:
-	${CONTAINER_ENGINE} buildx build --push --platform="linux/arm64,linux/amd64" -t ${IMG} .
+	${CONTAINER_ENGINE} buildx build --push --platform="$(PLATFORMS)" -t ${IMG} .
 
 
 # go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist
@@ -114,7 +115,7 @@ bundle: manifests kustomize
 # Build the bundle image.
 .PHONY: bundle-build
 bundle-build:
-	${CONTAINER_ENGINE} buildx build --platform="linux/arm64,linux/amd64" -f bundle.Dockerfile -t $(BUNDLE_IMG) .
+	${CONTAINER_ENGINE} buildx build --platform="$(PLATFORMS)" -f bundle.Dockerfile -t $(BUNDLE_IMG) .
 
 # Generate bundle manifests and metadata, then validate generated files.
 .PHONY: codegen
diff --git a/tests/e2e-chainsaw/v1beta2/redis-cluster-restart/ready-cluster.yaml b/tests/e2e-chainsaw/v1beta2/redis-cluster-restart/ready-cluster.yaml
index 43462e53d..60c60ecfe 100644
--- a/tests/e2e-chainsaw/v1beta2/redis-cluster-restart/ready-cluster.yaml
+++ b/tests/e2e-chainsaw/v1beta2/redis-cluster-restart/ready-cluster.yaml
@@ -4,5 +4,7 @@ kind: RedisCluster
 metadata:
   name: redis-cluster-v1beta2
 status:
+  readyFollowerReplicas: 0
+  readyLeaderReplicas: 3
   state: Ready
   reason: RedisCluster is ready

From c6fcdb7f27c7a4589bd95cdad58cd6ddf3c3ab96 Mon Sep 17 00:00:00 2001
From: Shubham Gupta <69793468+shubham-cmyk@users.noreply.github.com>
Date: Thu, 14 Nov 2024 12:55:57 +0530
Subject: [PATCH 05/28] refactor(deps): bump go from 1.21 to 1.22 (#853)

* refactor(deps): bump go from 1.21 to 1.22

Signed-off-by: Shubham Gupta

* ci: bump go version

Signed-off-by: Shubham Gupta

* update

---------

Signed-off-by: Shubham Gupta
Co-authored-by: drivebyer
---
 .github/workflows/ci.yaml | 2 +-
 .golangci.yml             | 2 +-
 Dockerfile                | 2 +-
 go.mod                    | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index a77faa8f6..a68bf24f3 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -11,7 +11,7 @@ permissions:
   contents: read
 
 env:
-  GOLANG_VERSION: 1.21
+  GOLANG_VERSION: 1.22
   APPLICATION_NAME: redis-operator
   DockerImagName: docker.io/opstree/redis-operator
   BuildDocs: true
diff --git a/.golangci.yml b/.golangci.yml
index 1f6276d5a..1ff28d4d5 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -47,7 +47,7 @@ linters:
 
 run:
   timeout: 15m
-  go: "1.21"
+  go: "1.22"
   tests: true
   show-stats: true
   skip-files:
diff --git a/Dockerfile b/Dockerfile
index 5a1575dae..ae57deaee 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,5 +1,5 @@
 # Build the manager binary
-FROM golang:1.21 AS builder
+FROM golang:1.22 AS builder
 ARG BUILDOS
 ARG BUILDPLATFORM
 ARG BUILDARCH
diff --git a/go.mod b/go.mod
index 0aac98ace..6dd0f7a46 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
 module github.com/OT-CONTAINER-KIT/redis-operator
 
-go 1.21
+go 1.22
 
 require (
 	github.com/banzaicloud/k8s-objectmatcher v1.8.0

From 38bc0277579c43fb77d472c02aceec9728d338e7 Mon Sep 17 00:00:00 2001
From: Taisuke Okamoto <34154552+b1gb4by@users.noreply.github.com>
Date: Thu, 14 Nov 2024 17:17:09 +0900
Subject: [PATCH 06/28] doc: fix "Image Compatibility" (#1128)

Signed-off-by: b1gb4by <34154552+b1gb4by@users.noreply.github.com>
---
 README.md | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/README.md b/README.md
index a5e4b7fa0..b899eadba 100644
--- a/README.md
+++ b/README.md
@@ -25,7 +25,7 @@
 
 A Golang based redis operator that will make/oversee Redis standalone and cluster mode setup on top of the Kubernetes. It can create a redis cluster setup with best practices on Cloud as well as the Bare metal environment. Also, it provides an in-built monitoring capability using redis-exporter.
 
-For documentation, please refer to https://ot-redis-operator.netlify.app/
+For documentation, please refer to <https://ot-redis-operator.netlify.app/>
 
 Organizations that are using Redis Operator to manage their redis workload can be found [here](./USED_BY_ORGANIZATIONS.md). If your organization is also using Redis Operator, please free to add by creating a [pull request](https://github.com/OT-CONTAINER-KIT/redis-operator/pulls)
 
@@ -68,11 +68,15 @@ Redis operator requires a Kubernetes cluster of version `>=1.18.0`. If you have
 
 The following table shows the compatibility between the Operator Version, Redis Image, Sentinel Image, and Exporter Image:
 
 | Operator Version | Redis Image | Sentinel Image | Exporter Image |
-|------------------|-------------|----------------|----------------|
-| v0.15.0 | v7.0.12 | v7.0.12 | v1.48.0 |
-| v0.15.0 | v7.0.11 | v7.0.11 | v1.48.0 |
-| v0.14.0 | v7.0.7 | v7.0.7 | v1.48.0 |
-| v0.13.0 | v6.2.5 | nil | v1.48.0 |
+| ---------------- | ----------- | -------------- | -------------- |
+| v0.18.1 | v7.0.12 | v7.0.12 | v1.44.0 |
+| v0.18.0 | v7.0.12 | v7.0.12 | v1.44.0 |
+| v0.17.0 | v7.0.12 | v7.0.12 | v1.44.0 |
+| v0.16.0 | v7.0.12 | v7.0.12 | v1.44.0 |
+| v0.15.1 | v7.0.12 | v7.0.12 | v1.44.0 |
+| v0.15.0 | v7.0.11 | v7.0.11 | v1.44.0 |
+| v0.14.0 | v7.0.7 | v7.0.7 | v1.44.0 |
+| v0.13.0 | v6.2.5 | nil | v1.44.0 |
 
 ## Quickstart
 
@@ -102,7 +106,7 @@ Creating redis cluster, standalone, replication and sentinel setup.
 
 ```shell
 # Create redis cluster setup
 $ helm upgrade redis-cluster ot-helm/redis-cluster \
-  --set redisCluster.clusterSize=3 --install \
+    --set redisCluster.clusterSize=3 --install \
   --namespace ot-operators
 ```

From f72628d89a33b9dbc69fadc9577bc0a87aac6f46 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 15 Nov 2024 11:13:57 +0800
Subject: [PATCH 07/28] chore(deps): bump github.com/onsi/gomega from 1.33.1 to 1.35.1 (#1119)

Bumps [github.com/onsi/gomega](https://github.com/onsi/gomega) from 1.33.1 to 1.35.1.
- [Release notes](https://github.com/onsi/gomega/releases)
- [Changelog](https://github.com/onsi/gomega/blob/master/CHANGELOG.md)
- [Commits](https://github.com/onsi/gomega/compare/v1.33.1...v1.35.1)

---
updated-dependencies:
- dependency-name: github.com/onsi/gomega
  dependency-type: direct:production
  update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 22 +++++++++++----------- go.sum | 40 ++++++++++++++++++++-------------------- 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/go.mod b/go.mod index 6dd0f7a46..88b3a7f6b 100644 --- a/go.mod +++ b/go.mod @@ -3,11 +3,12 @@ module github.com/OT-CONTAINER-KIT/redis-operator go 1.22 require ( + github.com/avast/retry-go v3.0.0+incompatible github.com/banzaicloud/k8s-objectmatcher v1.8.0 github.com/go-logr/logr v1.4.2 github.com/go-redis/redismock/v9 v9.2.0 - github.com/onsi/ginkgo/v2 v2.17.3 - github.com/onsi/gomega v1.33.1 + github.com/onsi/ginkgo/v2 v2.20.1 + github.com/onsi/gomega v1.35.1 github.com/pkg/errors v0.9.1 github.com/redis/go-redis/v9 v9.5.1 github.com/stretchr/testify v1.9.0 @@ -20,7 +21,6 @@ require ( require ( emperror.dev/errors v0.8.0 // indirect - github.com/avast/retry-go v3.0.0+incompatible // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -40,7 +40,7 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect + github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/imdario/mergo v0.3.16 // indirect @@ -60,17 +60,17 @@ require ( github.com/spf13/pflag v1.0.5 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect - golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 // indirect - golang.org/x/net v0.24.0 // indirect + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.18.0 // indirect - golang.org/x/sys v0.19.0 // indirect - golang.org/x/term v0.19.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/term v0.25.0 // indirect + golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.20.0 // indirect + golang.org/x/tools v0.24.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/protobuf v1.35.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index a2296eeec..fae2f852c 100644 --- a/go.sum +++ b/go.sum @@ -100,8 +100,8 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= -github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= +github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA= +github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.1.1/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -153,12 +153,12 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.17.3 h1:oJcvKpIb7/8uLpDDtnQuf18xVnwKp8DTD7DQ6gTd/MU= -github.com/onsi/ginkgo/v2 v2.17.3/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= +github.com/onsi/ginkgo/v2 v2.20.1 h1:YlVIbqct+ZmnEph770q9Q7NVAz4wwIiVNahee6JyUzo= +github.com/onsi/ginkgo/v2 v2.20.1/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= -github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= +github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -200,8 +200,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 h1:aAcj0Da7eBAtrTp03QXWvm88pSyOt+UgdZw2BFZ+lEw= -golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8/go.mod h1:CQ1k9gNrJ50XIzaKCRR2hssIjF07kZFEiieALBM/ARQ= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -221,8 +221,8 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod 
h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= @@ -244,18 +244,18 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= +golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -269,8 +269,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= -golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -298,8 +298,8 @@ 
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= From 3135e0f4eb7cb22baa94babac1bcbf9e62fe40c1 Mon Sep 17 00:00:00 2001 From: yangw Date: Fri, 15 Nov 2024 17:45:01 +0800 Subject: [PATCH 08/28] feat: add context param and get logger from it (#1132) * rename Signed-off-by: drivebyer * add context param Signed-off-by: drivebyer * remove logger Signed-off-by: drivebyer * remove logger Signed-off-by: drivebyer * remove logger Signed-off-by: drivebyer * lint Signed-off-by: drivebyer * update * update --------- Signed-off-by: drivebyer --- main.go | 18 +- mocks/utils/utils.go | 3 +- pkg/controllers/redis/redis_controller.go | 34 +-- .../redis/redis_controller_suite_test.go | 2 +- .../rediscluster/rediscluster_controller.go | 146 +++++----- .../rediscluster_controller_suite_test.go | 5 +- .../redisreplication_controller.go | 58 ++-- .../redisreplication_controller_suite_test.go | 7 +- .../redissentinel/redissentinel_controller.go | 32 +-- pkg/controllerutil/controller_common.go | 15 +- pkg/k8sutils/cluster-scaling.go | 175 ++++++------ pkg/k8sutils/cluster-scaling_test.go | 13 +- pkg/k8sutils/finalizer.go | 40 +-- pkg/k8sutils/finalizer_test.go | 24 +- pkg/k8sutils/pod.go | 11 +- pkg/k8sutils/poddisruption.go | 88 +++--- pkg/k8sutils/redis-cluster.go | 64 +++-- pkg/k8sutils/redis-cluster_test.go | 11 +- pkg/k8sutils/redis-replication.go | 38 ++- pkg/k8sutils/redis-sentinel.go | 66 ++--- pkg/k8sutils/redis-sentinel_test.go | 10 +- pkg/k8sutils/redis-standalone.go | 24 +- pkg/k8sutils/redis.go | 262 +++++++++--------- pkg/k8sutils/redis_test.go | 38 +-- pkg/k8sutils/secrets.go | 21 +- pkg/k8sutils/secrets_test.go | 10 +- pkg/k8sutils/services.go | 51 ++-- pkg/k8sutils/services_test.go | 10 +- pkg/k8sutils/statefulset.go | 74 +++-- pkg/k8sutils/statefulset_test.go | 17 +- pkg/k8sutils/status.go | 15 +- 31 files changed, 632 insertions(+), 750 deletions(-) diff --git a/main.go b/main.go index 9d46df50b..576d16fbf 100644 --- a/main.go +++ b/main.go @@ -118,37 +118,32 @@ func main() { os.Exit(1) } - if err = (&redis.RedisReconciler{ + if err = (&redis.Reconciler{ Client: mgr.GetClient(), K8sClient: k8sclient, Dk8sClient: dk8sClient, - Log: ctrl.Log.WithName("controllers").WithName("Redis"), Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Redis") os.Exit(1) } - rcLog := ctrl.Log.WithName("controllers").WithName("RedisCluster") - if err = (&rediscluster.RedisClusterReconciler{ + if err = (&rediscluster.Reconciler{ Client: mgr.GetClient(), K8sClient: k8sclient, 
Dk8sClient: dk8sClient, - Log: rcLog, Scheme: mgr.GetScheme(), - StatefulSet: k8sutils.NewStatefulSetService(k8sclient, rcLog), + StatefulSet: k8sutils.NewStatefulSetService(k8sclient), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "RedisCluster") os.Exit(1) } - rrLog := ctrl.Log.WithName("controllers").WithName("RedisReplication") - if err = (&redisreplication.RedisReplicationReconciler{ + if err = (&redisreplication.Reconciler{ Client: mgr.GetClient(), K8sClient: k8sclient, Dk8sClient: dk8sClient, - Log: rrLog, Scheme: mgr.GetScheme(), - Pod: k8sutils.NewPodService(k8sclient, rrLog), - StatefulSet: k8sutils.NewStatefulSetService(k8sclient, rrLog), + Pod: k8sutils.NewPodService(k8sclient), + StatefulSet: k8sutils.NewStatefulSetService(k8sclient), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "RedisReplication") os.Exit(1) @@ -157,7 +152,6 @@ func main() { Client: mgr.GetClient(), K8sClient: k8sclient, Dk8sClient: dk8sClient, - Log: ctrl.Log.WithName("controllers").WithName("RedisSentinel"), Scheme: mgr.GetScheme(), ReplicationWatcher: intctrlutil.NewResourceWatcher(), }).SetupWithManager(mgr); err != nil { diff --git a/mocks/utils/utils.go b/mocks/utils/utils.go index b48dad6a2..93bf3d90b 100644 --- a/mocks/utils/utils.go +++ b/mocks/utils/utils.go @@ -1,6 +1,7 @@ package utils import ( + "context" "fmt" "strconv" @@ -76,7 +77,7 @@ func CreateFakeObjectWithSecret(name, namespace, key string) []runtime.Object { return []runtime.Object{secret} } -func CreateFakeClientWithSecrets(cr *redisv1beta2.RedisCluster, secretName, secretKey, secretValue string) *fake.Clientset { +func CreateFakeClientWithSecrets(ctx context.Context, cr *redisv1beta2.RedisCluster, secretName, secretKey, secretValue string) *fake.Clientset { leaderReplicas := cr.Spec.GetReplicaCounts("leader") followerReplicas := cr.Spec.GetReplicaCounts("follower") pods := make([]runtime.Object, 0) diff --git a/pkg/controllers/redis/redis_controller.go b/pkg/controllers/redis/redis_controller.go index 41f11d811..061f33e4c 100644 --- a/pkg/controllers/redis/redis_controller.go +++ b/pkg/controllers/redis/redis_controller.go @@ -23,7 +23,6 @@ import ( redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" intctrlutil "github.com/OT-CONTAINER-KIT/redis-operator/pkg/controllerutil" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/k8sutils" - "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" @@ -31,49 +30,46 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -// RedisReconciler reconciles a Redis object -type RedisReconciler struct { +// Reconciler reconciles a Redis object +type Reconciler struct { client.Client K8sClient kubernetes.Interface Dk8sClient dynamic.Interface - Log logr.Logger Scheme *runtime.Scheme } -func (r *RedisReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - reqLogger := r.Log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name) - reqLogger.Info("Reconciling opstree redis controller") +func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { instance := &redisv1beta2.Redis{} err := r.Client.Get(context.TODO(), req.NamespacedName, instance) if err != nil { - return intctrlutil.RequeueWithErrorChecking(err, reqLogger, "failed to get redis instance") + return intctrlutil.RequeueWithErrorChecking(ctx, err, "failed to get redis 
instance") } if instance.ObjectMeta.GetDeletionTimestamp() != nil { - if err = k8sutils.HandleRedisFinalizer(r.Client, r.K8sClient, r.Log, instance); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "failed to handle redis finalizer") + if err = k8sutils.HandleRedisFinalizer(ctx, r.Client, r.K8sClient, instance); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "failed to handle redis finalizer") } return intctrlutil.Reconciled() } if _, found := instance.ObjectMeta.GetAnnotations()["redis.opstreelabs.in/skip-reconcile"]; found { - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "found skip reconcile annotation") + return intctrlutil.RequeueAfter(ctx, time.Second*10, "found skip reconcile annotation") } - if err = k8sutils.AddFinalizer(instance, k8sutils.RedisFinalizer, r.Client); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "failed to add finalizer") + if err = k8sutils.AddFinalizer(ctx, instance, k8sutils.RedisFinalizer, r.Client); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "failed to add finalizer") } - err = k8sutils.CreateStandaloneRedis(instance, r.K8sClient) + err = k8sutils.CreateStandaloneRedis(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "failed to create redis") + return intctrlutil.RequeueWithError(ctx, err, "failed to create redis") } - err = k8sutils.CreateStandaloneService(instance, r.K8sClient) + err = k8sutils.CreateStandaloneService(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "failed to create service") + return intctrlutil.RequeueWithError(ctx, err, "failed to create service") } - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "requeue after 10 seconds") + return intctrlutil.RequeueAfter(ctx, time.Second*10, "requeue after 10 seconds") } // SetupWithManager sets up the controller with the Manager. -func (r *RedisReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&redisv1beta2.Redis{}). 
Complete(r) diff --git a/pkg/controllers/redis/redis_controller_suite_test.go b/pkg/controllers/redis/redis_controller_suite_test.go index a7a7b4b59..594bd2007 100644 --- a/pkg/controllers/redis/redis_controller_suite_test.go +++ b/pkg/controllers/redis/redis_controller_suite_test.go @@ -99,7 +99,7 @@ var _ = BeforeSuite(func() { dk8sClient, err := dynamic.NewForConfig(cfg) Expect(err).ToNot(HaveOccurred()) - err = (&RedisReconciler{ + err = (&Reconciler{ Client: k8sManager.GetClient(), K8sClient: k8sClient, Dk8sClient: dk8sClient, diff --git a/pkg/controllers/rediscluster/rediscluster_controller.go b/pkg/controllers/rediscluster/rediscluster_controller.go index acfc56079..0c79a2a56 100644 --- a/pkg/controllers/rediscluster/rediscluster_controller.go +++ b/pkg/controllers/rediscluster/rediscluster_controller.go @@ -26,42 +26,40 @@ import ( intctrlutil "github.com/OT-CONTAINER-KIT/redis-operator/pkg/controllerutil" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/k8sutils" retry "github.com/avast/retry-go" - "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" ) -// RedisClusterReconciler reconciles a RedisCluster object -type RedisClusterReconciler struct { +// Reconciler reconciles a RedisCluster object +type Reconciler struct { client.Client k8sutils.StatefulSet K8sClient kubernetes.Interface Dk8sClient dynamic.Interface - Log logr.Logger Scheme *runtime.Scheme } -func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - reqLogger := r.Log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name) - reqLogger.V(1).Info("Reconciling opstree redis Cluster controller") +func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) instance := &redisv1beta2.RedisCluster{} err := r.Client.Get(context.TODO(), req.NamespacedName, instance) if err != nil { - return intctrlutil.RequeueWithErrorChecking(err, reqLogger, "failed to get redis cluster instance") + return intctrlutil.RequeueWithErrorChecking(ctx, err, "failed to get redis cluster instance") } if instance.ObjectMeta.GetDeletionTimestamp() != nil { - if err = k8sutils.HandleRedisClusterFinalizer(r.Client, r.K8sClient, r.Log, instance); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "failed to handle redis cluster finalizer") + if err = k8sutils.HandleRedisClusterFinalizer(ctx, r.Client, r.K8sClient, instance); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "failed to handle redis cluster finalizer") } return intctrlutil.Reconciled() } if _, found := instance.ObjectMeta.GetAnnotations()["rediscluster.opstreelabs.in/skip-reconcile"]; found { - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "found skip reconcile annotation") + return intctrlutil.RequeueAfter(ctx, time.Second*10, "found skip reconcile annotation") } instance.SetDefault() @@ -69,83 +67,83 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request followerReplicas := instance.Spec.GetReplicaCounts("follower") totalReplicas := leaderReplicas + followerReplicas - if err = k8sutils.AddFinalizer(instance, k8sutils.RedisClusterFinalizer, r.Client); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "failed to add finalizer") + if err = 
k8sutils.AddFinalizer(ctx, instance, k8sutils.RedisClusterFinalizer, r.Client); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "failed to add finalizer") } // Check if the cluster is downscaled - if leaderCount := k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, r.Log, instance, "leader"); leaderReplicas < leaderCount { - reqLogger.Info("Redis cluster is downscaling...", "Current.LeaderReplicas", leaderCount, "Desired.LeaderReplicas", leaderReplicas) + if leaderCount := k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, instance, "leader"); leaderReplicas < leaderCount { + logger.Info("Redis cluster is downscaling...", "Current.LeaderReplicas", leaderCount, "Desired.LeaderReplicas", leaderReplicas) for shardIdx := leaderCount - 1; shardIdx >= leaderReplicas; shardIdx-- { - reqLogger.Info("Remove the shard", "Shard.Index", shardIdx) + logger.Info("Remove the shard", "Shard.Index", shardIdx) // Imp if the last index of leader sts is not leader make it then // check whether the redis is leader or not ? // if not true then make it leader pod - if !(k8sutils.VerifyLeaderPod(ctx, r.K8sClient, r.Log, instance)) { + if !(k8sutils.VerifyLeaderPod(ctx, r.K8sClient, instance)) { // lastLeaderPod is slaving right now Make it the master Pod // We have to bring a manual failover here to make it a leaderPod // clusterFailover should also include the clusterReplicate since we have to map the followers to new leader - k8sutils.ClusterFailover(ctx, r.K8sClient, r.Log, instance) + k8sutils.ClusterFailover(ctx, r.K8sClient, instance) } // Step 1 Remove the Follower Node - k8sutils.RemoveRedisFollowerNodesFromCluster(ctx, r.K8sClient, r.Log, instance) + k8sutils.RemoveRedisFollowerNodesFromCluster(ctx, r.K8sClient, instance) // Step 2 Reshard the Cluster - k8sutils.ReshardRedisCluster(r.K8sClient, r.Log, instance, true) + k8sutils.ReshardRedisCluster(ctx, r.K8sClient, instance, true) } - reqLogger.Info("Redis cluster is downscaled... Rebalancing the cluster") + logger.Info("Redis cluster is downscaled... Rebalancing the cluster") // Step 3 Rebalance the cluster - k8sutils.RebalanceRedisCluster(r.K8sClient, r.Log, instance) - reqLogger.Info("Redis cluster is downscaled... Rebalancing the cluster is done") - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "") + k8sutils.RebalanceRedisCluster(ctx, r.K8sClient, instance) + logger.Info("Redis cluster is downscaled... 
Rebalancing the cluster is done") + return intctrlutil.RequeueAfter(ctx, time.Second*10, "") } // Mark the cluster status as initializing if there are no leader or follower nodes if (instance.Status.ReadyLeaderReplicas == 0 && instance.Status.ReadyFollowerReplicas == 0) || instance.Status.ReadyLeaderReplicas != leaderReplicas { - err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterInitializing, status.InitializingClusterLeaderReason, instance.Status.ReadyLeaderReplicas, instance.Status.ReadyFollowerReplicas, r.Dk8sClient) + err = k8sutils.UpdateRedisClusterStatus(ctx, instance, status.RedisClusterInitializing, status.InitializingClusterLeaderReason, instance.Status.ReadyLeaderReplicas, instance.Status.ReadyFollowerReplicas, r.Dk8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } } if leaderReplicas != 0 { - err = k8sutils.CreateRedisLeaderService(instance, r.K8sClient) + err = k8sutils.CreateRedisLeaderService(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } } - err = k8sutils.CreateRedisLeader(instance, r.K8sClient) + err = k8sutils.CreateRedisLeader(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } - err = k8sutils.ReconcileRedisPodDisruptionBudget(instance, "leader", instance.Spec.RedisLeader.PodDisruptionBudget, r.K8sClient) + err = k8sutils.ReconcileRedisPodDisruptionBudget(ctx, instance, "leader", instance.Spec.RedisLeader.PodDisruptionBudget, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } if r.IsStatefulSetReady(ctx, instance.Namespace, instance.Name+"-leader") { // Mark the cluster status as initializing if there are no follower nodes if (instance.Status.ReadyLeaderReplicas == 0 && instance.Status.ReadyFollowerReplicas == 0) || instance.Status.ReadyFollowerReplicas != followerReplicas { - err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterInitializing, status.InitializingClusterFollowerReason, leaderReplicas, instance.Status.ReadyFollowerReplicas, r.Dk8sClient) + err = k8sutils.UpdateRedisClusterStatus(ctx, instance, status.RedisClusterInitializing, status.InitializingClusterFollowerReason, leaderReplicas, instance.Status.ReadyFollowerReplicas, r.Dk8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } } // if we have followers create their service. 
if followerReplicas != 0 { - err = k8sutils.CreateRedisFollowerService(instance, r.K8sClient) + err = k8sutils.CreateRedisFollowerService(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } } - err = k8sutils.CreateRedisFollower(instance, r.K8sClient) + err = k8sutils.CreateRedisFollower(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } - err = k8sutils.ReconcileRedisPodDisruptionBudget(instance, "follower", instance.Spec.RedisFollower.PodDisruptionBudget, r.K8sClient) + err = k8sutils.ReconcileRedisPodDisruptionBudget(ctx, instance, "follower", instance.Spec.RedisFollower.PodDisruptionBudget, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } } @@ -155,57 +153,57 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request // Mark the cluster status as bootstrapping if all the leader and follower nodes are ready if !(instance.Status.ReadyLeaderReplicas == leaderReplicas && instance.Status.ReadyFollowerReplicas == followerReplicas) { - err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterBootstrap, status.BootstrapClusterReason, leaderReplicas, followerReplicas, r.Dk8sClient) + err = k8sutils.UpdateRedisClusterStatus(ctx, instance, status.RedisClusterBootstrap, status.BootstrapClusterReason, leaderReplicas, followerReplicas, r.Dk8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } } - if nc := k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, r.Log, instance, ""); nc != totalReplicas { - reqLogger.Info("Creating redis cluster by executing cluster creation commands") - leaderCount := k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, r.Log, instance, "leader") + if nc := k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, instance, ""); nc != totalReplicas { + logger.Info("Creating redis cluster by executing cluster creation commands") + leaderCount := k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, instance, "leader") if leaderCount != leaderReplicas { - reqLogger.Info("Not all leader are part of the cluster...", "Leaders.Count", leaderCount, "Instance.Size", leaderReplicas) + logger.Info("Not all leader are part of the cluster...", "Leaders.Count", leaderCount, "Instance.Size", leaderReplicas) if leaderCount <= 2 { - k8sutils.ExecuteRedisClusterCommand(ctx, r.K8sClient, r.Log, instance) + k8sutils.ExecuteRedisClusterCommand(ctx, r.K8sClient, instance) } else { if leaderCount < leaderReplicas { // Scale up the cluster // Step 2 : Add Redis Node - k8sutils.AddRedisNodeToCluster(ctx, r.K8sClient, r.Log, instance) + k8sutils.AddRedisNodeToCluster(ctx, r.K8sClient, instance) // Step 3 Rebalance the cluster using the empty masters - k8sutils.RebalanceRedisClusterEmptyMasters(r.K8sClient, r.Log, instance) + k8sutils.RebalanceRedisClusterEmptyMasters(ctx, r.K8sClient, instance) } } } else { if followerReplicas > 0 { - reqLogger.Info("All leader are part of the cluster, adding follower/replicas", "Leaders.Count", leaderCount, "Instance.Size", leaderReplicas, "Follower.Replicas", followerReplicas) - k8sutils.ExecuteRedisReplicationCommand(ctx, r.K8sClient, r.Log, instance) + logger.Info("All leader are part of the cluster, adding follower/replicas", "Leaders.Count", leaderCount, "Instance.Size", 
leaderReplicas, "Follower.Replicas", followerReplicas) + k8sutils.ExecuteRedisReplicationCommand(ctx, r.K8sClient, instance) } else { - reqLogger.Info("no follower/replicas configured, skipping replication configuration", "Leaders.Count", leaderCount, "Leader.Size", leaderReplicas, "Follower.Replicas", followerReplicas) + logger.Info("no follower/replicas configured, skipping replication configuration", "Leaders.Count", leaderCount, "Leader.Size", leaderReplicas, "Follower.Replicas", followerReplicas) } } - return intctrlutil.RequeueAfter(reqLogger, time.Second*60, "Redis cluster count is not desired", "Current.Count", nc, "Desired.Count", totalReplicas) + return intctrlutil.RequeueAfter(ctx, time.Second*60, "Redis cluster count is not desired", "Current.Count", nc, "Desired.Count", totalReplicas) } - reqLogger.V(1).Info("Number of Redis nodes match desired") - unhealthyNodeCount, err := k8sutils.UnhealthyNodesInCluster(ctx, r.K8sClient, r.Log, instance) + logger.Info("Number of Redis nodes match desired") + unhealthyNodeCount, err := k8sutils.UnhealthyNodesInCluster(ctx, r.K8sClient, instance) if err != nil { - reqLogger.Error(err, "failed to determine unhealthy node count in cluster") + logger.Error(err, "failed to determine unhealthy node count in cluster") } if int(totalReplicas) > 1 && unhealthyNodeCount >= int(totalReplicas)-1 { - err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterFailed, "RedisCluster has too many unhealthy nodes", leaderReplicas, followerReplicas, r.Dk8sClient) + err = k8sutils.UpdateRedisClusterStatus(ctx, instance, status.RedisClusterFailed, "RedisCluster has too many unhealthy nodes", leaderReplicas, followerReplicas, r.Dk8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } - reqLogger.Info("healthy leader count does not match desired; attempting to repair disconnected masters") - if err = k8sutils.RepairDisconnectedMasters(ctx, r.K8sClient, r.Log, instance); err != nil { - reqLogger.Error(err, "failed to repair disconnected masters") + logger.Info("healthy leader count does not match desired; attempting to repair disconnected masters") + if err = k8sutils.RepairDisconnectedMasters(ctx, r.K8sClient, instance); err != nil { + logger.Error(err, "failed to repair disconnected masters") } err = retry.Do(func() error { - nc, nErr := k8sutils.UnhealthyNodesInCluster(ctx, r.K8sClient, r.Log, instance) + nc, nErr := k8sutils.UnhealthyNodesInCluster(ctx, r.K8sClient, instance) if nErr != nil { return nErr } @@ -216,34 +214,34 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request }, retry.Attempts(3), retry.Delay(time.Second*5)) if err == nil { - reqLogger.Info("repairing unhealthy masters successful, no unhealthy masters left") - return intctrlutil.RequeueAfter(reqLogger, time.Second*30, "no unhealthy nodes found after repairing disconnected masters") + logger.Info("repairing unhealthy masters successful, no unhealthy masters left") + return intctrlutil.RequeueAfter(ctx, time.Second*30, "no unhealthy nodes found after repairing disconnected masters") } - reqLogger.Info("unhealthy nodes exist after attempting to repair disconnected masters; starting failover") - if err = k8sutils.ExecuteFailoverOperation(ctx, r.K8sClient, r.Log, instance); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + logger.Info("unhealthy nodes exist after attempting to repair disconnected masters; starting failover") + if err = 
k8sutils.ExecuteFailoverOperation(ctx, r.K8sClient, instance); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "") } } // Check If there is No Empty Master Node - if k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, r.Log, instance, "") == totalReplicas { - k8sutils.CheckIfEmptyMasters(ctx, r.K8sClient, r.Log, instance) + if k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, instance, "") == totalReplicas { + k8sutils.CheckIfEmptyMasters(ctx, r.K8sClient, instance) } // Mark the cluster status as ready if all the leader and follower nodes are ready if instance.Status.ReadyLeaderReplicas == leaderReplicas && instance.Status.ReadyFollowerReplicas == followerReplicas { - if k8sutils.RedisClusterStatusHealth(ctx, r.K8sClient, r.Log, instance) { - err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterReady, status.ReadyClusterReason, leaderReplicas, followerReplicas, r.Dk8sClient) + if k8sutils.RedisClusterStatusHealth(ctx, r.K8sClient, instance) { + err = k8sutils.UpdateRedisClusterStatus(ctx, instance, status.RedisClusterReady, status.ReadyClusterReason, leaderReplicas, followerReplicas, r.Dk8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } } } - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "") + return intctrlutil.RequeueAfter(ctx, time.Second*10, "") } // SetupWithManager sets up the controller with the Manager. -func (r *RedisClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&redisv1beta2.RedisCluster{}). Owns(&appsv1.StatefulSet{}). diff --git a/pkg/controllers/rediscluster/rediscluster_controller_suite_test.go b/pkg/controllers/rediscluster/rediscluster_controller_suite_test.go index 5f9d5ef2d..8cfe65206 100644 --- a/pkg/controllers/rediscluster/rediscluster_controller_suite_test.go +++ b/pkg/controllers/rediscluster/rediscluster_controller_suite_test.go @@ -100,13 +100,12 @@ var _ = BeforeSuite(func() { dk8sClient, err := dynamic.NewForConfig(cfg) Expect(err).ToNot(HaveOccurred()) - rcLog := ctrl.Log.WithName("controllers").WithName("RedisCluster") - err = (&RedisClusterReconciler{ + err = (&Reconciler{ Client: k8sManager.GetClient(), K8sClient: k8sClient, Dk8sClient: dk8sClient, Scheme: k8sManager.GetScheme(), - StatefulSet: k8sutils.NewStatefulSetService(k8sClient, rcLog), + StatefulSet: k8sutils.NewStatefulSetService(k8sClient), }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) diff --git a/pkg/controllers/redisreplication/redisreplication_controller.go b/pkg/controllers/redisreplication/redisreplication_controller.go index fb5955d6e..6b71bf56f 100644 --- a/pkg/controllers/redisreplication/redisreplication_controller.go +++ b/pkg/controllers/redisreplication/redisreplication_controller.go @@ -7,7 +7,6 @@ import ( redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" intctrlutil "github.com/OT-CONTAINER-KIT/redis-operator/pkg/controllerutil" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/k8sutils" - "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -15,76 +14,75 @@ import ( "k8s.io/client-go/kubernetes" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" ) -// RedisReplicationReconciler reconciles a RedisReplication object -type RedisReplicationReconciler struct { +// 
Reconciler reconciles a RedisReplication object +type Reconciler struct { client.Client k8sutils.Pod k8sutils.StatefulSet K8sClient kubernetes.Interface Dk8sClient dynamic.Interface - Log logr.Logger Scheme *runtime.Scheme } -func (r *RedisReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - reqLogger := r.Log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name) - reqLogger.Info("Reconciling opstree redis replication controller") +func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx, "Request.Namespace", req.Namespace, "Request.Name", req.Name) instance := &redisv1beta2.RedisReplication{} err := r.Client.Get(context.TODO(), req.NamespacedName, instance) if err != nil { - return intctrlutil.RequeueWithErrorChecking(err, reqLogger, "") + return intctrlutil.RequeueWithErrorChecking(ctx, err, "") } if instance.ObjectMeta.GetDeletionTimestamp() != nil { - if err = k8sutils.HandleRedisReplicationFinalizer(r.Client, r.K8sClient, r.Log, instance); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + if err = k8sutils.HandleRedisReplicationFinalizer(ctx, r.Client, r.K8sClient, instance); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "") } return intctrlutil.Reconciled() } if _, found := instance.ObjectMeta.GetAnnotations()["redisreplication.opstreelabs.in/skip-reconcile"]; found { - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "found skip reconcile annotation") + return intctrlutil.RequeueAfter(ctx, time.Second*10, "found skip reconcile annotation") } - if err = k8sutils.AddFinalizer(instance, k8sutils.RedisReplicationFinalizer, r.Client); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + if err = k8sutils.AddFinalizer(ctx, instance, k8sutils.RedisReplicationFinalizer, r.Client); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "") } - err = k8sutils.CreateReplicationRedis(instance, r.K8sClient) + err = k8sutils.CreateReplicationRedis(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } - err = k8sutils.CreateReplicationService(instance, r.K8sClient) + err = k8sutils.CreateReplicationService(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } if !r.IsStatefulSetReady(ctx, instance.Namespace, instance.Name) { return intctrlutil.Reconciled() } var realMaster string - masterNodes := k8sutils.GetRedisNodesByRole(ctx, r.K8sClient, r.Log, instance, "master") + masterNodes := k8sutils.GetRedisNodesByRole(ctx, r.K8sClient, instance, "master") if len(masterNodes) > 1 { - reqLogger.Info("Creating redis replication by executing replication creation commands") - slaveNodes := k8sutils.GetRedisNodesByRole(ctx, r.K8sClient, r.Log, instance, "slave") - realMaster = k8sutils.GetRedisReplicationRealMaster(ctx, r.K8sClient, r.Log, instance, masterNodes) + logger.Info("Creating redis replication by executing replication creation commands") + slaveNodes := k8sutils.GetRedisNodesByRole(ctx, r.K8sClient, instance, "slave") + realMaster = k8sutils.GetRedisReplicationRealMaster(ctx, r.K8sClient, instance, masterNodes) if len(slaveNodes) == 0 { realMaster = masterNodes[0] } - if err = k8sutils.CreateMasterSlaveReplication(ctx, r.K8sClient, r.Log, instance, masterNodes, realMaster); err != nil { - return 
intctrlutil.RequeueAfter(reqLogger, time.Second*60, "") + if err = k8sutils.CreateMasterSlaveReplication(ctx, r.K8sClient, instance, masterNodes, realMaster); err != nil { + return intctrlutil.RequeueAfter(ctx, time.Second*60, "") } } - realMaster = k8sutils.GetRedisReplicationRealMaster(ctx, r.K8sClient, r.Log, instance, masterNodes) + realMaster = k8sutils.GetRedisReplicationRealMaster(ctx, r.K8sClient, instance, masterNodes) if err = r.UpdateRedisReplicationMaster(ctx, instance, realMaster); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } if err = r.UpdateRedisPodRoleLabel(ctx, instance, realMaster); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "") + return intctrlutil.RequeueAfter(ctx, time.Second*10, "") } -func (r *RedisReplicationReconciler) UpdateRedisReplicationMaster(ctx context.Context, instance *redisv1beta2.RedisReplication, masterNode string) error { +func (r *Reconciler) UpdateRedisReplicationMaster(ctx context.Context, instance *redisv1beta2.RedisReplication, masterNode string) error { if instance.Status.MasterNode == masterNode { return nil } @@ -95,7 +93,7 @@ func (r *RedisReplicationReconciler) UpdateRedisReplicationMaster(ctx context.Co return nil } -func (r *RedisReplicationReconciler) UpdateRedisPodRoleLabel(ctx context.Context, cr *redisv1beta2.RedisReplication, masterNode string) error { +func (r *Reconciler) UpdateRedisPodRoleLabel(ctx context.Context, cr *redisv1beta2.RedisReplication, masterNode string) error { labels := k8sutils.GetRedisReplicationLabels(cr) pods, err := r.ListPods(ctx, cr.GetNamespace(), labels) if err != nil { @@ -121,7 +119,7 @@ func (r *RedisReplicationReconciler) UpdateRedisPodRoleLabel(ctx context.Context } // SetupWithManager sets up the controller with the Manager. -func (r *RedisReplicationReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&redisv1beta2.RedisReplication{}). Owns(&appsv1.StatefulSet{}). 
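[Editor's aside, not part of the patch: the theme running through these controller hunks is that each reconciler stops carrying a logr.Logger field (r.Log / reqLogger) and instead recovers a request-scoped logger from the context via sigs.k8s.io/controller-runtime/pkg/log, exactly as the diffs above and below do. A minimal sketch of that pattern follows; Reconciler and doWork are placeholder names, and the log.FromContext call matches the signature the patch itself uses.]

    package example

    import (
    	"context"

    	ctrl "sigs.k8s.io/controller-runtime"
    	"sigs.k8s.io/controller-runtime/pkg/log"
    )

    type Reconciler struct{}

    func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    	// controller-runtime injects a request-scoped logger into ctx before
    	// calling Reconcile; FromContext retrieves it and can attach extra
    	// key/value pairs that every downstream log line will then carry.
    	logger := log.FromContext(ctx, "Request.Namespace", req.Namespace, "Request.Name", req.Name)
    	logger.Info("reconciling")

    	// Helpers take ctx instead of a logr.Logger parameter and recover
    	// the same logger on demand, which is what lets the patch delete
    	// every logger argument from the k8sutils call sites.
    	doWork(ctx)
    	return ctrl.Result{}, nil
    }

    func doWork(ctx context.Context) {
    	log.FromContext(ctx).V(1).Info("doing work")
    }
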
diff --git a/pkg/controllers/redisreplication/redisreplication_controller_suite_test.go b/pkg/controllers/redisreplication/redisreplication_controller_suite_test.go index 0081b194e..445f7ce7d 100644 --- a/pkg/controllers/redisreplication/redisreplication_controller_suite_test.go +++ b/pkg/controllers/redisreplication/redisreplication_controller_suite_test.go @@ -100,14 +100,13 @@ var _ = BeforeSuite(func() { dk8sClient, err := dynamic.NewForConfig(cfg) Expect(err).ToNot(HaveOccurred()) - rrLog := ctrl.Log.WithName("controllers").WithName("RedisReplication") - err = (&RedisReplicationReconciler{ + err = (&Reconciler{ Client: k8sManager.GetClient(), K8sClient: k8sClient, Dk8sClient: dk8sClient, Scheme: k8sManager.GetScheme(), - Pod: k8sutils.NewPodService(k8sClient, rrLog), - StatefulSet: k8sutils.NewStatefulSetService(k8sClient, rrLog), + Pod: k8sutils.NewPodService(k8sClient), + StatefulSet: k8sutils.NewStatefulSetService(k8sClient), }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) diff --git a/pkg/controllers/redissentinel/redissentinel_controller.go b/pkg/controllers/redissentinel/redissentinel_controller.go index feb1ba695..420730a4d 100644 --- a/pkg/controllers/redissentinel/redissentinel_controller.go +++ b/pkg/controllers/redissentinel/redissentinel_controller.go @@ -7,7 +7,6 @@ import ( redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" intctrlutil "github.com/OT-CONTAINER-KIT/redis-operator/pkg/controllerutil" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/k8sutils" - "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/dynamic" @@ -21,41 +20,38 @@ type RedisSentinelReconciler struct { client.Client K8sClient kubernetes.Interface Dk8sClient dynamic.Interface - Log logr.Logger Scheme *runtime.Scheme ReplicationWatcher *intctrlutil.ResourceWatcher } func (r *RedisSentinelReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - reqLogger := r.Log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name) - reqLogger.Info("Reconciling opstree redis controller") instance := &redisv1beta2.RedisSentinel{} err := r.Client.Get(context.TODO(), req.NamespacedName, instance) if err != nil { - return intctrlutil.RequeueWithErrorChecking(err, reqLogger, "") + return intctrlutil.RequeueWithErrorChecking(ctx, err, "") } if instance.ObjectMeta.GetDeletionTimestamp() != nil { - if err = k8sutils.HandleRedisSentinelFinalizer(r.Client, r.Log, instance); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + if err = k8sutils.HandleRedisSentinelFinalizer(ctx, r.Client, instance); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "") } return intctrlutil.Reconciled() } if _, found := instance.ObjectMeta.GetAnnotations()["redissentinel.opstreelabs.in/skip-reconcile"]; found { - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "found skip reconcile annotation") + return intctrlutil.RequeueAfter(ctx, time.Second*10, "found skip reconcile annotation") } // Get total Sentinel Replicas // sentinelReplicas := instance.Spec.GetSentinelCounts("sentinel") - if err = k8sutils.AddFinalizer(instance, k8sutils.RedisSentinelFinalizer, r.Client); err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + if err = k8sutils.AddFinalizer(ctx, instance, k8sutils.RedisSentinelFinalizer, r.Client); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "") } - if instance.Spec.RedisSentinelConfig != nil && 
!k8sutils.IsRedisReplicationReady(ctx, reqLogger, r.K8sClient, r.Dk8sClient, instance) { - return intctrlutil.RequeueAfter(reqLogger, time.Second*10, "Redis Replication is specified but not ready") + if instance.Spec.RedisSentinelConfig != nil && !k8sutils.IsRedisReplicationReady(ctx, r.K8sClient, r.Dk8sClient, instance) { + return intctrlutil.RequeueAfter(ctx, time.Second*10, "Redis Replication is specified but not ready") } if instance.Spec.RedisSentinelConfig != nil { @@ -70,20 +66,20 @@ func (r *RedisSentinelReconciler) Reconcile(ctx context.Context, req ctrl.Reques } // Create Redis Sentinel - err = k8sutils.CreateRedisSentinel(ctx, r.K8sClient, r.Log, instance, r.K8sClient, r.Dk8sClient) + err = k8sutils.CreateRedisSentinel(ctx, r.K8sClient, instance, r.K8sClient, r.Dk8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } - err = k8sutils.ReconcileSentinelPodDisruptionBudget(instance, instance.Spec.PodDisruptionBudget, r.K8sClient) + err = k8sutils.ReconcileSentinelPodDisruptionBudget(ctx, instance, instance.Spec.PodDisruptionBudget, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } // Create the Service for Redis Sentinel - err = k8sutils.CreateRedisSentinelService(instance, r.K8sClient) + err = k8sutils.CreateRedisSentinelService(ctx, instance, r.K8sClient) if err != nil { - return intctrlutil.RequeueWithError(err, reqLogger, "") + return intctrlutil.RequeueWithError(ctx, err, "") } return intctrlutil.Reconciled() } diff --git a/pkg/controllerutil/controller_common.go b/pkg/controllerutil/controller_common.go index ba063060b..2163b0291 100644 --- a/pkg/controllerutil/controller_common.go +++ b/pkg/controllerutil/controller_common.go @@ -1,10 +1,11 @@ package controllerutil import ( + "context" "time" - "github.com/go-logr/logr" apierrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -12,29 +13,29 @@ func Reconciled() (reconcile.Result, error) { return reconcile.Result{}, nil } -func RequeueAfter(logger logr.Logger, duration time.Duration, msg string, keysAndValues ...interface{}) (reconcile.Result, error) { +func RequeueAfter(ctx context.Context, duration time.Duration, msg string, keysAndValues ...interface{}) (reconcile.Result, error) { keysAndValues = append(keysAndValues, "duration", duration.String()) if msg == "" { msg = "requeue-after" } - logger.V(1).Info(msg, keysAndValues...) + log.FromContext(ctx).V(1).Info(msg, keysAndValues...) return reconcile.Result{ Requeue: true, RequeueAfter: duration, }, nil } -func RequeueWithError(err error, logger logr.Logger, msg string, keysAndValues ...interface{}) (reconcile.Result, error) { +func RequeueWithError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) (reconcile.Result, error) { if msg == "" { msg = "requeue with error" } - logger.Error(err, msg, keysAndValues...) + log.FromContext(ctx).Error(err, msg, keysAndValues...) return reconcile.Result{}, err } -func RequeueWithErrorChecking(err error, logger logr.Logger, msg string, keysAndValues ...interface{}) (reconcile.Result, error) { +func RequeueWithErrorChecking(ctx context.Context, err error, msg string, keysAndValues ...interface{}) (reconcile.Result, error) { if apierrors.IsNotFound(err) { return Reconciled() } - return RequeueWithError(err, logger, msg, keysAndValues...) 
+ return RequeueWithError(ctx, err, msg, keysAndValues...) } diff --git a/pkg/k8sutils/cluster-scaling.go b/pkg/k8sutils/cluster-scaling.go index 4b6edec9e..b2bd5a0da 100644 --- a/pkg/k8sutils/cluster-scaling.go +++ b/pkg/k8sutils/cluster-scaling.go @@ -7,21 +7,20 @@ import ( "strings" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" - "github.com/go-logr/logr" redis "github.com/redis/go-redis/v9" "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/log" ) // ReshardRedisCluster transfer the slots from the last node to the first node. // // NOTE: when all slot been transferred, the node become slave of the first master node. -func ReshardRedisCluster(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, remove bool) { - ctx := context.TODO() - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") +func ReshardRedisCluster(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, remove bool) { + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() var cmd []string - currentRedisCount := CheckRedisNodeCount(ctx, client, logger, cr, "leader") + currentRedisCount := CheckRedisNodeCount(ctx, client, cr, "leader") // Transfer Pod details transferPOD := RedisDetails{ @@ -38,13 +37,13 @@ func ReshardRedisCluster(client kubernetes.Interface, logger logr.Logger, cr *re if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(transferPOD, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(client, logger, transferPOD, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, transferPOD, *cr.Spec.Port)) } if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) @@ -55,41 +54,41 @@ func ReshardRedisCluster(client kubernetes.Interface, logger logr.Logger, cr *re //--cluster-from --cluster-to --cluster-slots --cluster-yes // Remove Node - removeNodeID := getRedisNodeID(ctx, client, logger, cr, removePOD) + removeNodeID := getRedisNodeID(ctx, client, cr, removePOD) cmd = append(cmd, "--cluster-from") cmd = append(cmd, removeNodeID) // Transfer Node - transferNodeID := getRedisNodeID(ctx, client, logger, cr, transferPOD) + transferNodeID := getRedisNodeID(ctx, client, cr, transferPOD) cmd = append(cmd, "--cluster-to") cmd = append(cmd, transferNodeID) // Cluster Slots - slot := getRedisClusterSlots(ctx, redisClient, logger, removeNodeID) + slot := getRedisClusterSlots(ctx, redisClient, removeNodeID) cmd = append(cmd, "--cluster-slots") cmd = append(cmd, slot) cmd = append(cmd, "--cluster-yes") - logger.V(1).Info("Redis cluster reshard command is", "Command", cmd) + log.FromContext(ctx).V(1).Info("Redis cluster reshard command is", "Command", cmd) if slot == "0" { - logger.V(1).Info("Skipped the execution of", "Cmd", cmd) + log.FromContext(ctx).V(1).Info("Skipped the execution of", "Cmd", cmd) return } - executeCommand(client, logger, 
cr, cmd, cr.ObjectMeta.Name+"-leader-0") + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") if remove { - RemoveRedisNodeFromCluster(ctx, client, logger, cr, removePOD) + RemoveRedisNodeFromCluster(ctx, client, cr, removePOD) } } -func getRedisClusterSlots(ctx context.Context, redisClient *redis.Client, logger logr.Logger, nodeID string) string { +func getRedisClusterSlots(ctx context.Context, redisClient *redis.Client, nodeID string) string { totalSlots := 0 redisSlots, err := redisClient.ClusterSlots(ctx).Result() if err != nil { - logger.Error(err, "Failed to Get Cluster Slots") + log.FromContext(ctx).Error(err, "Failed to Get Cluster Slots") return "" } for _, slot := range redisSlots { @@ -102,39 +101,39 @@ func getRedisClusterSlots(ctx context.Context, redisClient *redis.Client, logger } } - logger.V(1).Info("Total cluster slots to be transferred from", "node", nodeID, "is", totalSlots) + log.FromContext(ctx).V(1).Info("Total cluster slots to be transferred from", "node", nodeID, "is", totalSlots) return strconv.Itoa(totalSlots) } // getRedisNodeID would return nodeID of a redis node by passing pod -func getRedisNodeID(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, pod RedisDetails) string { - redisClient := configureRedisClient(client, logger, cr, pod.PodName) +func getRedisNodeID(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, pod RedisDetails) string { + redisClient := configureRedisClient(ctx, client, cr, pod.PodName) defer redisClient.Close() pong, err := redisClient.Ping(ctx).Result() if err != nil || pong != "PONG" { - logger.Error(err, "Failed to ping Redis server") + log.FromContext(ctx).Error(err, "Failed to ping Redis server") return "" } cmd := redis.NewStringCmd(ctx, "cluster", "myid") err = redisClient.Process(ctx, cmd) if err != nil { - logger.Error(err, "Redis command failed with this error") + log.FromContext(ctx).Error(err, "Redis command failed with this error") return "" } output, err := cmd.Result() if err != nil { - logger.Error(err, "Redis command failed with this error") + log.FromContext(ctx).Error(err, "Redis command failed with this error") return "" } - logger.V(1).Info("Redis node ID ", "is", output) + log.FromContext(ctx).V(1).Info("Redis node ID ", "is", output) return output } // Rebalance the Redis CLuster using the Empty Master Nodes -func RebalanceRedisClusterEmptyMasters(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { +func RebalanceRedisClusterEmptyMasters(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { // cmd = redis-cli --cluster rebalance : --cluster-use-empty-masters -a var cmd []string pod := RedisDetails{ @@ -146,15 +145,15 @@ func RebalanceRedisClusterEmptyMasters(client kubernetes.Interface, logger logr. 
if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(pod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(client, logger, pod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, pod, *cr.Spec.Port)) } cmd = append(cmd, "--cluster-use-empty-masters") if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) @@ -162,13 +161,13 @@ func RebalanceRedisClusterEmptyMasters(client kubernetes.Interface, logger logr. cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) - logger.V(1).Info("Redis cluster rebalance command is", "Command", cmd) - executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-1") + log.FromContext(ctx).V(1).Info("Redis cluster rebalance command is", "Command", cmd) + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-1") } -func CheckIfEmptyMasters(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { - totalRedisLeaderNodes := CheckRedisNodeCount(ctx, client, logger, cr, "leader") - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") +func CheckIfEmptyMasters(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { + totalRedisLeaderNodes := CheckRedisNodeCount(ctx, client, cr, "leader") + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() for i := 0; i < int(totalRedisLeaderNodes); i++ { @@ -176,19 +175,19 @@ func CheckIfEmptyMasters(ctx context.Context, client kubernetes.Interface, logge PodName: cr.ObjectMeta.Name + "-leader-" + strconv.Itoa(i), Namespace: cr.Namespace, } - podNodeID := getRedisNodeID(ctx, client, logger, cr, pod) - podSlots := getRedisClusterSlots(ctx, redisClient, logger, podNodeID) + podNodeID := getRedisNodeID(ctx, client, cr, pod) + podSlots := getRedisClusterSlots(ctx, redisClient, podNodeID) if podSlots == "0" || podSlots == "" { - logger.V(1).Info("Found Empty Redis Leader Node", "pod", pod) - RebalanceRedisClusterEmptyMasters(client, logger, cr) + log.FromContext(ctx).V(1).Info("Found Empty Redis Leader Node", "pod", pod) + RebalanceRedisClusterEmptyMasters(ctx, client, cr) break } } } // Rebalance Redis Cluster Would Rebalance the Redis Cluster without using the empty masters -func RebalanceRedisCluster(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { +func RebalanceRedisCluster(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { // cmd = redis-cli --cluster rebalance : -a var cmd []string pod := RedisDetails{ @@ -200,13 +199,13 @@ func RebalanceRedisCluster(client kubernetes.Interface, logger logr.Logger, cr * if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(pod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(client, logger, pod, *cr.Spec.Port)) + cmd = append(cmd, 
getRedisServerAddress(ctx, client, pod, *cr.Spec.Port)) } if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) @@ -214,14 +213,14 @@ func RebalanceRedisCluster(client kubernetes.Interface, logger logr.Logger, cr * cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) - logger.V(1).Info("Redis cluster rebalance command is", "Command", cmd) - executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-1") + log.FromContext(ctx).V(1).Info("Redis cluster rebalance command is", "Command", cmd) + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-1") } // Add redis cluster node would add a node to the existing redis cluster using redis-cli -func AddRedisNodeToCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { +func AddRedisNodeToCluster(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { var cmd []string - activeRedisNode := CheckRedisNodeCount(ctx, client, logger, cr, "leader") + activeRedisNode := CheckRedisNodeCount(ctx, client, cr, "leader") newPod := RedisDetails{ PodName: cr.ObjectMeta.Name + "-leader-" + strconv.Itoa(int(activeRedisNode)), @@ -238,14 +237,14 @@ func AddRedisNodeToCluster(ctx context.Context, client kubernetes.Interface, log cmd = append(cmd, getRedisHostname(newPod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) cmd = append(cmd, getRedisHostname(existingPod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(client, logger, newPod, *cr.Spec.Port)) - cmd = append(cmd, getRedisServerAddress(client, logger, existingPod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, newPod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, existingPod, *cr.Spec.Port)) } if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) @@ -253,16 +252,16 @@ func AddRedisNodeToCluster(ctx context.Context, client kubernetes.Interface, log cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) 
- logger.V(1).Info("Redis cluster add-node command is", "Command", cmd) - executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + log.FromContext(ctx).V(1).Info("Redis cluster add-node command is", "Command", cmd) + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") } // getAttachedFollowerNodeIDs would return a slice of redis followers attached to a redis leader -func getAttachedFollowerNodeIDs(ctx context.Context, redisClient *redis.Client, logger logr.Logger, masterNodeID string) []string { +func getAttachedFollowerNodeIDs(ctx context.Context, redisClient *redis.Client, masterNodeID string) []string { // 3acb029fead40752f432c84f9bed2e639119a573 192.168.84.239:6379@16379,redis-cluster-v1beta2-follower-5 slave e3299968586dd457a8dba04fc6c747cecd38510f 0 1713595736542 6 connected slaveNodes, err := redisClient.ClusterSlaves(ctx, masterNodeID).Result() if err != nil { - logger.Error(err, "Failed to get attached follower node IDs", "masterNodeID", masterNodeID) + log.FromContext(ctx).Error(err, "Failed to get attached follower node IDs", "masterNodeID", masterNodeID) return nil } slaveIDs := make([]string, 0, len(slaveNodes)) @@ -270,16 +269,16 @@ func getAttachedFollowerNodeIDs(ctx context.Context, redisClient *redis.Client, stringSlice := strings.Split(slave, " ") slaveIDs = append(slaveIDs, stringSlice[0]) } - logger.V(1).Info("Slaves Nodes attached to", "node", masterNodeID, "are", slaveIDs) + log.FromContext(ctx).V(1).Info("Slaves Nodes attached to", "node", masterNodeID, "are", slaveIDs) return slaveIDs } // Remove redis follower node would remove all follower nodes of last leader node using redis-cli -func RemoveRedisFollowerNodesFromCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { +func RemoveRedisFollowerNodesFromCluster(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { var cmd []string - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() - currentRedisCount := CheckRedisNodeCount(ctx, client, logger, cr, "leader") + currentRedisCount := CheckRedisNodeCount(ctx, client, cr, "leader") existingPod := RedisDetails{ PodName: cr.ObjectMeta.Name + "-leader-0", @@ -293,39 +292,39 @@ func RemoveRedisFollowerNodesFromCluster(ctx context.Context, client kubernetes. cmd = []string{"redis-cli"} if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) } cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) 
- lastLeaderPodNodeID := getRedisNodeID(ctx, client, logger, cr, lastLeaderPod) - followerNodeIDs := getAttachedFollowerNodeIDs(ctx, redisClient, logger, lastLeaderPodNodeID) + lastLeaderPodNodeID := getRedisNodeID(ctx, client, cr, lastLeaderPod) + followerNodeIDs := getAttachedFollowerNodeIDs(ctx, redisClient, lastLeaderPodNodeID) cmd = append(cmd, "--cluster", "del-node") if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(existingPod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(client, logger, existingPod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, existingPod, *cr.Spec.Port)) } for _, followerNodeID := range followerNodeIDs { cmd = append(cmd, followerNodeID) - logger.V(1).Info("Redis cluster follower remove command is", "Command", cmd) - executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + log.FromContext(ctx).V(1).Info("Redis cluster follower remove command is", "Command", cmd) + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") cmd = cmd[:len(cmd)-1] } } // Remove redis cluster node would remove last node to the existing redis cluster using redis-cli -func RemoveRedisNodeFromCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, removePod RedisDetails) { +func RemoveRedisNodeFromCluster(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, removePod RedisDetails) { var cmd []string - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() - // currentRedisCount := CheckRedisNodeCount(ctx, client, logger, cr, "leader") + // currentRedisCount := CheckRedisNodeCount(ctx, client, cr, "leader") existingPod := RedisDetails{ PodName: cr.ObjectMeta.Name + "-leader-0", @@ -341,16 +340,16 @@ func RemoveRedisNodeFromCluster(ctx context.Context, client kubernetes.Interface if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(existingPod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(client, logger, existingPod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, existingPod, *cr.Spec.Port)) } - removePodNodeID := getRedisNodeID(ctx, client, logger, cr, removePod) + removePodNodeID := getRedisNodeID(ctx, client, cr, removePod) cmd = append(cmd, removePodNodeID) if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) @@ -358,26 +357,26 @@ func RemoveRedisNodeFromCluster(ctx context.Context, client kubernetes.Interface cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) 
- logger.V(1).Info("Redis cluster leader remove command is", "Command", cmd) - if getRedisClusterSlots(ctx, redisClient, logger, removePodNodeID) != "0" { - logger.V(1).Info("Skipping execution remove leader not empty", "cmd", cmd) + log.FromContext(ctx).V(1).Info("Redis cluster leader remove command is", "Command", cmd) + if getRedisClusterSlots(ctx, redisClient, removePodNodeID) != "0" { + log.FromContext(ctx).V(1).Info("Skipping execution remove leader not empty", "cmd", cmd) } - executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") } // verifyLeaderPod return true if the pod is leader/master -func VerifyLeaderPod(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) bool { - podName := cr.Name + "-leader-" + strconv.Itoa(int(CheckRedisNodeCount(ctx, client, logger, cr, "leader"))-1) +func VerifyLeaderPod(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) bool { + podName := cr.Name + "-leader-" + strconv.Itoa(int(CheckRedisNodeCount(ctx, client, cr, "leader"))-1) - redisClient := configureRedisClient(client, logger, cr, podName) + redisClient := configureRedisClient(ctx, client, cr, podName) defer redisClient.Close() - return verifyLeaderPodInfo(ctx, redisClient, logger, podName) + return verifyLeaderPodInfo(ctx, redisClient, podName) } -func verifyLeaderPodInfo(ctx context.Context, redisClient *redis.Client, logger logr.Logger, podName string) bool { +func verifyLeaderPodInfo(ctx context.Context, redisClient *redis.Client, podName string) bool { info, err := redisClient.Info(ctx, "replication").Result() if err != nil { - logger.Error(err, "Failed to Get the role Info of the", "redis pod", podName) + log.FromContext(ctx).Error(err, "Failed to Get the role Info of the", "redis pod", podName) return false } @@ -392,8 +391,8 @@ func verifyLeaderPodInfo(ctx context.Context, redisClient *redis.Client, logger return false } -func ClusterFailover(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { - slavePodName := cr.Name + "-leader-" + strconv.Itoa(int(CheckRedisNodeCount(ctx, client, logger, cr, "leader"))-1) +func ClusterFailover(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { + slavePodName := cr.Name + "-leader-" + strconv.Itoa(int(CheckRedisNodeCount(ctx, client, cr, "leader"))-1) // cmd = redis-cli cluster failover -a var cmd []string pod := RedisDetails{ @@ -406,13 +405,13 @@ func ClusterFailover(ctx context.Context, client kubernetes.Interface, logger lo if *cr.Spec.ClusterVersion == "v7" { cmd = append(cmd, getRedisHostname(pod, cr, "leader")+fmt.Sprintf(":%d", *cr.Spec.Port)) } else { - cmd = append(cmd, getRedisServerAddress(client, logger, pod, *cr.Spec.Port)) + cmd = append(cmd, getRedisServerAddress(ctx, client, pod, *cr.Spec.Port)) } if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) @@ -420,6 +419,6 
@@ func ClusterFailover(ctx context.Context, client kubernetes.Interface, logger lo cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, slavePodName)...) - logger.V(1).Info("Redis cluster failover command is", "Command", cmd) - executeCommand(client, logger, cr, cmd, slavePodName) + log.FromContext(ctx).V(1).Info("Redis cluster failover command is", "Command", cmd) + executeCommand(ctx, client, cr, cmd, slavePodName) } diff --git a/pkg/k8sutils/cluster-scaling_test.go b/pkg/k8sutils/cluster-scaling_test.go index fe1bd3bd0..69fa1fba0 100644 --- a/pkg/k8sutils/cluster-scaling_test.go +++ b/pkg/k8sutils/cluster-scaling_test.go @@ -5,15 +5,12 @@ import ( "fmt" "testing" - "github.com/go-logr/logr" "github.com/go-redis/redismock/v9" redis "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" ) func Test_verifyLeaderPodInfo(t *testing.T) { - logger := logr.Discard() - tests := []struct { name string section string @@ -52,7 +49,7 @@ func Test_verifyLeaderPodInfo(t *testing.T) { mock.ExpectInfo(tt.section).SetVal(tt.response) } - result := verifyLeaderPodInfo(ctx, client, logger, "test-pod") + result := verifyLeaderPodInfo(ctx, client, "test-pod") assert.Equal(t, tt.expectedBool, result, "Test case: "+tt.name) @@ -64,8 +61,6 @@ func Test_verifyLeaderPodInfo(t *testing.T) { } func Test_getRedisClusterSlots(t *testing.T) { - logger := logr.Discard() - tests := []struct { name string nodeID string @@ -138,7 +133,7 @@ func Test_getRedisClusterSlots(t *testing.T) { mock.ExpectClusterSlots().SetVal(tt.clusterSlots) } - result := getRedisClusterSlots(ctx, client, logger, tt.nodeID) + result := getRedisClusterSlots(ctx, client, tt.nodeID) assert.Equal(t, tt.expectedResult, result, "Test case: "+tt.name) @@ -150,8 +145,6 @@ func Test_getRedisClusterSlots(t *testing.T) { } func Test_getAttachedFollowerNodeIDs(t *testing.T) { - logger := logr.Discard() - tests := []struct { name string masterNodeID string @@ -209,7 +202,7 @@ func Test_getAttachedFollowerNodeIDs(t *testing.T) { mock.ExpectClusterSlaves(tt.masterNodeID).SetVal(tt.slaveNodeIDs) } - result := getAttachedFollowerNodeIDs(ctx, client, logger, tt.masterNodeID) + result := getAttachedFollowerNodeIDs(ctx, client, tt.masterNodeID) assert.ElementsMatch(t, tt.expectedslaveNodeIDs, result, "Test case: "+tt.name) diff --git a/pkg/k8sutils/finalizer.go b/pkg/k8sutils/finalizer.go index 338224698..6331fd319 100644 --- a/pkg/k8sutils/finalizer.go +++ b/pkg/k8sutils/finalizer.go @@ -5,13 +5,13 @@ import ( "fmt" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" - "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/utils/env" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" ) const ( @@ -22,17 +22,17 @@ const ( ) // HandleRedisFinalizer finalize resource if instance is marked to be deleted -func HandleRedisFinalizer(ctrlclient client.Client, k8sClient kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.Redis) error { +func HandleRedisFinalizer(ctx context.Context, ctrlclient client.Client, k8sClient kubernetes.Interface, cr *redisv1beta2.Redis) error { if cr.GetDeletionTimestamp() != nil { if controllerutil.ContainsFinalizer(cr, RedisFinalizer) { if cr.Spec.Storage != nil && !cr.Spec.Storage.KeepAfterDelete { - if err := finalizeRedisPVC(k8sClient, logger, cr); err != nil { + if err := finalizeRedisPVC(ctx, k8sClient, cr); err != nil 
{ return err } } controllerutil.RemoveFinalizer(cr, RedisFinalizer) if err := ctrlclient.Update(context.TODO(), cr); err != nil { - logger.Error(err, "Could not remove finalizer", "finalizer", RedisFinalizer) + log.FromContext(ctx).Error(err, "Could not remove finalizer", "finalizer", RedisFinalizer) return err } } @@ -41,17 +41,17 @@ func HandleRedisFinalizer(ctrlclient client.Client, k8sClient kubernetes.Interfa } // HandleRedisClusterFinalizer finalize resource if instance is marked to be deleted -func HandleRedisClusterFinalizer(ctrlclient client.Client, k8sClient kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) error { +func HandleRedisClusterFinalizer(ctx context.Context, ctrlclient client.Client, k8sClient kubernetes.Interface, cr *redisv1beta2.RedisCluster) error { if cr.GetDeletionTimestamp() != nil { if controllerutil.ContainsFinalizer(cr, RedisClusterFinalizer) { if cr.Spec.Storage != nil && !cr.Spec.Storage.KeepAfterDelete { - if err := finalizeRedisClusterPVC(k8sClient, logger, cr); err != nil { + if err := finalizeRedisClusterPVC(ctx, k8sClient, cr); err != nil { return err } } controllerutil.RemoveFinalizer(cr, RedisClusterFinalizer) if err := ctrlclient.Update(context.TODO(), cr); err != nil { - logger.Error(err, "Could not remove finalizer "+RedisClusterFinalizer) + log.FromContext(ctx).Error(err, "Could not remove finalizer "+RedisClusterFinalizer) return err } } @@ -60,17 +60,17 @@ func HandleRedisClusterFinalizer(ctrlclient client.Client, k8sClient kubernetes. } // Handle RedisReplicationFinalizer finalize resource if instance is marked to be deleted -func HandleRedisReplicationFinalizer(ctrlclient client.Client, k8sClient kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication) error { +func HandleRedisReplicationFinalizer(ctx context.Context, ctrlclient client.Client, k8sClient kubernetes.Interface, cr *redisv1beta2.RedisReplication) error { if cr.GetDeletionTimestamp() != nil { if controllerutil.ContainsFinalizer(cr, RedisReplicationFinalizer) { if cr.Spec.Storage != nil && !cr.Spec.Storage.KeepAfterDelete { - if err := finalizeRedisReplicationPVC(k8sClient, logger, cr); err != nil { + if err := finalizeRedisReplicationPVC(ctx, k8sClient, cr); err != nil { return err } } controllerutil.RemoveFinalizer(cr, RedisReplicationFinalizer) if err := ctrlclient.Update(context.TODO(), cr); err != nil { - logger.Error(err, "Could not remove finalizer "+RedisReplicationFinalizer) + log.FromContext(ctx).Error(err, "Could not remove finalizer "+RedisReplicationFinalizer) return err } } @@ -79,12 +79,12 @@ func HandleRedisReplicationFinalizer(ctrlclient client.Client, k8sClient kuberne } // HandleRedisSentinelFinalizer finalize resource if instance is marked to be deleted -func HandleRedisSentinelFinalizer(ctrlclient client.Client, logger logr.Logger, cr *redisv1beta2.RedisSentinel) error { +func HandleRedisSentinelFinalizer(ctx context.Context, ctrlclient client.Client, cr *redisv1beta2.RedisSentinel) error { if cr.GetDeletionTimestamp() != nil { if controllerutil.ContainsFinalizer(cr, RedisSentinelFinalizer) { controllerutil.RemoveFinalizer(cr, RedisSentinelFinalizer) if err := ctrlclient.Update(context.TODO(), cr); err != nil { - logger.Error(err, "Could not remove finalizer "+RedisSentinelFinalizer) + log.FromContext(ctx).Error(err, "Could not remove finalizer "+RedisSentinelFinalizer) return err } } @@ -93,7 +93,7 @@ func HandleRedisSentinelFinalizer(ctrlclient client.Client, logger logr.Logger, } // AddFinalizer add finalizer for 
graceful deletion -func AddFinalizer(cr client.Object, finalizer string, cl client.Client) error { +func AddFinalizer(ctx context.Context, cr client.Object, finalizer string, cl client.Client) error { if !controllerutil.ContainsFinalizer(cr, finalizer) { controllerutil.AddFinalizer(cr, finalizer) return cl.Update(context.TODO(), cr) @@ -102,26 +102,26 @@ func AddFinalizer(cr client.Object, finalizer string, cl client.Client) error { } // finalizeRedisPVC delete PVC -func finalizeRedisPVC(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.Redis) error { +func finalizeRedisPVC(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.Redis) error { pvcTemplateName := env.GetString(EnvOperatorSTSPVCTemplateName, cr.Name) PVCName := fmt.Sprintf("%s-%s-0", pvcTemplateName, cr.Name) err := client.CoreV1().PersistentVolumeClaims(cr.Namespace).Delete(context.TODO(), PVCName, metav1.DeleteOptions{}) if err != nil && !errors.IsNotFound(err) { - logger.Error(err, "Could not delete Persistent Volume Claim", "PVCName", PVCName) + log.FromContext(ctx).Error(err, "Could not delete Persistent Volume Claim", "PVCName", PVCName) return err } return nil } // finalizeRedisClusterPVC delete PVCs -func finalizeRedisClusterPVC(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) error { +func finalizeRedisClusterPVC(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) error { for _, role := range []string{"leader", "follower"} { for i := 0; i < int(cr.Spec.GetReplicaCounts(role)); i++ { pvcTemplateName := env.GetString(EnvOperatorSTSPVCTemplateName, cr.Name+"-"+role) PVCName := fmt.Sprintf("%s-%s-%s-%d", pvcTemplateName, cr.Name, role, i) err := client.CoreV1().PersistentVolumeClaims(cr.Namespace).Delete(context.TODO(), PVCName, metav1.DeleteOptions{}) if err != nil && !errors.IsNotFound(err) { - logger.Error(err, "Could not delete Persistent Volume Claim "+PVCName) + log.FromContext(ctx).Error(err, "Could not delete Persistent Volume Claim "+PVCName) return err } } @@ -130,7 +130,7 @@ func finalizeRedisClusterPVC(client kubernetes.Interface, logger logr.Logger, cr PVCName := fmt.Sprintf("%s-%s-%s-%d", "node-conf", cr.Name, role, i) err := client.CoreV1().PersistentVolumeClaims(cr.Namespace).Delete(context.TODO(), PVCName, metav1.DeleteOptions{}) if err != nil && !errors.IsNotFound(err) { - logger.Error(err, "Could not delete Persistent Volume Claim "+PVCName) + log.FromContext(ctx).Error(err, "Could not delete Persistent Volume Claim "+PVCName) return err } } @@ -140,13 +140,13 @@ func finalizeRedisClusterPVC(client kubernetes.Interface, logger logr.Logger, cr } // finalizeRedisReplicationPVC delete PVCs -func finalizeRedisReplicationPVC(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication) error { +func finalizeRedisReplicationPVC(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisReplication) error { for i := 0; i < int(cr.Spec.GetReplicationCounts("replication")); i++ { pvcTemplateName := env.GetString(EnvOperatorSTSPVCTemplateName, cr.Name) PVCName := fmt.Sprintf("%s-%s-%d", pvcTemplateName, cr.Name, i) err := client.CoreV1().PersistentVolumeClaims(cr.Namespace).Delete(context.TODO(), PVCName, metav1.DeleteOptions{}) if err != nil && !errors.IsNotFound(err) { - logger.Error(err, "Could not delete Persistent Volume Claim "+PVCName) + log.FromContext(ctx).Error(err, "Could not delete Persistent Volume Claim "+PVCName) return err } } diff --git a/pkg/k8sutils/finalizer_test.go 
b/pkg/k8sutils/finalizer_test.go index a5324369b..4f46d8f7f 100644 --- a/pkg/k8sutils/finalizer_test.go +++ b/pkg/k8sutils/finalizer_test.go @@ -9,7 +9,6 @@ import ( "github.com/OT-CONTAINER-KIT/redis-operator/api" "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" mockClient "github.com/OT-CONTAINER-KIT/redis-operator/mocks/client" - "github.com/go-logr/logr/testr" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -132,7 +131,6 @@ func TestHandleRedisFinalizer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - logger := testr.New(t) var k8sClient *k8sClientFake.Clientset if tc.existingPVC != nil { k8sClient = k8sClientFake.NewSimpleClientset(tc.existingPVC.DeepCopyObject()) @@ -147,7 +145,7 @@ func TestHandleRedisFinalizer(t *testing.T) { assert.NoError(t, err) } - err := HandleRedisFinalizer(tc.mockClient, k8sClient, logger, tc.cr) + err := HandleRedisFinalizer(context.TODO(), tc.mockClient, k8sClient, tc.cr) if tc.expectError { assert.Error(t, err) } else { @@ -262,7 +260,6 @@ func TestHandleRedisClusterFinalizer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - logger := testr.New(t) var k8sClient *k8sClientFake.Clientset if tc.existingPVC != nil { k8sClient = k8sClientFake.NewSimpleClientset(helperToRuntimeObjects(tc.existingPVC)...) @@ -279,7 +276,7 @@ func TestHandleRedisClusterFinalizer(t *testing.T) { } } - err := HandleRedisClusterFinalizer(tc.mockClient, k8sClient, logger, tc.cr) + err := HandleRedisClusterFinalizer(context.TODO(), tc.mockClient, k8sClient, tc.cr) if tc.expectError { assert.Error(t, err) } else { @@ -461,7 +458,6 @@ func TestHandleRedisReplicationFinalizer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - logger := testr.New(t) var k8sClient *k8sClientFake.Clientset if tc.existingPVC != nil { k8sClient = k8sClientFake.NewSimpleClientset(helperToRuntimeObjects(tc.existingPVC)...) @@ -478,7 +474,7 @@ func TestHandleRedisReplicationFinalizer(t *testing.T) { } } - err := HandleRedisReplicationFinalizer(tc.mockClient, k8sClient, logger, tc.cr) + err := HandleRedisReplicationFinalizer(context.TODO(), tc.mockClient, k8sClient, tc.cr) if tc.expectError { assert.Error(t, err) } else { @@ -557,8 +553,7 @@ func TestHandleRedisSentinelFinalizer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - logger := testr.New(t) - err := HandleRedisSentinelFinalizer(tc.mockClient, logger, tc.cr) + err := HandleRedisSentinelFinalizer(context.TODO(), tc.mockClient, tc.cr) if tc.expectError { assert.Error(t, err) } else { @@ -597,7 +592,6 @@ func TestFinalizeRedisPVC(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - logger := testr.New(t) cr := &v1beta2.Redis{ ObjectMeta: metav1.ObjectMeta{ Name: "test-redis", @@ -618,7 +612,7 @@ func TestFinalizeRedisPVC(t *testing.T) { assert.NoError(t, err) } - err := finalizeRedisPVC(k8sClient, logger, cr) + err := finalizeRedisPVC(context.TODO(), k8sClient, cr) if tc.expectError { assert.Error(t, err) assert.Equal(t, tc.errorExpected, err) @@ -694,7 +688,6 @@ func TestFinalizeRedisReplicationPVC(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - logger := testr.New(t) var k8sClient *k8sClientFake.Clientset if tc.existingPVCs != nil { k8sClient = k8sClientFake.NewSimpleClientset(helperToRuntimeObjects(tc.existingPVCs)...) 
@@ -702,7 +695,7 @@ func TestFinalizeRedisReplicationPVC(t *testing.T) { k8sClient = k8sClientFake.NewSimpleClientset() } - err := finalizeRedisReplicationPVC(k8sClient, logger, tc.redisReplication) + err := finalizeRedisReplicationPVC(context.TODO(), k8sClient, tc.redisReplication) if tc.expectError { assert.Error(t, err) } else { @@ -765,7 +758,6 @@ func TestFinalizeRedisClusterPVC(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - logger := testr.New(t) var k8sClient *k8sClientFake.Clientset if tc.existingPVCs != nil { k8sClient = k8sClientFake.NewSimpleClientset(helperToRuntimeObjects(tc.existingPVCs)...) @@ -773,7 +765,7 @@ func TestFinalizeRedisClusterPVC(t *testing.T) { k8sClient = k8sClientFake.NewSimpleClientset() } - err := finalizeRedisClusterPVC(k8sClient, logger, tc.redisCluster) + err := finalizeRedisClusterPVC(context.TODO(), k8sClient, tc.redisCluster) if tc.expectError { assert.Error(t, err) } else { @@ -886,7 +878,7 @@ func TestAddFinalizer(t *testing.T) { return nil }, } - err := AddFinalizer(tt.args.cr, tt.args.finalizer, mc) + err := AddFinalizer(context.TODO(), tt.args.cr, tt.args.finalizer, mc) if (err != nil) != tt.wantErr { t.Errorf("AddFinalizer() error = %v, wantErr %v", err, tt.wantErr) } diff --git a/pkg/k8sutils/pod.go b/pkg/k8sutils/pod.go index e52b131fc..c79a95f86 100644 --- a/pkg/k8sutils/pod.go +++ b/pkg/k8sutils/pod.go @@ -6,11 +6,11 @@ import ( "fmt" "strings" - "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/log" ) type Pod interface { @@ -20,14 +20,11 @@ type Pod interface { type PodService struct { kubeClient kubernetes.Interface - log logr.Logger } -func NewPodService(kubeClient kubernetes.Interface, log logr.Logger) *PodService { - log = log.WithValues("service", "k8s.pod") +func NewPodService(kubeClient kubernetes.Interface) *PodService { return &PodService{ kubeClient: kubeClient, - log: log, } } @@ -48,7 +45,7 @@ type patchStringValue struct { } func (s *PodService) PatchPodLabels(ctx context.Context, namespace, podName string, labels map[string]string) error { - s.log.Info("Patch pod labels", "namespace", namespace, "podName", podName, "labels", labels) + log.FromContext(ctx).V(1).Info("Patch pod labels", "namespace", namespace, "podName", podName, "labels", labels) var payloads []interface{} for labelKey, labelValue := range labels { @@ -63,7 +60,7 @@ func (s *PodService) PatchPodLabels(ctx context.Context, namespace, podName stri _, err := s.kubeClient.CoreV1().Pods(namespace).Patch(ctx, podName, types.JSONPatchType, payloadBytes, metav1.PatchOptions{}) if err != nil { - s.log.Error(err, "Patch pod labels failed", "namespace", namespace, "podName", podName) + log.FromContext(ctx).Error(err, "Patch pod labels failed", "namespace", namespace, "podName", podName) } return err } diff --git a/pkg/k8sutils/poddisruption.go b/pkg/k8sutils/poddisruption.go index 4561b8cc9..6dc97a970 100644 --- a/pkg/k8sutils/poddisruption.go +++ b/pkg/k8sutils/poddisruption.go @@ -7,31 +7,30 @@ import ( commonapi "github.com/OT-CONTAINER-KIT/redis-operator/api" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" "github.com/banzaicloud/k8s-objectmatcher/patch" - "github.com/go-logr/logr" policyv1 "k8s.io/api/policy/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes" + 
"sigs.k8s.io/controller-runtime/pkg/log" ) // CreateRedisLeaderPodDisruptionBudget check and create a PodDisruptionBudget for Leaders -func ReconcileRedisPodDisruptionBudget(cr *redisv1beta2.RedisCluster, role string, pdbParams *commonapi.RedisPodDisruptionBudget, cl kubernetes.Interface) error { +func ReconcileRedisPodDisruptionBudget(ctx context.Context, cr *redisv1beta2.RedisCluster, role string, pdbParams *commonapi.RedisPodDisruptionBudget, cl kubernetes.Interface) error { pdbName := cr.ObjectMeta.Name + "-" + role - logger := pdbLogger(cr.Namespace, pdbName) if pdbParams != nil && pdbParams.Enabled { labels := getRedisLabels(cr.ObjectMeta.Name, cluster, role, cr.ObjectMeta.GetLabels()) annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations) pdbMeta := generateObjectMetaInformation(pdbName, cr.Namespace, labels, annotations) - pdbDef := generatePodDisruptionBudgetDef(cr, role, pdbMeta, cr.Spec.RedisLeader.PodDisruptionBudget) - return CreateOrUpdatePodDisruptionBudget(pdbDef, cl) + pdbDef := generatePodDisruptionBudgetDef(ctx, cr, role, pdbMeta, cr.Spec.RedisLeader.PodDisruptionBudget) + return CreateOrUpdatePodDisruptionBudget(ctx, pdbDef, cl) } else { // Check if one exists, and delete it. - _, err := GetPodDisruptionBudget(cr.Namespace, pdbName, cl) + _, err := GetPodDisruptionBudget(ctx, cr.Namespace, pdbName, cl) if err == nil { - return deletePodDisruptionBudget(cr.Namespace, pdbName, cl) + return deletePodDisruptionBudget(ctx, cr.Namespace, pdbName, cl) } else if err != nil && errors.IsNotFound(err) { - logger.V(1).Info("Reconciliation Successful, no PodDisruptionBudget Found.") + log.FromContext(ctx).V(1).Info("Reconciliation Successful, no PodDisruptionBudget Found.") // Its ok if its not found, as we're deleting anyway return nil } @@ -39,22 +38,21 @@ func ReconcileRedisPodDisruptionBudget(cr *redisv1beta2.RedisCluster, role strin } } -func ReconcileSentinelPodDisruptionBudget(cr *redisv1beta2.RedisSentinel, pdbParams *commonapi.RedisPodDisruptionBudget, cl kubernetes.Interface) error { +func ReconcileSentinelPodDisruptionBudget(ctx context.Context, cr *redisv1beta2.RedisSentinel, pdbParams *commonapi.RedisPodDisruptionBudget, cl kubernetes.Interface) error { pdbName := cr.ObjectMeta.Name + "-sentinel" - logger := pdbLogger(cr.Namespace, pdbName) if pdbParams != nil && pdbParams.Enabled { labels := getRedisLabels(cr.ObjectMeta.Name, sentinel, "sentinel", cr.ObjectMeta.GetLabels()) annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations) pdbMeta := generateObjectMetaInformation(pdbName, cr.Namespace, labels, annotations) - pdbDef := generateSentinelPodDisruptionBudgetDef(cr, "sentinel", pdbMeta, pdbParams) - return CreateOrUpdatePodDisruptionBudget(pdbDef, cl) + pdbDef := generateSentinelPodDisruptionBudgetDef(ctx, cr, "sentinel", pdbMeta, pdbParams) + return CreateOrUpdatePodDisruptionBudget(ctx, pdbDef, cl) } else { // Check if one exists, and delete it. 
- _, err := GetPodDisruptionBudget(cr.Namespace, pdbName, cl) + _, err := GetPodDisruptionBudget(ctx, cr.Namespace, pdbName, cl) if err == nil { - return deletePodDisruptionBudget(cr.Namespace, pdbName, cl) + return deletePodDisruptionBudget(ctx, cr.Namespace, pdbName, cl) } else if err != nil && errors.IsNotFound(err) { - logger.V(1).Info("Reconciliation Successful, no PodDisruptionBudget Found.") + log.FromContext(ctx).V(1).Info("Reconciliation Successful, no PodDisruptionBudget Found.") // It's ok if it's not found, as we're deleting anyway return nil } @@ -63,7 +61,7 @@ func ReconcileSentinelPodDisruptionBudget(cr *redisv1beta2.RedisSentinel, pdbPar } // generatePodDisruptionBudgetDef will create a PodDisruptionBudget definition -func generatePodDisruptionBudgetDef(cr *redisv1beta2.RedisCluster, role string, pdbMeta metav1.ObjectMeta, pdbParams *commonapi.RedisPodDisruptionBudget) *policyv1.PodDisruptionBudget { +func generatePodDisruptionBudgetDef(ctx context.Context, cr *redisv1beta2.RedisCluster, role string, pdbMeta metav1.ObjectMeta, pdbParams *commonapi.RedisPodDisruptionBudget) *policyv1.PodDisruptionBudget { lblSelector := LabelSelectors(map[string]string{ "app": fmt.Sprintf("%s-%s", cr.ObjectMeta.Name, role), "role": role, @@ -90,7 +88,7 @@ func generatePodDisruptionBudgetDef(cr *redisv1beta2.RedisCluster, role string, } // generateSentinelPodDisruptionBudgetDef will create a PodDisruptionBudget definition -func generateSentinelPodDisruptionBudgetDef(cr *redisv1beta2.RedisSentinel, role string, pdbMeta metav1.ObjectMeta, pdbParams *commonapi.RedisPodDisruptionBudget) *policyv1.PodDisruptionBudget { +func generateSentinelPodDisruptionBudgetDef(ctx context.Context, cr *redisv1beta2.RedisSentinel, role string, pdbMeta metav1.ObjectMeta, pdbParams *commonapi.RedisPodDisruptionBudget) *policyv1.PodDisruptionBudget { lblSelector := LabelSelectors(map[string]string{ "app": fmt.Sprintf("%s-%s", cr.ObjectMeta.Name, role), "role": role, @@ -117,25 +115,23 @@ func generateSentinelPodDisruptionBudgetDef(cr *redisv1beta2.RedisSentinel, role } // CreateOrUpdatePodDisruptionBudget method will create or update Redis PodDisruptionBudget -func CreateOrUpdatePodDisruptionBudget(pdbDef *policyv1.PodDisruptionBudget, cl kubernetes.Interface) error { - logger := pdbLogger(pdbDef.Namespace, pdbDef.Name) - storedPDB, err := GetPodDisruptionBudget(pdbDef.Namespace, pdbDef.Name, cl) +func CreateOrUpdatePodDisruptionBudget(ctx context.Context, pdbDef *policyv1.PodDisruptionBudget, cl kubernetes.Interface) error { + storedPDB, err := GetPodDisruptionBudget(ctx, pdbDef.Namespace, pdbDef.Name, cl) if err != nil { if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(pdbDef); err != nil { //nolint - logger.Error(err, "Unable to patch redis PodDisruptionBudget with comparison object") + log.FromContext(ctx).Error(err, "Unable to patch redis PodDisruptionBudget with comparison object") return err } if errors.IsNotFound(err) { - return createPodDisruptionBudget(pdbDef.Namespace, pdbDef, cl) + return createPodDisruptionBudget(ctx, pdbDef.Namespace, pdbDef, cl) } return err } - return patchPodDisruptionBudget(storedPDB, pdbDef, pdbDef.Namespace, cl) + return patchPodDisruptionBudget(ctx, storedPDB, pdbDef, pdbDef.Namespace, cl) } // patchPodDisruptionBudget will patch Redis Kubernetes PodDisruptionBudgets -func patchPodDisruptionBudget(storedPdb *policyv1.PodDisruptionBudget, newPdb *policyv1.PodDisruptionBudget, namespace string, cl kubernetes.Interface) error { - logger := pdbLogger(namespace, storedPdb.Name) +func
patchPodDisruptionBudget(ctx context.Context, storedPdb *policyv1.PodDisruptionBudget, newPdb *policyv1.PodDisruptionBudget, namespace string, cl kubernetes.Interface) error { // We want to try and keep this as atomic as possible. newPdb.ResourceVersion = storedPdb.ResourceVersion newPdb.CreationTimestamp = storedPdb.CreationTimestamp @@ -151,11 +147,11 @@ func patchPodDisruptionBudget(storedPdb *policyv1.PodDisruptionBudget, newPdb *p patch.IgnoreStatusFields(), ) if err != nil { - logger.Error(err, "Unable to patch redis PodDisruption with comparison object") + log.FromContext(ctx).Error(err, "Unable to patch redis PodDisruption with comparison object") return err } if !patchResult.IsEmpty() { - logger.V(1).Info("Changes in PodDisruptionBudget Detected, Updating...", + log.FromContext(ctx).V(1).Info("Changes in PodDisruptionBudget Detected, Updating...", "patch", string(patchResult.Patch), "Current", string(patchResult.Current), "Original", string(patchResult.Original), @@ -167,67 +163,57 @@ func patchPodDisruptionBudget(storedPdb *policyv1.PodDisruptionBudget, newPdb *p } } if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(newPdb); err != nil { - logger.Error(err, "Unable to patch redis PodDisruptionBudget with comparison object") + log.FromContext(ctx).Error(err, "Unable to patch redis PodDisruptionBudget with comparison object") return err } - return updatePodDisruptionBudget(namespace, newPdb, cl) + return updatePodDisruptionBudget(ctx, namespace, newPdb, cl) } return nil } // createPodDisruptionBudget is a method to create PodDisruptionBudgets in Kubernetes -func createPodDisruptionBudget(namespace string, pdb *policyv1.PodDisruptionBudget, cl kubernetes.Interface) error { - logger := pdbLogger(namespace, pdb.Name) +func createPodDisruptionBudget(ctx context.Context, namespace string, pdb *policyv1.PodDisruptionBudget, cl kubernetes.Interface) error { _, err := cl.PolicyV1().PodDisruptionBudgets(namespace).Create(context.TODO(), pdb, metav1.CreateOptions{}) if err != nil { - logger.Error(err, "Redis PodDisruptionBudget creation failed") + log.FromContext(ctx).Error(err, "Redis PodDisruptionBudget creation failed") return err } - logger.V(1).Info("Redis PodDisruptionBudget creation was successful") + log.FromContext(ctx).V(1).Info("Redis PodDisruptionBudget creation was successful") return nil } // updatePodDisruptionBudget is a method to update PodDisruptionBudgets in Kubernetes -func updatePodDisruptionBudget(namespace string, pdb *policyv1.PodDisruptionBudget, cl kubernetes.Interface) error { - logger := pdbLogger(namespace, pdb.Name) +func updatePodDisruptionBudget(ctx context.Context, namespace string, pdb *policyv1.PodDisruptionBudget, cl kubernetes.Interface) error { _, err := cl.PolicyV1().PodDisruptionBudgets(namespace).Update(context.TODO(), pdb, metav1.UpdateOptions{}) if err != nil { - logger.Error(err, "Redis PodDisruptionBudget update failed") + log.FromContext(ctx).Error(err, "Redis PodDisruptionBudget update failed") return err } - logger.V(1).Info("Redis PodDisruptionBudget update was successful", "PDB.Spec", pdb.Spec) + log.FromContext(ctx).V(1).Info("Redis PodDisruptionBudget update was successful", "PDB.Spec", pdb.Spec) return nil } // deletePodDisruptionBudget is a method to delete PodDisruptionBudgets in Kubernetes -func deletePodDisruptionBudget(namespace string, pdbName string, cl kubernetes.Interface) error { - logger := pdbLogger(namespace, pdbName) +func deletePodDisruptionBudget(ctx context.Context, namespace string, pdbName string, cl
kubernetes.Interface) error { err := cl.PolicyV1().PodDisruptionBudgets(namespace).Delete(context.TODO(), pdbName, metav1.DeleteOptions{}) if err != nil { - logger.Error(err, "Redis PodDisruption deletion failed") + log.FromContext(ctx).Error(err, "Redis PodDisruption deletion failed") return err } - logger.V(1).Info("Redis PodDisruption delete was successful") + log.FromContext(ctx).V(1).Info("Redis PodDisruption delete was successful") return nil } // GetPodDisruptionBudget is a method to get PodDisruptionBudgets in Kubernetes -func GetPodDisruptionBudget(namespace string, pdb string, cl kubernetes.Interface) (*policyv1.PodDisruptionBudget, error) { - logger := pdbLogger(namespace, pdb) +func GetPodDisruptionBudget(ctx context.Context, namespace string, pdb string, cl kubernetes.Interface) (*policyv1.PodDisruptionBudget, error) { getOpts := metav1.GetOptions{ TypeMeta: generateMetaInformation("PodDisruptionBudget", "policy/v1"), } pdbInfo, err := cl.PolicyV1().PodDisruptionBudgets(namespace).Get(context.TODO(), pdb, getOpts) if err != nil { - logger.V(1).Info("Redis PodDisruptionBudget get action failed") + log.FromContext(ctx).V(1).Info("Redis PodDisruptionBudget get action failed") return nil, err } - logger.V(1).Info("Redis PodDisruptionBudget get action was successful") + log.FromContext(ctx).V(1).Info("Redis PodDisruptionBudget get action was successful") return pdbInfo, err } - -// pdbLogger will generate logging interface for PodDisruptionBudgets -func pdbLogger(namespace string, name string) logr.Logger { - reqLogger := log.WithValues("Request.PodDisruptionBudget.Namespace", namespace, "Request.PodDisruptionBudget.Name", name) - return reqLogger -} diff --git a/pkg/k8sutils/redis-cluster.go b/pkg/k8sutils/redis-cluster.go index 7fe9515dd..beb6ac9b7 100644 --- a/pkg/k8sutils/redis-cluster.go +++ b/pkg/k8sutils/redis-cluster.go @@ -1,16 +1,17 @@ package k8sutils import ( + "context" "strconv" "strings" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/util" - "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/log" ) // RedisClusterSTS is a interface to call Redis Statefulset function @@ -32,7 +33,7 @@ type RedisClusterService struct { } // generateRedisClusterParams generates Redis cluster information -func generateRedisClusterParams(cr *redisv1beta2.RedisCluster, replicas int32, externalConfig *string, params RedisClusterSTS) statefulSetParameters { +func generateRedisClusterParams(ctx context.Context, cr *redisv1beta2.RedisCluster, replicas int32, externalConfig *string, params RedisClusterSTS) statefulSetParameters { var minreadyseconds int32 = 0 if cr.Spec.KubernetesConfig.MinReadySeconds != nil { minreadyseconds = *cr.Spec.KubernetesConfig.MinReadySeconds @@ -105,7 +106,7 @@ func generateRedisClusterInitContainerParams(cr *redisv1beta2.RedisCluster) init } // generateRedisClusterContainerParams generates Redis container information -func generateRedisClusterContainerParams(cl kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, securityContext *corev1.SecurityContext, readinessProbeDef *corev1.Probe, livenessProbeDef *corev1.Probe, role string) containerParameters { +func generateRedisClusterContainerParams(ctx context.Context, cl kubernetes.Interface, cr *redisv1beta2.RedisCluster, securityContext *corev1.SecurityContext, readinessProbeDef *corev1.Probe, 
livenessProbeDef *corev1.Probe, role string) containerParameters { trueProperty := true falseProperty := false containerProp := containerParameters{ @@ -141,9 +142,9 @@ func generateRedisClusterContainerParams(cl kubernetes.Interface, logger logr.Lo nps := map[string]ports{} // pod name to ports replicas := cr.Spec.GetReplicaCounts(role) for i := 0; i < int(replicas); i++ { - svc, err := getService(cl, logger, cr.Namespace, cr.ObjectMeta.Name+"-"+role+"-"+strconv.Itoa(i)) + svc, err := getService(ctx, cl, cr.Namespace, cr.ObjectMeta.Name+"-"+role+"-"+strconv.Itoa(i)) if err != nil { - log.Error(err, "Cannot get service for Redis", "Setup.Type", role) + log.FromContext(ctx).Error(err, "Cannot get service for Redis", "Setup.Type", role) } else { nps[svc.Name] = ports{ announcePort: int(svc.Spec.Ports[0].NodePort), @@ -211,7 +212,7 @@ func generateRedisClusterContainerParams(cl kubernetes.Interface, logger logr.Lo } // CreateRedisLeader will create a leader redis setup -func CreateRedisLeader(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { +func CreateRedisLeader(ctx context.Context, cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { prop := RedisClusterSTS{ RedisStateFulType: "leader", SecurityContext: cr.Spec.RedisLeader.SecurityContext, @@ -225,11 +226,11 @@ func CreateRedisLeader(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) e if cr.Spec.RedisLeader.RedisConfig != nil { prop.ExternalConfig = cr.Spec.RedisLeader.RedisConfig.AdditionalRedisConfig } - return prop.CreateRedisClusterSetup(cr, cl) + return prop.CreateRedisClusterSetup(ctx, cr, cl) } // CreateRedisFollower will create a follower redis setup -func CreateRedisFollower(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { +func CreateRedisFollower(ctx context.Context, cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { prop := RedisClusterSTS{ RedisStateFulType: "follower", SecurityContext: cr.Spec.RedisFollower.SecurityContext, @@ -243,23 +244,23 @@ func CreateRedisFollower(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) if cr.Spec.RedisFollower.RedisConfig != nil { prop.ExternalConfig = cr.Spec.RedisFollower.RedisConfig.AdditionalRedisConfig } - return prop.CreateRedisClusterSetup(cr, cl) + return prop.CreateRedisClusterSetup(ctx, cr, cl) } // CreateRedisLeaderService method will create service for Redis Leader -func CreateRedisLeaderService(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { +func CreateRedisLeaderService(ctx context.Context, cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { prop := RedisClusterService{ RedisServiceRole: "leader", } - return prop.CreateRedisClusterService(cr, cl) + return prop.CreateRedisClusterService(ctx, cr, cl) } // CreateRedisFollowerService method will create service for Redis Follower -func CreateRedisFollowerService(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { +func CreateRedisFollowerService(ctx context.Context, cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { prop := RedisClusterService{ RedisServiceRole: "follower", } - return prop.CreateRedisClusterService(cr, cl) + return prop.CreateRedisClusterService(ctx, cr, cl) } func (service RedisClusterSTS) getReplicaCount(cr *redisv1beta2.RedisCluster) int32 { @@ -267,34 +268,32 @@ func (service RedisClusterSTS) getReplicaCount(cr *redisv1beta2.RedisCluster) in } // CreateRedisClusterSetup will create Redis Setup for leader and follower -func (service RedisClusterSTS) CreateRedisClusterSetup(cr 
*redisv1beta2.RedisCluster, cl kubernetes.Interface) error { +func (service RedisClusterSTS) CreateRedisClusterSetup(ctx context.Context, cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { stateFulName := cr.ObjectMeta.Name + "-" + service.RedisStateFulType - logger := statefulSetLogger(cr.Namespace, stateFulName) labels := getRedisLabels(stateFulName, cluster, service.RedisStateFulType, cr.ObjectMeta.Labels) annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations) objectMetaInfo := generateObjectMetaInformation(stateFulName, cr.Namespace, labels, annotations) err := CreateOrUpdateStateFul( + ctx, cl, - logger, cr.GetNamespace(), objectMetaInfo, - generateRedisClusterParams(cr, service.getReplicaCount(cr), service.ExternalConfig, service), + generateRedisClusterParams(ctx, cr, service.getReplicaCount(cr), service.ExternalConfig, service), redisClusterAsOwner(cr), generateRedisClusterInitContainerParams(cr), - generateRedisClusterContainerParams(cl, logger, cr, service.SecurityContext, service.ReadinessProbe, service.LivenessProbe, service.RedisStateFulType), + generateRedisClusterContainerParams(ctx, cl, cr, service.SecurityContext, service.ReadinessProbe, service.LivenessProbe, service.RedisStateFulType), cr.Spec.Sidecars, ) if err != nil { - logger.Error(err, "Cannot create statefulset for Redis", "Setup.Type", service.RedisStateFulType) + log.FromContext(ctx).Error(err, "Cannot create statefulset for Redis", "Setup.Type", service.RedisStateFulType) return err } return nil } // CreateRedisClusterService method will create service for Redis -func (service RedisClusterService) CreateRedisClusterService(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { +func (service RedisClusterService) CreateRedisClusterService(ctx context.Context, cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { serviceName := cr.ObjectMeta.Name + "-" + service.RedisServiceRole - logger := serviceLogger(cr.Namespace, serviceName) labels := getRedisLabels(serviceName, cluster, service.RedisServiceRole, cr.ObjectMeta.Labels) var epp exporterPortProvider if cr.Spec.RedisExporter != nil { @@ -309,40 +308,39 @@ func (service RedisClusterService) CreateRedisClusterService(cr *redisv1beta2.Re objectMetaInfo := generateObjectMetaInformation(serviceName, cr.Namespace, labels, annotations) headlessObjectMetaInfo := generateObjectMetaInformation(serviceName+"-headless", cr.Namespace, labels, annotations) additionalObjectMetaInfo := generateObjectMetaInformation(serviceName+"-additional", cr.Namespace, labels, generateServiceAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.GetServiceAnnotations(), epp)) - err := CreateOrUpdateService(cr.Namespace, headlessObjectMetaInfo, redisClusterAsOwner(cr), disableMetrics, true, "ClusterIP", *cr.Spec.Port, cl) + err := CreateOrUpdateService(ctx, cr.Namespace, headlessObjectMetaInfo, redisClusterAsOwner(cr), disableMetrics, true, "ClusterIP", *cr.Spec.Port, cl) if err != nil { - logger.Error(err, "Cannot create headless service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create headless service for Redis", "Setup.Type", service.RedisServiceRole) return err } - err = CreateOrUpdateService(cr.Namespace, objectMetaInfo, redisClusterAsOwner(cr), epp, false, "ClusterIP", *cr.Spec.Port, cl) + err = CreateOrUpdateService(ctx, cr.Namespace, objectMetaInfo, redisClusterAsOwner(cr), epp, false, "ClusterIP", *cr.Spec.Port, cl) if err != nil { - logger.Error(err, "Cannot create 
service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create service for Redis", "Setup.Type", service.RedisServiceRole) return err } additionalServiceType := cr.Spec.KubernetesConfig.GetServiceType() if additionalServiceType == "NodePort" { // If NodePort is enabled, we need to create a service for every redis pod. // Then use --cluster-announce-ip --cluster-announce-port --cluster-announce-bus-port to make cluster. - err = service.createOrUpdateClusterNodePortService(cr, cl) + err = service.createOrUpdateClusterNodePortService(ctx, cr, cl) if err != nil { - logger.Error(err, "Cannot create nodeport service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create nodeport service for Redis", "Setup.Type", service.RedisServiceRole) return err } } - err = CreateOrUpdateService(cr.Namespace, additionalObjectMetaInfo, redisClusterAsOwner(cr), disableMetrics, false, additionalServiceType, *cr.Spec.Port, cl) + err = CreateOrUpdateService(ctx, cr.Namespace, additionalObjectMetaInfo, redisClusterAsOwner(cr), disableMetrics, false, additionalServiceType, *cr.Spec.Port, cl) if err != nil { - logger.Error(err, "Cannot create additional service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create additional service for Redis", "Setup.Type", service.RedisServiceRole) return err } return nil } -func (service RedisClusterService) createOrUpdateClusterNodePortService(cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { +func (service RedisClusterService) createOrUpdateClusterNodePortService(ctx context.Context, cr *redisv1beta2.RedisCluster, cl kubernetes.Interface) error { replicas := cr.Spec.GetReplicaCounts(service.RedisServiceRole) for i := 0; i < int(replicas); i++ { serviceName := cr.ObjectMeta.Name + "-" + service.RedisServiceRole + "-" + strconv.Itoa(i) - logger := serviceLogger(cr.Namespace, serviceName) labels := getRedisLabels(cr.ObjectMeta.Name+"-"+service.RedisServiceRole, cluster, service.RedisServiceRole, map[string]string{ "statefulset.kubernetes.io/pod-name": serviceName, }) @@ -357,9 +355,9 @@ func (service RedisClusterService) createOrUpdateClusterNodePortService(cr *redi IntVal: int32(*cr.Spec.Port + 10000), }, } - err := CreateOrUpdateService(cr.Namespace, objectMetaInfo, redisClusterAsOwner(cr), disableMetrics, false, "NodePort", *cr.Spec.Port, cl, busPort) + err := CreateOrUpdateService(ctx, cr.Namespace, objectMetaInfo, redisClusterAsOwner(cr), disableMetrics, false, "NodePort", *cr.Spec.Port, cl, busPort) if err != nil { - logger.Error(err, "Cannot create nodeport service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create nodeport service for Redis", "Setup.Type", service.RedisServiceRole) return err } } diff --git a/pkg/k8sutils/redis-cluster_test.go b/pkg/k8sutils/redis-cluster_test.go index 65c99bc4a..90993475d 100644 --- a/pkg/k8sutils/redis-cluster_test.go +++ b/pkg/k8sutils/redis-cluster_test.go @@ -1,13 +1,13 @@ package k8sutils import ( + "context" "os" "path/filepath" "testing" common "github.com/OT-CONTAINER-KIT/redis-operator/api" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" - "github.com/go-logr/logr/testr" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -165,7 +165,7 @@ func Test_generateRedisClusterParams(t *testing.T) { t.Fatalf("Failed to unmarshal file %s: %v", path, err) } - 
actualLeaderSTS := generateRedisClusterParams(input, *input.Spec.Size, input.Spec.RedisLeader.RedisConfig.AdditionalRedisConfig, RedisClusterSTS{ + actualLeaderSTS := generateRedisClusterParams(context.TODO(), input, *input.Spec.Size, input.Spec.RedisLeader.RedisConfig.AdditionalRedisConfig, RedisClusterSTS{ RedisStateFulType: "leader", ExternalConfig: input.Spec.RedisLeader.RedisConfig.AdditionalRedisConfig, SecurityContext: input.Spec.RedisLeader.SecurityContext, @@ -178,7 +178,7 @@ func Test_generateRedisClusterParams(t *testing.T) { }) assert.EqualValues(t, expectedLeaderSTS, actualLeaderSTS, "Expected %+v, got %+v", expectedLeaderSTS, actualLeaderSTS) - actualFollowerSTS := generateRedisClusterParams(input, *input.Spec.Size, input.Spec.RedisFollower.RedisConfig.AdditionalRedisConfig, RedisClusterSTS{ + actualFollowerSTS := generateRedisClusterParams(context.TODO(), input, *input.Spec.Size, input.Spec.RedisFollower.RedisConfig.AdditionalRedisConfig, RedisClusterSTS{ RedisStateFulType: "follower", ExternalConfig: input.Spec.RedisFollower.RedisConfig.AdditionalRedisConfig, SecurityContext: input.Spec.RedisFollower.SecurityContext, @@ -430,12 +430,11 @@ func Test_generateRedisClusterContainerParams(t *testing.T) { if err != nil { t.Fatalf("Failed to unmarshal file %s: %v", path, err) } - logger := testr.New(t) - actualLeaderContainer := generateRedisClusterContainerParams(fake.NewSimpleClientset(), logger, input, input.Spec.RedisLeader.SecurityContext, input.Spec.RedisLeader.ReadinessProbe, input.Spec.RedisLeader.LivenessProbe, "leader") + actualLeaderContainer := generateRedisClusterContainerParams(context.TODO(), fake.NewSimpleClientset(), input, input.Spec.RedisLeader.SecurityContext, input.Spec.RedisLeader.ReadinessProbe, input.Spec.RedisLeader.LivenessProbe, "leader") assert.EqualValues(t, expectedLeaderContainer, actualLeaderContainer, "Expected %+v, got %+v", expectedLeaderContainer, actualLeaderContainer) - actualFollowerContainer := generateRedisClusterContainerParams(fake.NewSimpleClientset(), logger, input, input.Spec.RedisFollower.SecurityContext, input.Spec.RedisFollower.ReadinessProbe, input.Spec.RedisFollower.LivenessProbe, "follower") + actualFollowerContainer := generateRedisClusterContainerParams(context.TODO(), fake.NewSimpleClientset(), input, input.Spec.RedisFollower.SecurityContext, input.Spec.RedisFollower.ReadinessProbe, input.Spec.RedisFollower.LivenessProbe, "follower") assert.EqualValues(t, expectedFollowerContainer, actualFollowerContainer, "Expected %+v, got %+v", expectedFollowerContainer, actualFollowerContainer) } diff --git a/pkg/k8sutils/redis-replication.go b/pkg/k8sutils/redis-replication.go index d1287e4f6..ad6a84128 100644 --- a/pkg/k8sutils/redis-replication.go +++ b/pkg/k8sutils/redis-replication.go @@ -5,15 +5,14 @@ import ( redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/util" - "github.com/go-logr/logr" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/log" ) // CreateReplicationService method will create replication service for Redis -func CreateReplicationService(cr *redisv1beta2.RedisReplication, cl kubernetes.Interface) error { - logger := serviceLogger(cr.Namespace, cr.ObjectMeta.Name) +func CreateReplicationService(ctx context.Context, cr *redisv1beta2.RedisReplication, cl kubernetes.Interface) error { labels := getRedisLabels(cr.ObjectMeta.Name, replication, "replication", cr.ObjectMeta.Labels) epp := 
disableMetrics @@ -37,24 +36,24 @@ func CreateReplicationService(cr *redisv1beta2.RedisReplication, cl kubernetes.I masterObjectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name+"-master", cr.Namespace, masterLabels, annotations) replicaObjectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name+"-replica", cr.Namespace, replicaLabels, annotations) - if err := CreateOrUpdateService(cr.Namespace, headlessObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, true, "ClusterIP", redisPort, cl); err != nil { - logger.Error(err, "Cannot create replication headless service for Redis") + if err := CreateOrUpdateService(ctx, cr.Namespace, headlessObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, true, "ClusterIP", redisPort, cl); err != nil { + log.FromContext(ctx).Error(err, "Cannot create replication headless service for Redis") return err } - if err := CreateOrUpdateService(cr.Namespace, objectMetaInfo, redisReplicationAsOwner(cr), epp, false, "ClusterIP", redisPort, cl); err != nil { - logger.Error(err, "Cannot create replication service for Redis") + if err := CreateOrUpdateService(ctx, cr.Namespace, objectMetaInfo, redisReplicationAsOwner(cr), epp, false, "ClusterIP", redisPort, cl); err != nil { + log.FromContext(ctx).Error(err, "Cannot create replication service for Redis") return err } - if err := CreateOrUpdateService(cr.Namespace, additionalObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, false, cr.Spec.KubernetesConfig.GetServiceType(), redisPort, cl); err != nil { - logger.Error(err, "Cannot create additional service for Redis Replication") + if err := CreateOrUpdateService(ctx, cr.Namespace, additionalObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, false, cr.Spec.KubernetesConfig.GetServiceType(), redisPort, cl); err != nil { + log.FromContext(ctx).Error(err, "Cannot create additional service for Redis Replication") return err } - if err := CreateOrUpdateService(cr.Namespace, masterObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, false, "ClusterIP", redisPort, cl); err != nil { - logger.Error(err, "Cannot create master service for Redis") + if err := CreateOrUpdateService(ctx, cr.Namespace, masterObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, false, "ClusterIP", redisPort, cl); err != nil { + log.FromContext(ctx).Error(err, "Cannot create master service for Redis") return err } - if err := CreateOrUpdateService(cr.Namespace, replicaObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, false, "ClusterIP", redisPort, cl); err != nil { - logger.Error(err, "Cannot create replica service for Redis") + if err := CreateOrUpdateService(ctx, cr.Namespace, replicaObjectMetaInfo, redisReplicationAsOwner(cr), disableMetrics, false, "ClusterIP", redisPort, cl); err != nil { + log.FromContext(ctx).Error(err, "Cannot create replica service for Redis") return err } @@ -62,16 +61,15 @@ func CreateReplicationService(cr *redisv1beta2.RedisReplication, cl kubernetes.I } // CreateReplicationRedis will create a replication redis setup -func CreateReplicationRedis(cr *redisv1beta2.RedisReplication, cl kubernetes.Interface) error { +func CreateReplicationRedis(ctx context.Context, cr *redisv1beta2.RedisReplication, cl kubernetes.Interface) error { stateFulName := cr.ObjectMeta.Name - logger := statefulSetLogger(cr.Namespace, cr.ObjectMeta.Name) labels := getRedisLabels(cr.ObjectMeta.Name, replication, "replication", cr.ObjectMeta.Labels) annotations := generateStatefulSetsAnots(cr.ObjectMeta, 
cr.Spec.KubernetesConfig.IgnoreAnnotations) objectMetaInfo := generateObjectMetaInformation(stateFulName, cr.Namespace, labels, annotations) err := CreateOrUpdateStateFul( + ctx, cl, - logger, cr.GetNamespace(), objectMetaInfo, generateRedisReplicationParams(cr), @@ -81,7 +79,7 @@ func CreateReplicationRedis(cr *redisv1beta2.RedisReplication, cl kubernetes.Int cr.Spec.Sidecars, ) if err != nil { - logger.Error(err, "Cannot create replication statefulset for Redis") + log.FromContext(ctx).Error(err, "Cannot create replication statefulset for Redis") return err } return nil @@ -216,9 +214,9 @@ func generateRedisReplicationInitContainerParams(cr *redisv1beta2.RedisReplicati return initcontainerProp } -func IsRedisReplicationReady(ctx context.Context, logger logr.Logger, client kubernetes.Interface, dClient dynamic.Interface, rs *redisv1beta2.RedisSentinel) bool { +func IsRedisReplicationReady(ctx context.Context, client kubernetes.Interface, dClient dynamic.Interface, rs *redisv1beta2.RedisSentinel) bool { // statefulset name the same as the redis replication name - sts, err := GetStatefulSet(client, logger, rs.GetNamespace(), rs.Spec.RedisSentinelConfig.RedisReplicationName) + sts, err := GetStatefulSet(ctx, client, rs.GetNamespace(), rs.Spec.RedisSentinelConfig.RedisReplicationName) if err != nil { return false } @@ -234,7 +232,7 @@ func IsRedisReplicationReady(ctx context.Context, logger logr.Logger, client kub // Enhanced check: When the pod is ready, it may not have been // created as part of a replication cluster, so we should verify // whether there is an actual master node. - if master := getRedisReplicationMasterIP(ctx, client, logger, rs, dClient); master == "" { + if master := getRedisReplicationMasterIP(ctx, client, rs, dClient); master == "" { return false } return true diff --git a/pkg/k8sutils/redis-sentinel.go b/pkg/k8sutils/redis-sentinel.go index 3d2f4ba6b..4b1a56ff2 100644 --- a/pkg/k8sutils/redis-sentinel.go +++ b/pkg/k8sutils/redis-sentinel.go @@ -7,13 +7,13 @@ import ( redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/util" - "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/log" ) // RedisSentinelSTS is a interface to call Redis Statefulset function @@ -36,7 +36,7 @@ type RedisReplicationObject struct { } // Redis Sentinel Create the Redis Sentinel Setup -func CreateRedisSentinel(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface, dcl dynamic.Interface) error { +func CreateRedisSentinel(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface, dcl dynamic.Interface) error { prop := RedisSentinelSTS{ RedisStateFulType: "sentinel", Affinity: cr.Spec.Affinity, @@ -49,43 +49,43 @@ func CreateRedisSentinel(ctx context.Context, client kubernetes.Interface, logge prop.ExternalConfig = cr.Spec.RedisSentinelConfig.AdditionalSentinelConfig } - return prop.CreateRedisSentinelSetup(ctx, client, logger, cr, cl, dcl) + return prop.CreateRedisSentinelSetup(ctx, client, cr, cl, dcl) } // Create RedisSentinel Service -func CreateRedisSentinelService(cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface) error { +func CreateRedisSentinelService(ctx context.Context, cr *redisv1beta2.RedisSentinel, cl 
kubernetes.Interface) error { prop := RedisSentinelService{ RedisServiceRole: "sentinel", } - return prop.CreateRedisSentinelService(cr, cl) + return prop.CreateRedisSentinelService(ctx, cr, cl) } // Create Redis Sentinel Cluster Setup -func (service RedisSentinelSTS) CreateRedisSentinelSetup(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface, dcl dynamic.Interface) error { +func (service RedisSentinelSTS) CreateRedisSentinelSetup(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface, dcl dynamic.Interface) error { stateFulName := cr.ObjectMeta.Name + "-" + service.RedisStateFulType labels := getRedisLabels(stateFulName, sentinel, service.RedisStateFulType, cr.ObjectMeta.Labels) annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations) objectMetaInfo := generateObjectMetaInformation(stateFulName, cr.Namespace, labels, annotations) err := CreateOrUpdateStateFul( + ctx, cl, - logger, cr.GetNamespace(), objectMetaInfo, - generateRedisSentinelParams(cr, service.getSentinelCount(cr), service.ExternalConfig, service.Affinity), + generateRedisSentinelParams(ctx, cr, service.getSentinelCount(cr), service.ExternalConfig, service.Affinity), redisSentinelAsOwner(cr), generateRedisSentinelInitContainerParams(cr), - generateRedisSentinelContainerParams(ctx, client, logger, cr, service.ReadinessProbe, service.LivenessProbe, dcl), + generateRedisSentinelContainerParams(ctx, client, cr, service.ReadinessProbe, service.LivenessProbe, dcl), cr.Spec.Sidecars, ) if err != nil { - logger.Error(err, "Cannot create Sentinel statefulset for Redis") + log.FromContext(ctx).Error(err, "Cannot create Sentinel statefulset for Redis") return err } return nil } // Create Redis Sentinel Params for the statefulset -func generateRedisSentinelParams(cr *redisv1beta2.RedisSentinel, replicas int32, externalConfig *string, affinity *corev1.Affinity) statefulSetParameters { +func generateRedisSentinelParams(ctx context.Context, cr *redisv1beta2.RedisSentinel, replicas int32, externalConfig *string, affinity *corev1.Affinity) statefulSetParameters { var minreadyseconds int32 = 0 if cr.Spec.KubernetesConfig.MinReadySeconds != nil { minreadyseconds = *cr.Spec.KubernetesConfig.MinReadySeconds @@ -148,7 +148,7 @@ func generateRedisSentinelInitContainerParams(cr *redisv1beta2.RedisSentinel) in } // Create Redis Sentinel Statefulset Container Params -func generateRedisSentinelContainerParams(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, readinessProbeDef *corev1.Probe, livenessProbeDef *corev1.Probe, dcl dynamic.Interface) containerParameters { +func generateRedisSentinelContainerParams(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisSentinel, readinessProbeDef *corev1.Probe, livenessProbeDef *corev1.Probe, dcl dynamic.Interface) containerParameters { trueProperty := true falseProperty := false containerProp := containerParameters{ @@ -158,7 +158,7 @@ func generateRedisSentinelContainerParams(ctx context.Context, client kubernetes Resources: cr.Spec.KubernetesConfig.Resources, SecurityContext: cr.Spec.SecurityContext, Port: ptr.To(sentinelPort), - AdditionalEnvVariable: getSentinelEnvVariable(ctx, client, logger, cr, dcl), + AdditionalEnvVariable: getSentinelEnvVariable(ctx, client, cr, dcl), } if cr.Spec.EnvVars != nil { containerProp.EnvVars = cr.Spec.EnvVars @@ -206,9 +206,8 @@ func
(service RedisSentinelSTS) getSentinelCount(cr *redisv1beta2.RedisSentinel) } // Create the Service for redis sentinel -func (service RedisSentinelService) CreateRedisSentinelService(cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface) error { +func (service RedisSentinelService) CreateRedisSentinelService(ctx context.Context, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface) error { serviceName := cr.ObjectMeta.Name + "-" + service.RedisServiceRole - logger := serviceLogger(cr.Namespace, serviceName) labels := getRedisLabels(serviceName, sentinel, service.RedisServiceRole, cr.ObjectMeta.Labels) var epp exporterPortProvider if cr.Spec.RedisExporter != nil { @@ -224,18 +223,19 @@ func (service RedisSentinelService) CreateRedisSentinelService(cr *redisv1beta2. headlessObjectMetaInfo := generateObjectMetaInformation(serviceName+"-headless", cr.Namespace, labels, annotations) additionalObjectMetaInfo := generateObjectMetaInformation(serviceName+"-additional", cr.Namespace, labels, generateServiceAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.GetServiceAnnotations(), epp)) - err := CreateOrUpdateService(cr.Namespace, headlessObjectMetaInfo, redisSentinelAsOwner(cr), disableMetrics, true, "ClusterIP", sentinelPort, cl) + err := CreateOrUpdateService(ctx, cr.Namespace, headlessObjectMetaInfo, redisSentinelAsOwner(cr), disableMetrics, true, "ClusterIP", sentinelPort, cl) if err != nil { - logger.Error(err, "Cannot create headless service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create headless service for Redis", "Setup.Type", service.RedisServiceRole) return err } - err = CreateOrUpdateService(cr.Namespace, objectMetaInfo, redisSentinelAsOwner(cr), epp, false, "ClusterIP", sentinelPort, cl) + err = CreateOrUpdateService(ctx, cr.Namespace, objectMetaInfo, redisSentinelAsOwner(cr), epp, false, "ClusterIP", sentinelPort, cl) if err != nil { - logger.Error(err, "Cannot create service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create service for Redis", "Setup.Type", service.RedisServiceRole) return err } err = CreateOrUpdateService( + ctx, cr.Namespace, additionalObjectMetaInfo, redisSentinelAsOwner(cr), @@ -246,13 +246,13 @@ func (service RedisSentinelService) CreateRedisSentinelService(cr *redisv1beta2. 
cl, ) if err != nil { - logger.Error(err, "Cannot create additional service for Redis", "Setup.Type", service.RedisServiceRole) + log.FromContext(ctx).Error(err, "Cannot create additional service for Redis", "Setup.Type", service.RedisServiceRole) return err } return nil } -func getSentinelEnvVariable(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, dcl dynamic.Interface) *[]corev1.EnvVar { +func getSentinelEnvVariable(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisSentinel, dcl dynamic.Interface) *[]corev1.EnvVar { if cr.Spec.RedisSentinelConfig == nil { return &[]corev1.EnvVar{} } @@ -264,7 +264,7 @@ func getSentinelEnvVariable(ctx context.Context, client kubernetes.Interface, lo }, { Name: "IP", - Value: getRedisReplicationMasterIP(ctx, client, logger, cr, dcl), + Value: getRedisReplicationMasterIP(ctx, client, cr, dcl), }, { Name: "PORT", @@ -297,7 +297,7 @@ func getSentinelEnvVariable(ctx context.Context, client kubernetes.Interface, lo return envVar } -func getRedisReplicationMasterIP(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, dcl dynamic.Interface) string { +func getRedisReplicationMasterIP(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisSentinel, dcl dynamic.Interface) string { replicationName := cr.Spec.RedisSentinelConfig.RedisReplicationName replicationNamespace := cr.Namespace @@ -312,41 +312,41 @@ func getRedisReplicationMasterIP(ctx context.Context, client kubernetes.Interfac }).Namespace(replicationNamespace).Get(context.TODO(), replicationName, v1.GetOptions{}) if err != nil { - logger.Error(err, "Failed to Execute Get Request", "replication name", replicationName, "namespace", replicationNamespace) + log.FromContext(ctx).Error(err, "Failed to Execute Get Request", "replication name", replicationName, "namespace", replicationNamespace) return "" } else { - logger.V(1).Info("Successfully Execute the Get Request", "replication name", replicationName, "namespace", replicationNamespace) + log.FromContext(ctx).V(1).Info("Successfully Execute the Get Request", "replication name", replicationName, "namespace", replicationNamespace) } // Marshal CustomObject to JSON replicationJSON, err := customObject.MarshalJSON() if err != nil { - logger.Error(err, "Failed To Load JSON") + log.FromContext(ctx).Error(err, "Failed To Load JSON") return "" } // Unmarshal The JSON on Object if err := json.Unmarshal(replicationJSON, &replicationInstance); err != nil { - logger.Error(err, "Failed To Unmarshal JSON over the Object") + log.FromContext(ctx).Error(err, "Failed To Unmarshal JSON over the Object") return "" } - masterPods := GetRedisNodesByRole(ctx, client, logger, &replicationInstance, "master") + masterPods := GetRedisNodesByRole(ctx, client, &replicationInstance, "master") if len(masterPods) == 0 { - logger.Error(errors.New("no master pods found"), "") + log.FromContext(ctx).Error(errors.New("no master pods found"), "") return "" } for _, podName := range masterPods { - redisClient := configureRedisReplicationClient(client, logger, &replicationInstance, podName) + redisClient := configureRedisReplicationClient(ctx, client, &replicationInstance, podName) defer redisClient.Close() - if checkAttachedSlave(ctx, redisClient, logger, podName) > 0 { + if checkAttachedSlave(ctx, redisClient, podName) > 0 { realMasterPod = podName break } } if realMasterPod == "" { - logger.Error(errors.New("no real master pod found"), "") + 
log.FromContext(ctx).Error(errors.New("no real master pod found"), "") return "" } @@ -354,5 +354,5 @@ func getRedisReplicationMasterIP(ctx context.Context, client kubernetes.Interfac PodName: realMasterPod, Namespace: replicationNamespace, } - return getRedisServerIP(client, logger, realMasterInfo) + return getRedisServerIP(ctx, client, realMasterInfo) } diff --git a/pkg/k8sutils/redis-sentinel_test.go b/pkg/k8sutils/redis-sentinel_test.go index 76b467c4c..5f2160748 100644 --- a/pkg/k8sutils/redis-sentinel_test.go +++ b/pkg/k8sutils/redis-sentinel_test.go @@ -9,7 +9,6 @@ import ( common "github.com/OT-CONTAINER-KIT/redis-operator/api" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" - "github.com/go-logr/logr" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" @@ -83,7 +82,7 @@ func Test_generateRedisSentinelParams(t *testing.T) { t.Fatalf("Failed to unmarshal file %s: %v", path, err) } - actual := generateRedisSentinelParams(input, *input.Spec.Size, nil, input.Spec.Affinity) + actual := generateRedisSentinelParams(context.TODO(), input, *input.Spec.Size, nil, input.Spec.Affinity) assert.EqualValues(t, expected, actual, "Expected %+v, got %+v", expected, actual) } @@ -208,7 +207,7 @@ func Test_generateRedisSentinelContainerParams(t *testing.T) { t.Fatalf("Failed to unmarshal file %s: %v", path, err) } - actual := generateRedisSentinelContainerParams(context.TODO(), nil, logr.Logger{}, input, nil, nil, nil) + actual := generateRedisSentinelContainerParams(context.TODO(), nil, input, nil, nil, nil) assert.EqualValues(t, expected, actual, "Expected %+v, got %+v", expected, actual) } @@ -292,7 +291,6 @@ func Test_generateRedisSentinelInitContainerParams(t *testing.T) { func Test_getSentinelEnvVariable(t *testing.T) { type args struct { client kubernetes.Interface - logger logr.Logger cr *redisv1beta2.RedisSentinel } tests := []struct { @@ -304,7 +302,6 @@ func Test_getSentinelEnvVariable(t *testing.T) { name: "When RedisSentinelConfig is nil", args: args{ client: nil, - logger: logr.Logger{}, cr: &redisv1beta2.RedisSentinel{}, }, want: &[]corev1.EnvVar{}, @@ -313,7 +310,6 @@ func Test_getSentinelEnvVariable(t *testing.T) { name: "When RedisSentinelConfig is not nil", args: args{ client: nil, - logger: logr.Logger{}, cr: &redisv1beta2.RedisSentinel{ Spec: redisv1beta2.RedisSentinelSpec{ RedisSentinelConfig: &redisv1beta2.RedisSentinelConfig{ @@ -364,7 +360,7 @@ func Test_getSentinelEnvVariable(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctx := context.TODO() - if got := getSentinelEnvVariable(ctx, tt.args.client, tt.args.logger, tt.args.cr, fake.NewSimpleDynamicClient(&runtime.Scheme{})); !reflect.DeepEqual(got, tt.want) { + if got := getSentinelEnvVariable(ctx, tt.args.client, tt.args.cr, fake.NewSimpleDynamicClient(&runtime.Scheme{})); !reflect.DeepEqual(got, tt.want) { t.Errorf("getSentinelEnvVariable() = %v, want %v", got, tt.want) } }) diff --git a/pkg/k8sutils/redis-standalone.go b/pkg/k8sutils/redis-standalone.go index 05a911760..11bf31ac1 100644 --- a/pkg/k8sutils/redis-standalone.go +++ b/pkg/k8sutils/redis-standalone.go @@ -1,15 +1,17 @@ package k8sutils import ( + "context" + redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/util" "k8s.io/client-go/kubernetes" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/log" ) // CreateStandaloneService method will create standalone service for Redis -func 
CreateStandaloneService(cr *redisv1beta2.Redis, cl kubernetes.Interface) error { - logger := serviceLogger(cr.Namespace, cr.ObjectMeta.Name) +func CreateStandaloneService(ctx context.Context, cr *redisv1beta2.Redis, cl kubernetes.Interface) error { labels := getRedisLabels(cr.ObjectMeta.Name, standalone, "standalone", cr.ObjectMeta.Labels) var epp exporterPortProvider if cr.Spec.RedisExporter != nil { @@ -24,17 +26,18 @@ func CreateStandaloneService(cr *redisv1beta2.Redis, cl kubernetes.Interface) er objectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name, cr.Namespace, labels, annotations) headlessObjectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name+"-headless", cr.Namespace, labels, annotations) additionalObjectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name+"-additional", cr.Namespace, labels, generateServiceAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.GetServiceAnnotations(), epp)) - err := CreateOrUpdateService(cr.Namespace, headlessObjectMetaInfo, redisAsOwner(cr), disableMetrics, true, "ClusterIP", redisPort, cl) + err := CreateOrUpdateService(ctx, cr.Namespace, headlessObjectMetaInfo, redisAsOwner(cr), disableMetrics, true, "ClusterIP", redisPort, cl) if err != nil { - logger.Error(err, "Cannot create standalone headless service for Redis") + log.FromContext(ctx).Error(err, "Cannot create standalone headless service for Redis") return err } - err = CreateOrUpdateService(cr.Namespace, objectMetaInfo, redisAsOwner(cr), epp, false, "ClusterIP", redisPort, cl) + err = CreateOrUpdateService(ctx, cr.Namespace, objectMetaInfo, redisAsOwner(cr), epp, false, "ClusterIP", redisPort, cl) if err != nil { - logger.Error(err, "Cannot create standalone service for Redis") + log.FromContext(ctx).Error(err, "Cannot create standalone service for Redis") return err } err = CreateOrUpdateService( + ctx, cr.Namespace, additionalObjectMetaInfo, redisAsOwner(cr), @@ -45,21 +48,20 @@ func CreateStandaloneService(cr *redisv1beta2.Redis, cl kubernetes.Interface) er cl, ) if err != nil { - logger.Error(err, "Cannot create additional service for Redis") + log.FromContext(ctx).Error(err, "Cannot create additional service for Redis") return err } return nil } // CreateStandaloneRedis will create a standalone redis setup -func CreateStandaloneRedis(cr *redisv1beta2.Redis, cl kubernetes.Interface) error { - logger := statefulSetLogger(cr.Namespace, cr.ObjectMeta.Name) +func CreateStandaloneRedis(ctx context.Context, cr *redisv1beta2.Redis, cl kubernetes.Interface) error { labels := getRedisLabels(cr.ObjectMeta.Name, standalone, "standalone", cr.ObjectMeta.Labels) annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations) objectMetaInfo := generateObjectMetaInformation(cr.ObjectMeta.Name, cr.Namespace, labels, annotations) err := CreateOrUpdateStateFul( + ctx, cl, - logger, cr.GetNamespace(), objectMetaInfo, generateRedisStandaloneParams(cr), @@ -69,7 +71,7 @@ func CreateStandaloneRedis(cr *redisv1beta2.Redis, cl kubernetes.Interface) erro cr.Spec.Sidecars, ) if err != nil { - logger.Error(err, "Cannot create standalone statefulset for Redis") + log.FromContext(ctx).Error(err, "Cannot create standalone statefulset for Redis") return err } return nil diff --git a/pkg/k8sutils/redis.go b/pkg/k8sutils/redis.go index 5f65e5db3..ea561e634 100644 --- a/pkg/k8sutils/redis.go +++ b/pkg/k8sutils/redis.go @@ -10,13 +10,13 @@ import ( "strings" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" - "github.com/go-logr/logr" redis 
"github.com/redis/go-redis/v9" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/remotecommand" + "sigs.k8s.io/controller-runtime/pkg/log" ) // RedisDetails will hold the information for Redis Pod @@ -26,35 +26,35 @@ type RedisDetails struct { } // getRedisServerIP will return the IP of redis service -func getRedisServerIP(client kubernetes.Interface, logger logr.Logger, redisInfo RedisDetails) string { - logger.V(1).Info("Fetching Redis pod", "namespace", redisInfo.Namespace, "podName", redisInfo.PodName) +func getRedisServerIP(ctx context.Context, client kubernetes.Interface, redisInfo RedisDetails) string { + log.FromContext(ctx).V(1).Info("Fetching Redis pod", "namespace", redisInfo.Namespace, "podName", redisInfo.PodName) redisPod, err := client.CoreV1().Pods(redisInfo.Namespace).Get(context.TODO(), redisInfo.PodName, metav1.GetOptions{}) if err != nil { - logger.Error(err, "Error in getting Redis pod IP", "namespace", redisInfo.Namespace, "podName", redisInfo.PodName) + log.FromContext(ctx).Error(err, "Error in getting Redis pod IP", "namespace", redisInfo.Namespace, "podName", redisInfo.PodName) return "" } redisIP := redisPod.Status.PodIP - logger.V(1).Info("Fetched Redis pod IP", "ip", redisIP) + log.FromContext(ctx).V(1).Info("Fetched Redis pod IP", "ip", redisIP) // Check if IP is empty if redisIP == "" { - logger.V(1).Info("Redis pod IP is empty", "namespace", redisInfo.Namespace, "podName", redisInfo.PodName) + log.FromContext(ctx).V(1).Info("Redis pod IP is empty", "namespace", redisInfo.Namespace, "podName", redisInfo.PodName) return "" } // If we're NOT IPv4, assume we're IPv6.. if net.ParseIP(redisIP).To4() == nil { - logger.V(1).Info("Redis is using IPv6", "ip", redisIP) + log.FromContext(ctx).V(1).Info("Redis is using IPv6", "ip", redisIP) } - logger.V(1).Info("Successfully got the IP for Redis", "ip", redisIP) + log.FromContext(ctx).V(1).Info("Successfully got the IP for Redis", "ip", redisIP) return redisIP } -func getRedisServerAddress(client kubernetes.Interface, logger logr.Logger, rd RedisDetails, port int) string { - ip := getRedisServerIP(client, logger, rd) +func getRedisServerAddress(ctx context.Context, client kubernetes.Interface, rd RedisDetails, port int) string { + ip := getRedisServerIP(ctx, client, rd) format := "%s:%d" // if ip is IPv6, wrap it in brackets @@ -72,12 +72,12 @@ func getRedisHostname(redisInfo RedisDetails, cr *redisv1beta2.RedisCluster, rol } // CreateSingleLeaderRedisCommand will create command for single leader cluster creation -func CreateSingleLeaderRedisCommand(logger logr.Logger, cr *redisv1beta2.RedisCluster) []string { +func CreateSingleLeaderRedisCommand(ctx context.Context, cr *redisv1beta2.RedisCluster) []string { cmd := []string{"redis-cli", "CLUSTER", "ADDSLOTS"} for i := 0; i < 16384; i++ { cmd = append(cmd, strconv.Itoa(i)) } - logger.V(1).Info("Generating Redis Add Slots command for single node cluster", + log.FromContext(ctx).V(1).Info("Generating Redis Add Slots command for single node cluster", "BaseCommand", cmd[:3], "SlotsRange", "0-16383", "TotalSlots", 16384) @@ -87,14 +87,14 @@ func CreateSingleLeaderRedisCommand(logger logr.Logger, cr *redisv1beta2.RedisCl // RepairDisconnectedMasters attempts to repair disconnected/failed masters by issuing // a CLUSTER MEET with the updated address of the host -func RepairDisconnectedMasters(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr 
*redisv1beta2.RedisCluster) error { - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") +func RepairDisconnectedMasters(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) error { + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() - return repairDisconnectedMasters(ctx, client, logger, cr, redisClient) + return repairDisconnectedMasters(ctx, client, cr, redisClient) } -func repairDisconnectedMasters(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, redisClient *redis.Client) error { - nodes, err := clusterNodes(ctx, redisClient, logger) +func repairDisconnectedMasters(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, redisClient *redis.Client) error { + nodes, err := clusterNodes(ctx, redisClient) if err != nil { return err } @@ -106,12 +106,11 @@ func repairDisconnectedMasters(ctx context.Context, client kubernetes.Interface, if !nodeFailedOrDisconnected(node) { continue } - log.V(1).Info("found disconnected master node", "node", node) podName, err := getMasterHostFromClusterNode(node) if err != nil { return err } - ip := getRedisServerIP(client, logger, RedisDetails{ + ip := getRedisServerIP(ctx, client, RedisDetails{ PodName: podName, Namespace: cr.Namespace, }) @@ -133,7 +132,7 @@ func getMasterHostFromClusterNode(node clusterNodesResponse) (string, error) { } // CreateMultipleLeaderRedisCommand will create command for single leader cluster creation -func CreateMultipleLeaderRedisCommand(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) []string { +func CreateMultipleLeaderRedisCommand(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) []string { cmd := []string{"redis-cli", "--cluster", "create"} replicas := cr.Spec.GetReplicaCounts("leader") @@ -143,42 +142,42 @@ func CreateMultipleLeaderRedisCommand(client kubernetes.Interface, logger logr.L if cr.Spec.ClusterVersion != nil && *cr.Spec.ClusterVersion == "v7" { address = getRedisHostname(RedisDetails{PodName: podName, Namespace: cr.Namespace}, cr, "leader") + fmt.Sprintf(":%d", *cr.Spec.Port) } else { - address = getRedisServerAddress(client, logger, RedisDetails{PodName: podName, Namespace: cr.Namespace}, *cr.Spec.Port) + address = getRedisServerAddress(ctx, client, RedisDetails{PodName: podName, Namespace: cr.Namespace}, *cr.Spec.Port) } cmd = append(cmd, address) } cmd = append(cmd, "--cluster-yes") - logger.V(1).Info("Redis cluster creation command", "CommandBase", cmd[:3], "Replicas", replicas) + log.FromContext(ctx).V(1).Info("Redis cluster creation command", "CommandBase", cmd[:3], "Replicas", replicas) return cmd } // ExecuteRedisClusterCommand will execute redis cluster creation command -func ExecuteRedisClusterCommand(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { +func ExecuteRedisClusterCommand(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { var cmd []string replicas := cr.Spec.GetReplicaCounts("leader") switch int(replicas) { case 1: - err := executeFailoverCommand(ctx, client, logger, cr, "leader") + err := executeFailoverCommand(ctx, client, cr, "leader") if err != nil { - logger.Error(err, "error executing failover command") + log.FromContext(ctx).Error(err, "error executing failover command") } - cmd = CreateSingleLeaderRedisCommand(logger, cr) + cmd = 
CreateSingleLeaderRedisCommand(ctx, cr) default: - cmd = CreateMultipleLeaderRedisCommand(client, logger, cr) + cmd = CreateMultipleLeaderRedisCommand(ctx, client, cr) } if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) } cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) - logger.V(1).Info("Redis cluster creation command is", "Command", cmd) - executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + log.FromContext(ctx).V(1).Info("Redis cluster creation command is", "Command", cmd) + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") } func getRedisTLSArgs(tlsConfig *redisv1beta2.TLSConfig, clientHost string) []string { @@ -194,7 +193,7 @@ func getRedisTLSArgs(tlsConfig *redisv1beta2.TLSConfig, clientHost string) []str } // createRedisReplicationCommand will create redis replication creation command -func createRedisReplicationCommand(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, leaderPod RedisDetails, followerPod RedisDetails) []string { +func createRedisReplicationCommand(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, leaderPod RedisDetails, followerPod RedisDetails) []string { cmd := []string{"redis-cli", "--cluster", "add-node"} var followerAddress, leaderAddress string @@ -202,16 +201,16 @@ func createRedisReplicationCommand(client kubernetes.Interface, logger logr.Logg followerAddress = getRedisHostname(followerPod, cr, "follower") + fmt.Sprintf(":%d", *cr.Spec.Port) leaderAddress = getRedisHostname(leaderPod, cr, "leader") + fmt.Sprintf(":%d", *cr.Spec.Port) } else { - followerAddress = getRedisServerAddress(client, logger, followerPod, *cr.Spec.Port) - leaderAddress = getRedisServerAddress(client, logger, leaderPod, *cr.Spec.Port) + followerAddress = getRedisServerAddress(ctx, client, followerPod, *cr.Spec.Port) + leaderAddress = getRedisServerAddress(ctx, client, leaderPod, *cr.Spec.Port) } cmd = append(cmd, followerAddress, leaderAddress, "--cluster-slave") if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Failed to retrieve Redis password", "Secret", *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name) + log.FromContext(ctx).Error(err, "Failed to retrieve Redis password", "Secret", *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name) } else { cmd = append(cmd, "-a", pass) } @@ -219,7 +218,7 @@ func createRedisReplicationCommand(client kubernetes.Interface, logger logr.Logg cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, leaderPod.PodName)...) 
- logger.V(1).Info("Generated Redis replication command", + log.FromContext(ctx).V(1).Info("Generated Redis replication command", "FollowerAddress", followerAddress, "LeaderAddress", leaderAddress, "Command", cmd) @@ -227,18 +226,18 @@ func createRedisReplicationCommand(client kubernetes.Interface, logger logr.Logg } // ExecuteRedisReplicationCommand will execute the replication command -func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) { +func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) { var podIP string followerCounts := cr.Spec.GetReplicaCounts("follower") leaderCounts := cr.Spec.GetReplicaCounts("leader") followerPerLeader := followerCounts / leaderCounts - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() - nodes, err := clusterNodes(ctx, redisClient, logger) + nodes, err := clusterNodes(ctx, redisClient) if err != nil { - logger.Error(err, "failed to get cluster nodes") + log.FromContext(ctx).Error(err, "failed to get cluster nodes") } for followerIdx := 0; followerIdx <= int(followerCounts)-1; { for i := 0; i < int(followerPerLeader) && followerIdx <= int(followerCounts)-1; i++ { @@ -250,24 +249,24 @@ func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Inter PodName: cr.ObjectMeta.Name + "-leader-" + strconv.Itoa((followerIdx)%int(leaderCounts)), Namespace: cr.Namespace, } - podIP = getRedisServerIP(client, logger, followerPod) - if !checkRedisNodePresence(cr, nodes, podIP) { - logger.V(1).Info("Adding node to cluster.", "Node.IP", podIP, "Follower.Pod", followerPod) - cmd := createRedisReplicationCommand(client, logger, cr, leaderPod, followerPod) - redisClient := configureRedisClient(client, logger, cr, followerPod.PodName) + podIP = getRedisServerIP(ctx, client, followerPod) + if !checkRedisNodePresence(ctx, cr, nodes, podIP) { + log.FromContext(ctx).V(1).Info("Adding node to cluster.", "Node.IP", podIP, "Follower.Pod", followerPod) + cmd := createRedisReplicationCommand(ctx, client, cr, leaderPod, followerPod) + redisClient := configureRedisClient(ctx, client, cr, followerPod.PodName) pong, err := redisClient.Ping(ctx).Result() redisClient.Close() if err != nil { - logger.Error(err, "Failed to ping Redis server", "Follower.Pod", followerPod) + log.FromContext(ctx).Error(err, "Failed to ping Redis server", "Follower.Pod", followerPod) continue } if pong == "PONG" { - executeCommand(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + executeCommand(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") } else { - logger.V(1).Info("Skipping execution of command due to failed Redis ping", "Follower.Pod", followerPod) + log.FromContext(ctx).V(1).Info("Skipping execution of command due to failed Redis ping", "Follower.Pod", followerPod) } } else { - logger.V(1).Info("Skipping Adding node to cluster, already present.", "Follower.Pod", followerPod) + log.FromContext(ctx).V(1).Info("Skipping Adding node to cluster, already present.", "Follower.Pod", followerPod) } followerIdx++ @@ -278,7 +277,7 @@ func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Inter type clusterNodesResponse []string // clusterNodes will returns the response of CLUSTER NODES -func clusterNodes(ctx context.Context, redisClient *redis.Client, logger logr.Logger) 
([]clusterNodesResponse, error) { +func clusterNodes(ctx context.Context, redisClient *redis.Client) ([]clusterNodesResponse, error) { output, err := redisClient.ClusterNodes(ctx).Result() if err != nil { return nil, err @@ -299,62 +298,60 @@ func clusterNodes(ctx context.Context, redisClient *redis.Client, logger logr.Lo } // ExecuteFailoverOperation will execute redis failover operations -func ExecuteFailoverOperation(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) error { - err := executeFailoverCommand(ctx, client, logger, cr, "leader") +func ExecuteFailoverOperation(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) error { + err := executeFailoverCommand(ctx, client, cr, "leader") if err != nil { - logger.Error(err, "Redis command failed for leader nodes") return err } - err = executeFailoverCommand(ctx, client, logger, cr, "follower") + err = executeFailoverCommand(ctx, client, cr, "follower") if err != nil { - logger.Error(err, "Redis command failed for follower nodes") return err } return nil } // executeFailoverCommand will execute failover command -func executeFailoverCommand(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, role string) error { +func executeFailoverCommand(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, role string) error { replicas := cr.Spec.GetReplicaCounts(role) podName := fmt.Sprintf("%s-%s-", cr.ObjectMeta.Name, role) for podCount := 0; podCount <= int(replicas)-1; podCount++ { - logger.V(1).Info("Executing redis failover operations", "Redis Node", podName+strconv.Itoa(podCount)) - client := configureRedisClient(client, logger, cr, podName+strconv.Itoa(podCount)) + log.FromContext(ctx).V(1).Info("Executing redis failover operations", "Redis Node", podName+strconv.Itoa(podCount)) + client := configureRedisClient(ctx, client, cr, podName+strconv.Itoa(podCount)) defer client.Close() cmd := redis.NewStringCmd(ctx, "cluster", "reset") err := client.Process(ctx, cmd) if err != nil { - logger.Error(err, "Redis command failed with this error") + log.FromContext(ctx).Error(err, "Redis command failed with this error") flushcommand := redis.NewStringCmd(ctx, "flushall") err = client.Process(ctx, flushcommand) if err != nil { - logger.Error(err, "Redis flush command failed with this error") + log.FromContext(ctx).Error(err, "Redis flush command failed with this error") return err } } err = client.Process(ctx, cmd) if err != nil { - logger.Error(err, "Redis command failed with this error") + log.FromContext(ctx).Error(err, "Redis command failed with this error") return err } output, err := cmd.Result() if err != nil { - logger.Error(err, "Redis command failed with this error") + log.FromContext(ctx).Error(err, "Redis command failed with this error") return err } - logger.V(1).Info("Redis cluster failover executed", "Output", output) + log.FromContext(ctx).V(1).Info("Redis cluster failover executed", "Output", output) } return nil } // CheckRedisNodeCount will check the count of redis nodes -func CheckRedisNodeCount(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, nodeType string) int32 { - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") +func CheckRedisNodeCount(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, nodeType string) int32 { + redisClient := configureRedisClient(ctx, 
client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() var redisNodeType string - clusterNodes, err := clusterNodes(ctx, redisClient, logger) + clusterNodes, err := clusterNodes(ctx, redisClient) if err != nil { - logger.Error(err, "failed to get cluster nodes") + log.FromContext(ctx).Error(err, "failed to get cluster nodes") } count := len(clusterNodes) @@ -373,29 +370,29 @@ func CheckRedisNodeCount(ctx context.Context, client kubernetes.Interface, logge count++ } } - logger.V(1).Info("Number of redis nodes are", "Nodes", strconv.Itoa(count), "Type", nodeType) + log.FromContext(ctx).V(1).Info("Number of redis nodes are", "Nodes", strconv.Itoa(count), "Type", nodeType) } else { - logger.V(1).Info("Total number of redis nodes are", "Nodes", strconv.Itoa(count)) + log.FromContext(ctx).V(1).Info("Total number of redis nodes are", "Nodes", strconv.Itoa(count)) } return int32(count) } // RedisClusterStatusHealth use `redis-cli --cluster check 127.0.0.1:6379` -func RedisClusterStatusHealth(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) bool { - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") +func RedisClusterStatusHealth(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) bool { + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() cmd := []string{"redis-cli", "--cluster", "check", "127.0.0.1:6379"} if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err := getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } cmd = append(cmd, "-a") cmd = append(cmd, pass) } cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...) 
- out, err := executeCommand1(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0") + out, err := executeCommand1(ctx, client, cr, cmd, cr.ObjectMeta.Name+"-leader-0") if err != nil { return false } @@ -409,10 +406,10 @@ func RedisClusterStatusHealth(ctx context.Context, client kubernetes.Interface, } // UnhealthyNodesInCluster returns the number of unhealthy nodes in the cluster cr -func UnhealthyNodesInCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) (int, error) { - redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0") +func UnhealthyNodesInCluster(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster) (int, error) { + redisClient := configureRedisClient(ctx, client, cr, cr.ObjectMeta.Name+"-leader-0") defer redisClient.Close() - clusterNodes, err := clusterNodes(ctx, redisClient, logger) + clusterNodes, err := clusterNodes(ctx, redisClient) if err != nil { return 0, err } @@ -422,7 +419,7 @@ func UnhealthyNodesInCluster(ctx context.Context, client kubernetes.Interface, l count++ } } - logger.V(1).Info("Number of failed nodes in cluster", "Failed Node Count", count) + log.FromContext(ctx).V(1).Info("Number of failed nodes in cluster", "Failed Node Count", count) return count, nil } @@ -435,7 +432,7 @@ func nodeFailedOrDisconnected(node clusterNodesResponse) bool { } // configureRedisClient will configure the Redis Client -func configureRedisClient(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, podName string) *redis.Client { +func configureRedisClient(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, podName string) *redis.Client { redisInfo := RedisDetails{ PodName: podName, Namespace: cr.Namespace, @@ -443,45 +440,45 @@ func configureRedisClient(client kubernetes.Interface, logger logr.Logger, cr *r var err error var pass string if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err = getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err = getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } } opts := &redis.Options{ - Addr: getRedisServerAddress(client, logger, redisInfo, *cr.Spec.Port), + Addr: getRedisServerAddress(ctx, client, redisInfo, *cr.Spec.Port), Password: pass, DB: 0, } if cr.Spec.TLS != nil { - opts.TLSConfig = getRedisTLSConfig(client, logger, cr.Namespace, cr.Spec.TLS.Secret.SecretName, redisInfo.PodName) + opts.TLSConfig = getRedisTLSConfig(ctx, client, cr.Namespace, cr.Spec.TLS.Secret.SecretName, redisInfo.PodName) } return redis.NewClient(opts) } // executeCommand will execute the commands in pod -func executeCommand(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, cmd []string, podName string) { - execOut, execErr := executeCommand1(client, logger, cr, cmd, podName) +func executeCommand(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, cmd []string, podName string) { + execOut, execErr := executeCommand1(ctx, client, cr, cmd, podName) if execErr != nil { - logger.Error(execErr, "Could not execute command", "Command", cmd, "Output", execOut) + 
log.FromContext(ctx).Error(execErr, "Could not execute command", "Command", cmd, "Output", execOut) return } - logger.V(1).Info("Successfully executed the command", "Command", cmd, "Output", execOut) + log.FromContext(ctx).V(1).Info("Successfully executed the command", "Command", cmd, "Output", execOut) } -func executeCommand1(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, cmd []string, podName string) (stdout string, stderr error) { +func executeCommand1(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, cmd []string, podName string) (stdout string, stderr error) { var ( execOut bytes.Buffer execErr bytes.Buffer ) config, err := GenerateK8sConfig()() if err != nil { - logger.Error(err, "Could not find pod to execute") + log.FromContext(ctx).Error(err, "Could not find pod to execute") return "", err } - targetContainer, pod := getContainerID(client, logger, cr, podName) + targetContainer, pod := getContainerID(ctx, client, cr, podName) if targetContainer < 0 { - logger.Error(err, "Could not find pod to execute") + log.FromContext(ctx).Error(err, "Could not find pod to execute") return "", err } @@ -494,7 +491,7 @@ func executeCommand1(client kubernetes.Interface, logger logr.Logger, cr *redisv }, scheme.ParameterCodec) exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL()) if err != nil { - logger.Error(err, "Failed to init executor") + log.FromContext(ctx).Error(err, "Failed to init executor") return "", err } @@ -510,27 +507,27 @@ func executeCommand1(client kubernetes.Interface, logger logr.Logger, cr *redisv } // getContainerID will return the id of container from pod -func getContainerID(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, podName string) (int, *corev1.Pod) { +func getContainerID(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisCluster, podName string) (int, *corev1.Pod) { pod, err := client.CoreV1().Pods(cr.Namespace).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { - logger.Error(err, "Could not get pod info", "Pod Name", podName, "Namespace", cr.Namespace) + log.FromContext(ctx).Error(err, "Could not get pod info", "Pod Name", podName, "Namespace", cr.Namespace) return -1, nil } - logger.V(1).Info("Pod info retrieved successfully", "Pod Name", podName, "Namespace", cr.Namespace) + log.FromContext(ctx).V(1).Info("Pod info retrieved successfully", "Pod Name", podName, "Namespace", cr.Namespace) targetContainer := -1 for containerID, tr := range pod.Spec.Containers { - logger.V(1).Info("Inspecting container", "Pod Name", podName, "Container ID", containerID, "Container Name", tr.Name) + log.FromContext(ctx).V(1).Info("Inspecting container", "Pod Name", podName, "Container ID", containerID, "Container Name", tr.Name) if tr.Name == cr.ObjectMeta.Name+"-leader" { targetContainer = containerID - logger.V(1).Info("Leader container found", "Container ID", containerID, "Container Name", tr.Name) + log.FromContext(ctx).V(1).Info("Leader container found", "Container ID", containerID, "Container Name", tr.Name) break } } if targetContainer == -1 { - logger.V(1).Info("Leader container not found in pod", "Pod Name", podName) + log.FromContext(ctx).V(1).Info("Leader container not found in pod", "Pod Name", podName) return -1, nil } @@ -538,9 +535,8 @@ func getContainerID(client kubernetes.Interface, logger logr.Logger, cr *redisv1 } // checkRedisNodePresence will check if the redis node exist in cluster or not -func 
checkRedisNodePresence(cr *redisv1beta2.RedisCluster, nodeList []clusterNodesResponse, nodeName string) bool { - logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name) - logger.V(1).Info("Checking if Node is in cluster", "Node", nodeName) +func checkRedisNodePresence(ctx context.Context, cr *redisv1beta2.RedisCluster, nodeList []clusterNodesResponse, nodeName string) bool { + log.FromContext(ctx).V(1).Info("Checking if Node is in cluster", "Node", nodeName) for _, node := range nodeList { s := strings.Split(node[1], ":") if s[0] == nodeName { @@ -550,14 +546,8 @@ func checkRedisNodePresence(cr *redisv1beta2.RedisCluster, nodeList []clusterNod return false } -// generateRedisManagerLogger will generate logging interface for Redis operations -func generateRedisManagerLogger(namespace, name string) logr.Logger { - reqLogger := log.WithValues("Request.RedisManager.Namespace", namespace, "Request.RedisManager.Name", name) - return reqLogger -} - // configureRedisClient will configure the Redis Client -func configureRedisReplicationClient(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, podName string) *redis.Client { +func configureRedisReplicationClient(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisReplication, podName string) *redis.Client { redisInfo := RedisDetails{ PodName: podName, Namespace: cr.Namespace, @@ -565,27 +555,27 @@ func configureRedisReplicationClient(client kubernetes.Interface, logger logr.Lo var err error var pass string if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil { - pass, err = getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) + pass, err = getRedisPassword(ctx, client, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key) if err != nil { - logger.Error(err, "Error in getting redis password") + log.FromContext(ctx).Error(err, "Error in getting redis password") } } opts := &redis.Options{ - Addr: getRedisServerAddress(client, logger, redisInfo, 6379), + Addr: getRedisServerAddress(ctx, client, redisInfo, 6379), Password: pass, DB: 0, } if cr.Spec.TLS != nil { - opts.TLSConfig = getRedisTLSConfig(client, logger, cr.Namespace, cr.Spec.TLS.Secret.SecretName, podName) + opts.TLSConfig = getRedisTLSConfig(ctx, client, cr.Namespace, cr.Spec.TLS.Secret.SecretName, podName) } return redis.NewClient(opts) } // Get Redis nodes by it's role i.e. 
master, slave and sentinel -func GetRedisNodesByRole(ctx context.Context, cl kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, redisRole string) []string { - statefulset, err := GetStatefulSet(cl, logger, cr.GetNamespace(), cr.GetName()) +func GetRedisNodesByRole(ctx context.Context, cl kubernetes.Interface, cr *redisv1beta2.RedisReplication, redisRole string) []string { + statefulset, err := GetStatefulSet(ctx, cl, cr.GetNamespace(), cr.GetName()) if err != nil { - logger.Error(err, "Failed to Get the Statefulset of the", "custom resource", cr.Name, "in namespace", cr.Namespace) + log.FromContext(ctx).Error(err, "Failed to Get the Statefulset of the", "custom resource", cr.Name, "in namespace", cr.Namespace) } var pods []string @@ -593,9 +583,9 @@ func GetRedisNodesByRole(ctx context.Context, cl kubernetes.Interface, logger lo for i := 0; i < int(replicas); i++ { podName := statefulset.Name + "-" + strconv.Itoa(i) - redisClient := configureRedisReplicationClient(cl, logger, cr, podName) + redisClient := configureRedisReplicationClient(ctx, cl, cr, podName) defer redisClient.Close() - podRole := checkRedisServerRole(ctx, redisClient, logger, podName) + podRole := checkRedisServerRole(ctx, redisClient, podName) if podRole == redisRole { pods = append(pods, podName) } @@ -605,29 +595,29 @@ func GetRedisNodesByRole(ctx context.Context, cl kubernetes.Interface, logger lo } // Check the Redis Server Role i.e. master, slave and sentinel -func checkRedisServerRole(ctx context.Context, redisClient *redis.Client, logger logr.Logger, podName string) string { +func checkRedisServerRole(ctx context.Context, redisClient *redis.Client, podName string) string { info, err := redisClient.Info(ctx, "Replication").Result() if err != nil { - logger.Error(err, "Failed to Get the role Info of the", "redis pod", podName) + log.FromContext(ctx).Error(err, "Failed to Get the role Info of the", "redis pod", podName) return "" } lines := strings.Split(info, "\r\n") for _, line := range lines { if strings.HasPrefix(line, "role:") { role := strings.TrimPrefix(line, "role:") - logger.V(1).Info("Role of the Redis Pod", "pod", podName, "role", role) + log.FromContext(ctx).V(1).Info("Role of the Redis Pod", "pod", podName, "role", role) return role } } - logger.Error(err, "Failed to find role from Info # Replication in", "redis pod", podName) + log.FromContext(ctx).Error(err, "Failed to find role from Info # Replication in", "redis pod", podName) return "" } // checkAttachedSlave would return redis pod name which has slave -func checkAttachedSlave(ctx context.Context, redisClient *redis.Client, logger logr.Logger, podName string) int { +func checkAttachedSlave(ctx context.Context, redisClient *redis.Client, podName string) int { info, err := redisClient.Info(ctx, "Replication").Result() if err != nil { - logger.Error(err, "Failed to get the connected slaves count of the", "redis pod", podName) + log.FromContext(ctx).Error(err, "Failed to get the connected slaves count of the", "redis pod", podName) return -1 // return -1 if failed to get the connected slaves count } @@ -637,35 +627,35 @@ func checkAttachedSlave(ctx context.Context, redisClient *redis.Client, logger l var connected_slaves int connected_slaves, err = strconv.Atoi(strings.TrimPrefix(line, "connected_slaves:")) if err != nil { - logger.Error(err, "Failed to convert the connected slaves count of the", "redis pod", podName) + log.FromContext(ctx).Error(err, "Failed to convert the connected slaves count of the", "redis pod", podName) 
return -1 } - logger.V(1).Info("Connected Slaves of the Redis Pod", "pod", podName, "connected_slaves", connected_slaves) + log.FromContext(ctx).V(1).Info("Connected Slaves of the Redis Pod", "pod", podName, "connected_slaves", connected_slaves) return connected_slaves } } - logger.Error(nil, "Failed to find connected_slaves from Info # Replication in", "redis pod", podName) + log.FromContext(ctx).Error(nil, "Failed to find connected_slaves from Info # Replication in", "redis pod", podName) return 0 } -func CreateMasterSlaveReplication(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, masterPods []string, realMasterPod string) error { - logger.V(1).Info("Redis Master Node is set to", "pod", realMasterPod) +func CreateMasterSlaveReplication(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisReplication, masterPods []string, realMasterPod string) error { + log.FromContext(ctx).V(1).Info("Redis Master Node is set to", "pod", realMasterPod) realMasterInfo := RedisDetails{ PodName: realMasterPod, Namespace: cr.Namespace, } - realMasterPodIP := getRedisServerIP(client, logger, realMasterInfo) + realMasterPodIP := getRedisServerIP(ctx, client, realMasterInfo) for i := 0; i < len(masterPods); i++ { if masterPods[i] != realMasterPod { - redisClient := configureRedisReplicationClient(client, logger, cr, masterPods[i]) + redisClient := configureRedisReplicationClient(ctx, client, cr, masterPods[i]) defer redisClient.Close() - logger.V(1).Info("Setting the", "pod", masterPods[i], "to slave of", realMasterPod) + log.FromContext(ctx).V(1).Info("Setting the", "pod", masterPods[i], "to slave of", realMasterPod) err := redisClient.SlaveOf(ctx, realMasterPodIP, "6379").Err() if err != nil { - logger.Error(err, "Failed to set", "pod", masterPods[i], "to slave of", realMasterPod) + log.FromContext(ctx).Error(err, "Failed to set", "pod", masterPods[i], "to slave of", realMasterPod) return err } } @@ -674,12 +664,12 @@ func CreateMasterSlaveReplication(ctx context.Context, client kubernetes.Interfa return nil } -func GetRedisReplicationRealMaster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, masterPods []string) string { +func GetRedisReplicationRealMaster(ctx context.Context, client kubernetes.Interface, cr *redisv1beta2.RedisReplication, masterPods []string) string { for _, podName := range masterPods { - redisClient := configureRedisReplicationClient(client, logger, cr, podName) + redisClient := configureRedisReplicationClient(ctx, client, cr, podName) defer redisClient.Close() - if checkAttachedSlave(ctx, redisClient, logger, podName) > 0 { + if checkAttachedSlave(ctx, redisClient, podName) > 0 { return podName } } diff --git a/pkg/k8sutils/redis_test.go b/pkg/k8sutils/redis_test.go index 79314e02d..8f6035f3c 100644 --- a/pkg/k8sutils/redis_test.go +++ b/pkg/k8sutils/redis_test.go @@ -10,8 +10,6 @@ import ( "github.com/OT-CONTAINER-KIT/redis-operator/api" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" mock_utils "github.com/OT-CONTAINER-KIT/redis-operator/mocks/utils" - "github.com/go-logr/logr" - "github.com/go-logr/logr/testr" "github.com/go-redis/redismock/v9" redis "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" @@ -49,7 +47,7 @@ func TestCheckRedisNodePresence(t *testing.T) { for _, tt := range tests { testname := fmt.Sprintf("%s,%s", tt.nodes, tt.ip) t.Run(testname, func(t *testing.T) { - ans := checkRedisNodePresence(cr, 
tt.nodes, tt.ip) + ans := checkRedisNodePresence(context.TODO(), cr, tt.nodes, tt.ip) if ans != tt.want { t.Errorf("got %t, want %t", ans, tt.want) } @@ -80,7 +78,7 @@ e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:30001@31001,redis-cluster-lea }) mock.ExpectClusterMeet(newPodIP, "6379").SetVal("OK") port := 6379 - err := repairDisconnectedMasters(ctx, k8sClient, logr.Discard(), &redisv1beta2.RedisCluster{ + err := repairDisconnectedMasters(ctx, k8sClient, &redisv1beta2.RedisCluster{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, }, @@ -175,8 +173,8 @@ func TestGetRedisServerIP(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { client := tt.setup() - logger := testr.New(t) - redisIP := getRedisServerIP(client, logger, tt.redisInfo) + + redisIP := getRedisServerIP(context.TODO(), client, tt.redisInfo) if tt.expectEmpty { assert.Empty(t, redisIP, "Expected an empty IP address") @@ -240,8 +238,8 @@ func TestGetRedisServerAddress(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { client := tt.setup() - logger := testr.New(t) - redisIP := getRedisServerAddress(client, logger, tt.redisInfo, 6379) + + redisIP := getRedisServerAddress(context.TODO(), client, tt.redisInfo, 6379) if tt.expectEmpty { assert.Empty(t, redisIP, "Expected an empty address") @@ -286,9 +284,8 @@ func TestGetRedisHostname(t *testing.T) { } func TestCreateSingleLeaderRedisCommand(t *testing.T) { - logger := testr.New(t) cr := &redisv1beta2.RedisCluster{} - cmd := CreateSingleLeaderRedisCommand(logger, cr) + cmd := CreateSingleLeaderRedisCommand(context.TODO(), cr) assert.Equal(t, "redis-cli", cmd[0]) assert.Equal(t, "CLUSTER", cmd[1]) @@ -353,9 +350,8 @@ func TestCreateMultipleLeaderRedisCommand(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { client := mock_utils.CreateFakeClientWithPodIPs_LeaderPods(tt.redisCluster) - logger := testr.New(t) - cmd := CreateMultipleLeaderRedisCommand(client, logger, tt.redisCluster) + cmd := CreateMultipleLeaderRedisCommand(context.TODO(), client, tt.redisCluster) assert.Equal(t, tt.expectedCommands, cmd) }) } @@ -391,7 +387,6 @@ func TestGetRedisTLSArgs(t *testing.T) { } func TestCreateRedisReplicationCommand(t *testing.T) { - logger := logr.Discard() type secret struct { name string namespace string @@ -530,7 +525,7 @@ func TestCreateRedisReplicationCommand(t *testing.T) { objects = append(objects, secret...) client := fake.NewSimpleClientset(objects...) 
- cmd := createRedisReplicationCommand(client, logger, tt.redisCluster, tt.leaderPod, tt.followerPod) + cmd := createRedisReplicationCommand(context.TODO(), client, tt.redisCluster, tt.leaderPod, tt.followerPod) // Assert the command is as expected using testify assert.Equal(t, tt.expectedCommand, cmd) @@ -614,8 +609,7 @@ func TestGetContainerID(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { client := k8sClientFake.NewSimpleClientset(test.setupPod) - logger := testr.New(t) - id, pod := getContainerID(client, logger, test.redisCluster, test.setupPod.Name) + id, pod := getContainerID(context.TODO(), client, test.redisCluster, test.setupPod.Name) if test.expectError { assert.Nil(t, pod, "Expected no pod but got one") assert.Equal(t, test.expectedID, id, "Expected ID does not match") @@ -630,8 +624,6 @@ func TestGetContainerID(t *testing.T) { } func Test_checkAttachedSlave(t *testing.T) { - logger := logr.Discard() - tests := []struct { name string podName string @@ -709,7 +701,7 @@ func Test_checkAttachedSlave(t *testing.T) { mock.ExpectInfo("Replication").SetVal(tt.infoReturn) } - slaveCount := checkAttachedSlave(ctx, client, logger, tt.podName) + slaveCount := checkAttachedSlave(ctx, client, tt.podName) assert.Equal(t, tt.expectedSlaveCount, slaveCount, "Test case: "+tt.name) if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unmet expectations: %s", err) @@ -719,8 +711,6 @@ func Test_checkAttachedSlave(t *testing.T) { } func Test_checkRedisServerRole(t *testing.T) { - logger := logr.Discard() - tests := []struct { name string podName string @@ -798,7 +788,7 @@ func Test_checkRedisServerRole(t *testing.T) { mock.ExpectInfo("Replication").SetVal(tt.infoReturn) } - role := checkRedisServerRole(ctx, client, logger, tt.podName) + role := checkRedisServerRole(ctx, client, tt.podName) if tt.shouldFail { assert.Empty(t, role, "Test case: "+tt.name) } else { @@ -812,7 +802,7 @@ func Test_checkRedisServerRole(t *testing.T) { } func TestClusterNodes(t *testing.T) { - logger := logr.Discard() // Discard logs + // Discard logs tests := []struct { name string @@ -853,7 +843,7 @@ e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:30001@31001,hostname1 myself, } else { mock.ExpectClusterNodes().SetVal(tc.clusterNodesOutput) } - result, err := clusterNodes(context.TODO(), db, logger) + result, err := clusterNodes(context.TODO(), db) if tc.expectError != nil { assert.Nil(t, result) diff --git a/pkg/k8sutils/secrets.go b/pkg/k8sutils/secrets.go index 91cc852c2..8f095bc1e 100644 --- a/pkg/k8sutils/secrets.go +++ b/pkg/k8sutils/secrets.go @@ -7,35 +7,32 @@ import ( "errors" "strings" - "github.com/go-logr/logr" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" logf "sigs.k8s.io/controller-runtime/pkg/log" ) -var log = logf.Log.WithName("controller_redis") - // getRedisPassword method will return the redis password from the secret -func getRedisPassword(client kubernetes.Interface, logger logr.Logger, namespace, name, secretKey string) (string, error) { +func getRedisPassword(ctx context.Context, client kubernetes.Interface, namespace, name, secretKey string) (string, error) { secretName, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { - logger.Error(err, "Failed in getting existing secret for redis") + logf.FromContext(ctx).Error(err, "Failed in getting existing secret for redis") return "", err } for key, value := range secretName.Data { if key == secretKey { - 
logger.V(1).Info("Secret key found in the secret", "secretKey", secretKey) + logf.FromContext(ctx).Info("Secret key found in the secret", "secretKey", secretKey) return strings.TrimSpace(string(value)), nil } } - logger.Error(errors.New("secret key not found"), "Secret key not found in the secret") + logf.FromContext(ctx).Error(errors.New("secret key not found"), "Secret key not found in the secret") return "", nil } -func getRedisTLSConfig(client kubernetes.Interface, logger logr.Logger, namespace, tlsSecretName, podName string) *tls.Config { +func getRedisTLSConfig(ctx context.Context, client kubernetes.Interface, namespace, tlsSecretName, podName string) *tls.Config { secret, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), tlsSecretName, metav1.GetOptions{}) if err != nil { - logger.V(1).Error(err, "Failed in getting TLS secret", "secretName", tlsSecretName, "namespace", namespace) + logf.FromContext(ctx).Error(err, "Failed in getting TLS secret", "secretName", tlsSecretName, "namespace", namespace) return nil } @@ -44,20 +41,20 @@ func getRedisTLSConfig(client kubernetes.Interface, logger logr.Logger, namespac tlsCaCertificate, caExists := secret.Data["ca.crt"] if !certExists || !keyExists || !caExists { - logger.Error(errors.New("required TLS keys are missing in the secret"), "Missing TLS keys in the secret") + logf.FromContext(ctx).Error(errors.New("required TLS keys are missing in the secret"), "Missing TLS keys in the secret") return nil } cert, err := tls.X509KeyPair(tlsClientCert, tlsClientKey) if err != nil { - logger.V(1).Error(err, "Couldn't load TLS client key pair", "secretName", tlsSecretName, "namespace", namespace) + logf.FromContext(ctx).Error(err, "Couldn't load TLS client key pair", "secretName", tlsSecretName, "namespace", namespace) return nil } tlsCaCertificates := x509.NewCertPool() ok := tlsCaCertificates.AppendCertsFromPEM(tlsCaCertificate) if !ok { - logger.V(1).Error(err, "Invalid CA Certificates", "secretName", tlsSecretName, "namespace", namespace) + logf.FromContext(ctx).Error(err, "Invalid CA Certificates", "secretName", tlsSecretName, "namespace", namespace) return nil } diff --git a/pkg/k8sutils/secrets_test.go b/pkg/k8sutils/secrets_test.go index cb1734f6f..10c42d35c 100644 --- a/pkg/k8sutils/secrets_test.go +++ b/pkg/k8sutils/secrets_test.go @@ -1,13 +1,13 @@ package k8sutils import ( + "context" "os" "path/filepath" "testing" common "github.com/OT-CONTAINER-KIT/redis-operator/api" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" - "github.com/go-logr/logr/testr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" @@ -84,8 +84,8 @@ func Test_getRedisPassword(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { client := tt.setup() - logger := testr.New(t) - got, err := getRedisPassword(client, logger, tt.namespace, tt.secretName, tt.secretKey) + + got, err := getRedisPassword(context.TODO(), client, tt.namespace, tt.secretName, tt.secretKey) if tt.expectedErr { require.Error(t, err, "Expected an error but didn't get one") @@ -221,8 +221,8 @@ func Test_getRedisTLSConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { client := tt.setup() - logger := testr.New(t) - tlsConfig := getRedisTLSConfig(client, logger, tt.redisCluster.Namespace, tt.redisCluster.Spec.TLS.Secret.SecretName, tt.redisInfo.PodName) + + tlsConfig := getRedisTLSConfig(context.TODO(), client, tt.redisCluster.Namespace, 
tt.redisCluster.Spec.TLS.Secret.SecretName, tt.redisInfo.PodName) if tt.expectTLS { require.NotNil(t, tlsConfig, "Expected TLS configuration but got nil") diff --git a/pkg/k8sutils/services.go b/pkg/k8sutils/services.go index 627117277..1ad9aa63d 100644 --- a/pkg/k8sutils/services.go +++ b/pkg/k8sutils/services.go @@ -4,12 +4,12 @@ import ( "context" "github.com/banzaicloud/k8s-objectmatcher/patch" - "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/log" ) const ( @@ -94,66 +94,59 @@ func generateServiceType(k8sServiceType string) corev1.ServiceType { } // createService is a method to create service is Kubernetes -func createService(kusClient kubernetes.Interface, logger logr.Logger, namespace string, service *corev1.Service) error { +func createService(ctx context.Context, kusClient kubernetes.Interface, namespace string, service *corev1.Service) error { _, err := kusClient.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{}) if err != nil { - logger.Error(err, "Redis service creation is failed") + log.FromContext(ctx).Error(err, "Redis service creation is failed") return err } - logger.V(1).Info("Redis service creation is successful") + log.FromContext(ctx).V(1).Info("Redis service creation is successful") return nil } // updateService is a method to update service is Kubernetes -func updateService(k8sClient kubernetes.Interface, logger logr.Logger, namespace string, service *corev1.Service) error { +func updateService(ctx context.Context, k8sClient kubernetes.Interface, namespace string, service *corev1.Service) error { _, err := k8sClient.CoreV1().Services(namespace).Update(context.TODO(), service, metav1.UpdateOptions{}) if err != nil { - logger.Error(err, "Redis service update failed") + log.FromContext(ctx).Error(err, "Redis service update failed") return err } - logger.V(1).Info("Redis service updated successfully") + log.FromContext(ctx).V(1).Info("Redis service updated successfully") return nil } // getService is a method to get service is Kubernetes -func getService(k8sClient kubernetes.Interface, logger logr.Logger, namespace string, name string) (*corev1.Service, error) { +func getService(ctx context.Context, k8sClient kubernetes.Interface, namespace string, name string) (*corev1.Service, error) { getOpts := metav1.GetOptions{ TypeMeta: generateMetaInformation("Service", "v1"), } serviceInfo, err := k8sClient.CoreV1().Services(namespace).Get(context.TODO(), name, getOpts) if err != nil { - logger.V(1).Info("Redis service get action is failed") + log.FromContext(ctx).V(1).Info("Redis service get action is failed") return nil, err } - logger.V(1).Info("Redis service get action is successful") + log.FromContext(ctx).V(1).Info("Redis service get action is successful") return serviceInfo, nil } -func serviceLogger(namespace string, name string) logr.Logger { - reqLogger := log.WithValues("Request.Service.Namespace", namespace, "Request.Service.Name", name) - return reqLogger -} - // CreateOrUpdateService method will create or update Redis service -func CreateOrUpdateService(namespace string, serviceMeta metav1.ObjectMeta, ownerDef metav1.OwnerReference, epp exporterPortProvider, headless bool, serviceType string, port int, cl kubernetes.Interface, extra ...corev1.ServicePort) error { - logger := serviceLogger(namespace, serviceMeta.Name) +func 
CreateOrUpdateService(ctx context.Context, namespace string, serviceMeta metav1.ObjectMeta, ownerDef metav1.OwnerReference, epp exporterPortProvider, headless bool, serviceType string, port int, cl kubernetes.Interface, extra ...corev1.ServicePort) error { serviceDef := generateServiceDef(serviceMeta, epp, ownerDef, headless, serviceType, port, extra...) - storedService, err := getService(cl, logger, namespace, serviceMeta.GetName()) + storedService, err := getService(ctx, cl, namespace, serviceMeta.GetName()) if err != nil { if errors.IsNotFound(err) { if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(serviceDef); err != nil { //nolint - logger.Error(err, "Unable to patch redis service with compare annotations") + log.FromContext(ctx).Error(err, "Unable to patch redis service with compare annotations") } - return createService(cl, logger, namespace, serviceDef) + return createService(ctx, cl, namespace, serviceDef) } return err } - return patchService(storedService, serviceDef, namespace, cl) + return patchService(ctx, storedService, serviceDef, namespace, cl) } // patchService will patch Redis Kubernetes service -func patchService(storedService *corev1.Service, newService *corev1.Service, namespace string, cl kubernetes.Interface) error { - logger := serviceLogger(namespace, storedService.Name) +func patchService(ctx context.Context, storedService *corev1.Service, newService *corev1.Service, namespace string, cl kubernetes.Interface) error { // We want to try and keep this atomic as possible. newService.ResourceVersion = storedService.ResourceVersion newService.CreationTimestamp = storedService.CreationTimestamp @@ -169,11 +162,11 @@ func patchService(storedService *corev1.Service, newService *corev1.Service, nam patch.IgnoreField("apiVersion"), ) if err != nil { - logger.Error(err, "Unable to patch redis service with comparison object") + log.FromContext(ctx).Error(err, "Unable to patch redis service with comparison object") return err } if !patchResult.IsEmpty() { - logger.V(1).Info("Changes in service Detected, Updating...", "patch", string(patchResult.Patch)) + log.FromContext(ctx).V(1).Info("Changes in service Detected, Updating...", "patch", string(patchResult.Patch)) for key, value := range storedService.Annotations { if _, present := newService.Annotations[key]; !present { @@ -181,12 +174,12 @@ func patchService(storedService *corev1.Service, newService *corev1.Service, nam } } if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(newService); err != nil { - logger.Error(err, "Unable to patch redis service with comparison object") + log.FromContext(ctx).Error(err, "Unable to patch redis service with comparison object") return err } - logger.V(1).Info("Syncing Redis service with defined properties") - return updateService(cl, logger, namespace, newService) + log.FromContext(ctx).V(1).Info("Syncing Redis service with defined properties") + return updateService(ctx, cl, namespace, newService) } - logger.V(1).Info("Redis service is already in-sync") + log.FromContext(ctx).V(1).Info("Redis service is already in-sync") return nil } diff --git a/pkg/k8sutils/services_test.go b/pkg/k8sutils/services_test.go index 5906d61db..96e2fc1c5 100644 --- a/pkg/k8sutils/services_test.go +++ b/pkg/k8sutils/services_test.go @@ -4,7 +4,6 @@ import ( "context" "testing" - "github.com/go-logr/logr/testr" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -311,7 +310,6 @@ func Test_createService(t *testing.T) { } for _, tt := 
range tests { t.Run(tt.name, func(t *testing.T) { - logger := testr.New(t) var k8sClient *k8sClientFake.Clientset if tt.exist { k8sClient = k8sClientFake.NewSimpleClientset(tt.service.DeepCopyObject()) @@ -319,7 +317,7 @@ func Test_createService(t *testing.T) { k8sClient = k8sClientFake.NewSimpleClientset() } - err := createService(k8sClient, logger, tt.service.GetNamespace(), tt.service) + err := createService(context.TODO(), k8sClient, tt.service.GetNamespace(), tt.service) if tt.wantErr { assert.Error(t, err) } else { @@ -407,10 +405,9 @@ func Test_updateService(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - logger := testr.New(t) k8sClient := k8sClientFake.NewSimpleClientset(tt.current.DeepCopyObject()) - err := updateService(k8sClient, logger, tt.servinceNamespace, tt.updated) + err := updateService(context.TODO(), k8sClient, tt.servinceNamespace, tt.updated) if tt.wantErr { assert.Error(t, err) } else { @@ -460,7 +457,6 @@ func Test_getService(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - logger := testr.New(t) var k8sClient *k8sClientFake.Clientset if tt.have != nil { k8sClient = k8sClientFake.NewSimpleClientset(tt.have.DeepCopyObject()) @@ -468,7 +464,7 @@ func Test_getService(t *testing.T) { k8sClient = k8sClientFake.NewSimpleClientset() } - got, err := getService(k8sClient, logger, tt.want.GetNamespace(), tt.want.GetName()) + got, err := getService(context.TODO(), k8sClient, tt.want.GetNamespace(), tt.want.GetName()) if tt.wantErr { assert.Error(t, err) } else { diff --git a/pkg/k8sutils/statefulset.go b/pkg/k8sutils/statefulset.go index bc31d4975..63060e1fb 100644 --- a/pkg/k8sutils/statefulset.go +++ b/pkg/k8sutils/statefulset.go @@ -11,7 +11,6 @@ import ( redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/util" "github.com/banzaicloud/k8s-objectmatcher/patch" - "github.com/go-logr/logr" "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -22,6 +21,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/utils/env" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/log" ) type StatefulSet interface { @@ -30,14 +30,11 @@ type StatefulSet interface { type StatefulSetService struct { kubeClient kubernetes.Interface - log logr.Logger } -func NewStatefulSetService(kubeClient kubernetes.Interface, log logr.Logger) *StatefulSetService { - log = log.WithValues("service", "k8s.statefulset") +func NewStatefulSetService(kubeClient kubernetes.Interface) *StatefulSetService { return &StatefulSetService{ kubeClient: kubeClient, - log: log, } } @@ -45,13 +42,11 @@ func (s *StatefulSetService) IsStatefulSetReady(ctx context.Context, namespace, var ( partition = 0 replicas = 1 - - logger = s.log.WithValues("namespace", namespace, "name", name) ) sts, err := s.kubeClient.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { - logger.Error(err, "failed to get statefulset") + log.FromContext(ctx).Error(err, "failed to get statefulset") return false } @@ -63,19 +58,19 @@ func (s *StatefulSetService) IsStatefulSetReady(ctx context.Context, namespace, } if expectedUpdateReplicas := replicas - partition; sts.Status.UpdatedReplicas < int32(expectedUpdateReplicas) { - logger.V(1).Info("StatefulSet is not ready", "Status.UpdatedReplicas", sts.Status.UpdatedReplicas, "ExpectedUpdateReplicas", expectedUpdateReplicas) + log.FromContext(ctx).V(1).Info("StatefulSet is not ready", "Status.UpdatedReplicas", 
sts.Status.UpdatedReplicas, "ExpectedUpdateReplicas", expectedUpdateReplicas) return false } if partition == 0 && sts.Status.CurrentRevision != sts.Status.UpdateRevision { - logger.V(1).Info("StatefulSet is not ready", "Status.CurrentRevision", sts.Status.CurrentRevision, "Status.UpdateRevision", sts.Status.UpdateRevision) + log.FromContext(ctx).V(1).Info("StatefulSet is not ready", "Status.CurrentRevision", sts.Status.CurrentRevision, "Status.UpdateRevision", sts.Status.UpdateRevision) return false } if sts.Status.ObservedGeneration != sts.ObjectMeta.Generation { - logger.V(1).Info("StatefulSet is not ready", "Status.ObservedGeneration", sts.Status.ObservedGeneration, "ObjectMeta.Generation", sts.ObjectMeta.Generation) + log.FromContext(ctx).V(1).Info("StatefulSet is not ready", "Status.ObservedGeneration", sts.Status.ObservedGeneration, "ObjectMeta.Generation", sts.ObjectMeta.Generation) return false } if int(sts.Status.ReadyReplicas) != replicas { - logger.V(1).Info("StatefulSet is not ready", "Status.ReadyReplicas", sts.Status.ReadyReplicas, "Replicas", replicas) + log.FromContext(ctx).V(1).Info("StatefulSet is not ready", "Status.ReadyReplicas", sts.Status.ReadyReplicas, "Replicas", replicas) return false } return true @@ -154,25 +149,24 @@ type initContainerParameters struct { } // CreateOrUpdateStateFul method will create or update Redis service -func CreateOrUpdateStateFul(cl kubernetes.Interface, logger logr.Logger, namespace string, stsMeta metav1.ObjectMeta, params statefulSetParameters, ownerDef metav1.OwnerReference, initcontainerParams initContainerParameters, containerParams containerParameters, sidecars *[]redisv1beta2.Sidecar) error { - storedStateful, err := GetStatefulSet(cl, logger, namespace, stsMeta.Name) +func CreateOrUpdateStateFul(ctx context.Context, cl kubernetes.Interface, namespace string, stsMeta metav1.ObjectMeta, params statefulSetParameters, ownerDef metav1.OwnerReference, initcontainerParams initContainerParameters, containerParams containerParameters, sidecars *[]redisv1beta2.Sidecar) error { + storedStateful, err := GetStatefulSet(ctx, cl, namespace, stsMeta.Name) statefulSetDef := generateStatefulSetsDef(stsMeta, params, ownerDef, initcontainerParams, containerParams, getSidecars(sidecars)) if err != nil { if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(statefulSetDef); err != nil { //nolint - logger.Error(err, "Unable to patch redis statefulset with comparison object") + log.FromContext(ctx).Error(err, "Unable to patch redis statefulset with comparison object") return err } if apierrors.IsNotFound(err) { - return createStatefulSet(cl, logger, namespace, statefulSetDef) + return createStatefulSet(ctx, cl, namespace, statefulSetDef) } return err } - return patchStatefulSet(storedStateful, statefulSetDef, namespace, params.RecreateStatefulSet, cl) + return patchStatefulSet(ctx, storedStateful, statefulSetDef, namespace, params.RecreateStatefulSet, cl) } // patchStateFulSet will patch Redis Kubernetes StateFulSet -func patchStatefulSet(storedStateful *appsv1.StatefulSet, newStateful *appsv1.StatefulSet, namespace string, recreateStateFulSet bool, cl kubernetes.Interface) error { - logger := statefulSetLogger(namespace, storedStateful.Name) +func patchStatefulSet(ctx context.Context, storedStateful *appsv1.StatefulSet, newStateful *appsv1.StatefulSet, namespace string, recreateStateFulSet bool, cl kubernetes.Interface) error { // We want to try and keep this atomic as possible. 
newStateful.ResourceVersion = storedStateful.ResourceVersion newStateful.CreationTimestamp = storedStateful.CreationTimestamp @@ -185,11 +179,11 @@ func patchStatefulSet(storedStateful *appsv1.StatefulSet, newStateful *appsv1.St patch.IgnoreField("apiVersion"), ) if err != nil { - logger.Error(err, "Unable to patch redis statefulset with comparison object") + log.FromContext(ctx).Error(err, "Unable to patch redis statefulset with comparison object") return err } if !patchResult.IsEmpty() { - logger.V(1).Info("Changes in statefulset Detected, Updating...", "patch", string(patchResult.Patch)) + log.FromContext(ctx).V(1).Info("Changes in statefulset Detected, Updating...", "patch", string(patchResult.Patch)) if len(newStateful.Spec.VolumeClaimTemplates) >= 1 && len(newStateful.Spec.VolumeClaimTemplates) == len(storedStateful.Spec.VolumeClaimTemplates) { // Field is immutable therefore we MUST keep it as is. if !apiequality.Semantic.DeepEqual(newStateful.Spec.VolumeClaimTemplates[0].Spec, storedStateful.Spec.VolumeClaimTemplates[0].Spec) { @@ -237,7 +231,7 @@ func patchStatefulSet(storedStateful *appsv1.StatefulSet, newStateful *appsv1.St if !updateFailed { updateFailed = true } - logger.Error(fmt.Errorf("redis:%s resize pvc failed:%s", storedStateful.Name, err.Error()), "") + log.FromContext(ctx).Error(fmt.Errorf("redis:%s resize pvc failed:%s", storedStateful.Name, err.Error()), "") } } } @@ -246,9 +240,9 @@ func patchStatefulSet(storedStateful *appsv1.StatefulSet, newStateful *appsv1.St annotations["storageCapacity"] = fmt.Sprintf("%d", stateCapacity) storedStateful.Annotations = annotations if realUpdate { - logger.Info(fmt.Sprintf("redis:%s resize pvc from %d to %d", storedStateful.Name, storedCapacity, stateCapacity)) + log.FromContext(ctx).V(1).Info(fmt.Sprintf("redis:%s resize pvc from %d to %d", storedStateful.Name, storedCapacity, stateCapacity)) } else { - logger.Info(fmt.Sprintf("redis:%s resize noting,just set annotations", storedStateful.Name)) + log.FromContext(ctx).V(1).Info(fmt.Sprintf("redis:%s resize noting,just set annotations", storedStateful.Name)) } } } @@ -265,12 +259,12 @@ func patchStatefulSet(storedStateful *appsv1.StatefulSet, newStateful *appsv1.St } } if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(newStateful); err != nil { - logger.Error(err, "Unable to patch redis statefulset with comparison object") + log.FromContext(ctx).Error(err, "Unable to patch redis statefulset with comparison object") return err } - return updateStatefulSet(cl, logger, namespace, newStateful, recreateStateFulSet) + return updateStatefulSet(ctx, cl, namespace, newStateful, recreateStateFulSet) } - logger.V(1).Info("Reconciliation Complete, no Changes required.") + log.FromContext(ctx).V(1).Info("Reconciliation Complete, no Changes required.") return nil } @@ -767,18 +761,18 @@ func getEnvironmentVariables(role string, enabledPassword *bool, secretName *str } // createStatefulSet is a method to create statefulset in Kubernetes -func createStatefulSet(cl kubernetes.Interface, logger logr.Logger, namespace string, stateful *appsv1.StatefulSet) error { +func createStatefulSet(ctx context.Context, cl kubernetes.Interface, namespace string, stateful *appsv1.StatefulSet) error { _, err := cl.AppsV1().StatefulSets(namespace).Create(context.TODO(), stateful, metav1.CreateOptions{}) if err != nil { - logger.Error(err, "Redis stateful creation failed") + log.FromContext(ctx).Error(err, "Redis stateful creation failed") return err } - logger.V(1).Info("Redis stateful successfully created") 
+ log.FromContext(ctx).V(1).Info("Redis stateful successfully created") return nil } // updateStatefulSet is a method to update statefulset in Kubernetes -func updateStatefulSet(cl kubernetes.Interface, logger logr.Logger, namespace string, stateful *appsv1.StatefulSet, recreateStateFulSet bool) error { +func updateStatefulSet(ctx context.Context, cl kubernetes.Interface, namespace string, stateful *appsv1.StatefulSet, recreateStateFulSet bool) error { _, err := cl.AppsV1().StatefulSets(namespace).Update(context.TODO(), stateful, metav1.UpdateOptions{}) if recreateStateFulSet { sErr, ok := err.(*apierrors.StatusError) @@ -787,7 +781,7 @@ func updateStatefulSet(cl kubernetes.Interface, logger logr.Logger, namespace st for messageCount, cause := range sErr.ErrStatus.Details.Causes { failMsg[messageCount] = cause.Message } - logger.V(1).Info("recreating StatefulSet because the update operation wasn't possible", "reason", strings.Join(failMsg, ", ")) + log.FromContext(ctx).V(1).Info("recreating StatefulSet because the update operation wasn't possible", "reason", strings.Join(failMsg, ", ")) propagationPolicy := metav1.DeletePropagationForeground if err := cl.AppsV1().StatefulSets(namespace).Delete(context.TODO(), stateful.GetName(), metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil { //nolint return errors.Wrap(err, "failed to delete StatefulSet to avoid forbidden action") @@ -795,33 +789,27 @@ func updateStatefulSet(cl kubernetes.Interface, logger logr.Logger, namespace st } } if err != nil { - logger.Error(err, "Redis statefulset update failed") + log.FromContext(ctx).Error(err, "Redis statefulset update failed") return err } - logger.V(1).Info("Redis statefulset successfully updated ") + log.FromContext(ctx).V(1).Info("Redis statefulset successfully updated ") return nil } // GetStateFulSet is a method to get statefulset in Kubernetes -func GetStatefulSet(cl kubernetes.Interface, logger logr.Logger, namespace string, name string) (*appsv1.StatefulSet, error) { +func GetStatefulSet(ctx context.Context, cl kubernetes.Interface, namespace string, name string) (*appsv1.StatefulSet, error) { getOpts := metav1.GetOptions{ TypeMeta: generateMetaInformation("StatefulSet", "apps/v1"), } statefulInfo, err := cl.AppsV1().StatefulSets(namespace).Get(context.TODO(), name, getOpts) if err != nil { - logger.V(1).Info("Redis statefulset get action failed") + log.FromContext(ctx).V(1).Info("Redis statefulset get action failed") return nil, err } - logger.V(1).Info("Redis statefulset get action was successful") + log.FromContext(ctx).V(1).Info("Redis statefulset get action was successful") return statefulInfo, nil } -// statefulSetLogger will generate logging interface for Statfulsets -func statefulSetLogger(namespace string, name string) logr.Logger { - reqLogger := log.WithValues("Request.StatefulSet.Namespace", namespace, "Request.StatefulSet.Name", name) - return reqLogger -} - func getSidecars(sidecars *[]redisv1beta2.Sidecar) []redisv1beta2.Sidecar { if sidecars == nil { return []redisv1beta2.Sidecar{} diff --git a/pkg/k8sutils/statefulset_test.go b/pkg/k8sutils/statefulset_test.go index f7d8ea7ae..b412c7456 100644 --- a/pkg/k8sutils/statefulset_test.go +++ b/pkg/k8sutils/statefulset_test.go @@ -7,7 +7,6 @@ import ( common "github.com/OT-CONTAINER-KIT/redis-operator/api" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" - "github.com/go-logr/logr" "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -196,8 +195,6 @@ 
func TestGetVolumeMount(t *testing.T) { } func Test_GetStatefulSet(t *testing.T) { - logger := logr.Discard() - tests := []struct { name string sts appsv1.StatefulSet @@ -229,7 +226,7 @@ func Test_GetStatefulSet(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { client := k8sClientFake.NewSimpleClientset(test.sts.DeepCopy()) - _, err := GetStatefulSet(client, logger, test.stsNamespace, test.stsName) + _, err := GetStatefulSet(context.TODO(), client, test.stsNamespace, test.stsName) if test.present { assert.Nil(t, err) } else { @@ -240,8 +237,6 @@ func Test_GetStatefulSet(t *testing.T) { } func Test_createStatefulSet(t *testing.T) { - logger := logr.Discard() - tests := []struct { name string sts appsv1.StatefulSet @@ -279,7 +274,7 @@ func Test_createStatefulSet(t *testing.T) { } else { client = k8sClientFake.NewSimpleClientset() } - err := createStatefulSet(client, logger, test.sts.GetNamespace(), &test.sts) + err := createStatefulSet(context.TODO(), client, test.sts.GetNamespace(), &test.sts) if test.present { assert.NotNil(t, err) } else { @@ -290,7 +285,6 @@ func Test_createStatefulSet(t *testing.T) { } func TestUpdateStatefulSet(t *testing.T) { - logger := logr.Discard() tests := []struct { name string existingStsSpec appsv1.StatefulSetSpec @@ -395,7 +389,7 @@ func TestUpdateStatefulSet(t *testing.T) { } else { client = k8sClientFake.NewSimpleClientset() } - err := updateStatefulSet(client, logger, updatedSts.GetNamespace(), &updatedSts, test.recreateSts) + err := updateStatefulSet(context.TODO(), client, updatedSts.GetNamespace(), &updatedSts, test.recreateSts) if test.expectErr != nil { assert.Error(err, "Expected Error while updating Statefulset") assert.Equal(test.expectErr, err) @@ -412,7 +406,6 @@ func TestUpdateStatefulSet(t *testing.T) { } func TestCreateOrUpdateStateFul(t *testing.T) { - logger := logr.Discard() tests := []struct { name string stsParams statefulSetParameters @@ -554,7 +547,7 @@ func TestCreateOrUpdateStateFul(t *testing.T) { } else { client = k8sClientFake.NewSimpleClientset() } - err := CreateOrUpdateStateFul(client, logger, updatedSts.GetNamespace(), updatedSts.ObjectMeta, test.stsParams, test.stsOwnerDef, test.initContainerParams, test.containerParams, test.sidecar) + err := CreateOrUpdateStateFul(context.TODO(), client, updatedSts.GetNamespace(), updatedSts.ObjectMeta, test.stsParams, test.stsOwnerDef, test.initContainerParams, test.containerParams, test.sidecar) if test.expectErr != nil { assert.Error(err, "Expected Error while updating Statefulset") assert.Equal(test.expectErr, err) @@ -577,7 +570,7 @@ func TestCreateOrUpdateStateFul(t *testing.T) { client = k8sClientFake.NewSimpleClientset() - err := CreateOrUpdateStateFul(client, logger, updatedSts.GetNamespace(), updatedSts.ObjectMeta, test.stsParams, test.stsOwnerDef, test.initContainerParams, test.containerParams, test.sidecar) + err := CreateOrUpdateStateFul(context.TODO(), client, updatedSts.GetNamespace(), updatedSts.ObjectMeta, test.stsParams, test.stsOwnerDef, test.initContainerParams, test.containerParams, test.sidecar) assert.Nil(err) } }) diff --git a/pkg/k8sutils/status.go b/pkg/k8sutils/status.go index aa19ea150..9f2f19216 100644 --- a/pkg/k8sutils/status.go +++ b/pkg/k8sutils/status.go @@ -6,23 +6,16 @@ import ( "github.com/OT-CONTAINER-KIT/redis-operator/api/status" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" - "github.com/go-logr/logr" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" + "sigs.k8s.io/controller-runtime/pkg/log" ) -// statusLogger will generate logging interface for status -func statusLogger(namespace string, name string) logr.Logger { - reqLogger := log.WithValues("Request.Namespace", namespace, "Request.Name", name) - return reqLogger -} - // UpdateRedisClusterStatus will update the status of the RedisCluster -func UpdateRedisClusterStatus(cr *redisv1beta2.RedisCluster, state status.RedisClusterState, reason string, readyLeaderReplicas, readyFollowerReplicas int32, dcl dynamic.Interface) error { - logger := statusLogger(cr.Namespace, cr.Name) +func UpdateRedisClusterStatus(ctx context.Context, cr *redisv1beta2.RedisCluster, state status.RedisClusterState, reason string, readyLeaderReplicas, readyFollowerReplicas int32, dcl dynamic.Interface) error { newStatus := redisv1beta2.RedisClusterStatus{ State: state, Reason: reason, @@ -40,14 +33,14 @@ func UpdateRedisClusterStatus(cr *redisv1beta2.RedisCluster, state status.RedisC } unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(cr) if err != nil { - logger.Error(err, "Failed to convert CR to unstructured object") + log.FromContext(ctx).Error(err, "Failed to convert CR to unstructured object") return err } unstructuredRedisCluster := &unstructured.Unstructured{Object: unstructuredObj} _, err = dcl.Resource(gvr).Namespace(cr.Namespace).UpdateStatus(context.TODO(), unstructuredRedisCluster, metav1.UpdateOptions{}) if err != nil { - logger.Error(err, "Failed to update status") + log.FromContext(ctx).Error(err, "Failed to update status") return err } return nil From aa2e89567d5e14388f3676e5e155182df2795c98 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 17 Nov 2024 13:41:41 +0800 Subject: [PATCH 09/28] chore(deps): bump codecov/codecov-action from 4 to 5 (#1130) Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 4 to 5. - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v4...v5) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index a68bf24f3..90f6adf25 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -50,7 +50,7 @@ jobs: - name: Run Go Tests with coverage run: go test ./... -coverprofile=coverage.txt -covermode=atomic - name: Upload coverage to Codecov - uses: codecov/codecov-action@v4 + uses: codecov/codecov-action@v5 with: file: ./coverage.txt fail_ci_if_error: false From 7953e5320e80cef8eb2959db4639a1349c5fb239 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 17 Nov 2024 13:41:58 +0800 Subject: [PATCH 10/28] chore(deps): bump github.com/redis/go-redis/v9 from 9.5.1 to 9.7.0 (#1131) Bumps [github.com/redis/go-redis/v9](https://github.com/redis/go-redis) from 9.5.1 to 9.7.0. 
- [Release notes](https://github.com/redis/go-redis/releases)
- [Changelog](https://github.com/redis/go-redis/blob/master/CHANGELOG.md)
- [Commits](https://github.com/redis/go-redis/compare/v9.5.1...v9.7.0)

---
updated-dependencies:
- dependency-name: github.com/redis/go-redis/v9
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 go.mod | 2 +-
 go.sum | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/go.mod b/go.mod
index 88b3a7f6b..a10f233c7 100644
--- a/go.mod
+++ b/go.mod
@@ -10,7 +10,7 @@ require (
 	github.com/onsi/ginkgo/v2 v2.20.1
 	github.com/onsi/gomega v1.35.1
 	github.com/pkg/errors v0.9.1
-	github.com/redis/go-redis/v9 v9.5.1
+	github.com/redis/go-redis/v9 v9.7.0
 	github.com/stretchr/testify v1.9.0
 	k8s.io/api v0.29.4
 	k8s.io/apimachinery v0.29.4
diff --git a/go.sum b/go.sum
index fae2f852c..ca3356138 100644
--- a/go.sum
+++ b/go.sum
@@ -172,8 +172,8 @@ github.com/prometheus/common v0.51.1 h1:eIjN50Bwglz6a/c3hAgSMcofL3nD+nFQkV6Dd4Ds
 github.com/prometheus/common v0.51.1/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q=
 github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o=
 github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g=
-github.com/redis/go-redis/v9 v9.5.1 h1:H1X4D3yHPaYrkL5X06Wh6xNVM/pX0Ft4RV0vMGvLBh8=
-github.com/redis/go-redis/v9 v9.5.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
+github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E=
+github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw=
 github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
 github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
 github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=

From 53b95a0a20def3cb761eeae45e0507cf8238398e Mon Sep 17 00:00:00 2001
From: Dariusch Ochlast
Date: Mon, 18 Nov 2024 10:48:38 +0100
Subject: [PATCH 11/28] fix: pdb value mapping in redis-sentinel (#1136)

Signed-off-by: Dariusch Ochlast
---
 charts/redis-sentinel/templates/redis-sentinel.yaml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/charts/redis-sentinel/templates/redis-sentinel.yaml b/charts/redis-sentinel/templates/redis-sentinel.yaml
index 16581b49f..3685d3a4f 100644
--- a/charts/redis-sentinel/templates/redis-sentinel.yaml
+++ b/charts/redis-sentinel/templates/redis-sentinel.yaml
@@ -89,9 +89,9 @@ spec:
   {{- end }}
   {{- if .Values.pdb.enabled }}
   pdb:
-    enabled: {{ .enabled }}
-    minAvailable: {{ .minAvailable }}
-    maxUnavailable: {{ .maxUnavailable }}
+    enabled: {{ .Values.pdb.enabled }}
+    minAvailable: {{ .Values.pdb.minAvailable }}
+    maxUnavailable: {{ .Values.pdb.maxUnavailable }}
   {{- end }}
   {{- with .Values.livenessProbe }}
   livenessProbe:

From b9034e8674d2ad4c34a7ec2818f64daff284f2e5 Mon Sep 17 00:00:00 2001
From: yangw
Date: Mon, 2 Dec 2024 17:36:01 +0800
Subject: [PATCH 12/28] fix: chart render error when enabling initContainer (#1146)

fix: update initContainer and sidecar property references

- Updated version and appVersion for redis, redis-cluster, redis-replication, and redis-sentinel charts.
- Modified initContainer and sidecar property references in templates to include the current context.
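
Why the trailing dot matters: Helm's `include` takes exactly two arguments, a template name and a context object, so a call like `{{ include "sidecar.properties" | nindent 4 }}` aborts rendering with a wrong-number-of-args error as soon as the guarded block is reached. A minimal sketch of the corrected pattern (`demo.properties` is a hypothetical partial standing in for the charts' real helpers):

```
{{- define "demo.properties" -}}
image: {{ .Values.initContainer.image }}
{{- end }}

{{- /* "." passes the root context (.Values, .Release, ...) into the
   partial; omitting it is a render-time error, not a silent default. */}}
initContainer: {{ include "demo.properties" . | nindent 4 }}
```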
This update ensures consistency across the Redis charts and improves template rendering. Signed-off-by: drivebyer --- charts/redis-cluster/Chart.yaml | 4 ++-- charts/redis-cluster/templates/redis-cluster.yaml | 4 ++-- charts/redis-replication/Chart.yaml | 4 ++-- charts/redis-replication/templates/redis-replication.yaml | 4 ++-- charts/redis-sentinel/Chart.yaml | 4 ++-- charts/redis-sentinel/templates/redis-sentinel.yaml | 4 ++-- charts/redis/Chart.yaml | 4 ++-- charts/redis/templates/redis-standalone.yaml | 4 ++-- 8 files changed, 16 insertions(+), 16 deletions(-) diff --git a/charts/redis-cluster/Chart.yaml b/charts/redis-cluster/Chart.yaml index 81bf93921..7acf90f44 100644 --- a/charts/redis-cluster/Chart.yaml +++ b/charts/redis-cluster/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 name: redis-cluster description: Provides easy redis setup definitions for Kubernetes services, and deployment. -version: 0.16.2 -appVersion: "0.16.2" +version: 0.16.3 +appVersion: "0.16.3" home: https://github.com/ot-container-kit/redis-operator sources: - https://github.com/ot-container-kit/redis-operator diff --git a/charts/redis-cluster/templates/redis-cluster.yaml b/charts/redis-cluster/templates/redis-cluster.yaml index fd12b409e..33bab6cb7 100644 --- a/charts/redis-cluster/templates/redis-cluster.yaml +++ b/charts/redis-cluster/templates/redis-cluster.yaml @@ -78,10 +78,10 @@ spec: secretName: {{ .Values.acl.secret.secretName | quote }} {{- end }} {{- if and .Values.sidecars (ne .Values.sidecars.name "") (ne .Values.sidecars.image "") }} - sidecars: {{ include "sidecar.properties" | nindent 4 }} + sidecars: {{ include "sidecar.properties" . | nindent 4 }} {{- end }} {{- if and .Values.initContainer .Values.initContainer.enabled (ne .Values.initContainer.image "") }} - initContainer: {{ include "initContainer.properties" | nindent 4 }} + initContainer: {{ include "initContainer.properties" . | nindent 4 }} {{- end }} {{- if .Values.env }} env: {{ toYaml .Values.env | nindent 4 }} diff --git a/charts/redis-replication/Chart.yaml b/charts/redis-replication/Chart.yaml index dbc2fde62..b5d2bd4ac 100644 --- a/charts/redis-replication/Chart.yaml +++ b/charts/redis-replication/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 name: redis-replication description: Provides easy redis setup definitions for Kubernetes services, and deployment. -version: 0.16.4 -appVersion: "0.16.4" +version: 0.16.5 +appVersion: "0.16.5" type: application engine: gotpl maintainers: diff --git a/charts/redis-replication/templates/redis-replication.yaml b/charts/redis-replication/templates/redis-replication.yaml index 49021123e..ebc3465ab 100644 --- a/charts/redis-replication/templates/redis-replication.yaml +++ b/charts/redis-replication/templates/redis-replication.yaml @@ -81,10 +81,10 @@ spec: secretName: {{ .Values.acl.secret.secretName | quote }} {{- end }} {{- if and .Values.initContainer .Values.initContainer.enabled (ne .Values.initContainer.image "") }} - initContainer: {{ include "initContainer.properties" | nindent 4 }} + initContainer: {{ include "initContainer.properties" . | nindent 4 }} {{- end }} {{- if and .Values.sidecars (ne .Values.sidecars.name "") (ne .Values.sidecars.image "") }} - sidecars: {{ include "sidecar.properties" | nindent 4 }} + sidecars: {{ include "sidecar.properties" . 
| nindent 4 }}
   {{- end }}
   {{- if and .Values.serviceAccountName (ne .Values.serviceAccountName "") }}
   serviceAccountName: "{{ .Values.serviceAccountName }}"
diff --git a/charts/redis-sentinel/Chart.yaml b/charts/redis-sentinel/Chart.yaml
index 883873ff0..e00ceedd5 100644
--- a/charts/redis-sentinel/Chart.yaml
+++ b/charts/redis-sentinel/Chart.yaml
@@ -1,8 +1,8 @@
 apiVersion: v2
 name: redis-sentinel
 description: Provides easy redis setup definitions for Kubernetes services, and deployment.
-version: 0.16.6
-appVersion: "0.16.6"
+version: 0.16.7
+appVersion: "0.16.7"
 home: https://github.com/ot-container-kit/redis-operator
 sources:
   - https://github.com/ot-container-kit/redis-operator
diff --git a/charts/redis-sentinel/templates/redis-sentinel.yaml b/charts/redis-sentinel/templates/redis-sentinel.yaml
index 3685d3a4f..410c2819d 100644
--- a/charts/redis-sentinel/templates/redis-sentinel.yaml
+++ b/charts/redis-sentinel/templates/redis-sentinel.yaml
@@ -102,10 +102,10 @@ spec:
     {{- toYaml . | nindent 4 }}
   {{- end }}
   {{- if and .Values.initContainer .Values.initContainer.enabled (ne .Values.initContainer.image "") }}
-  initContainer: {{ include "initContainer.properties" | nindent 4 }}
+  initContainer: {{ include "initContainer.properties" . | nindent 4 }}
   {{- end }}
   {{- if and .Values.sidecars (ne .Values.sidecars.name "") (ne .Values.sidecars.image "") }}
-  sidecars: {{ include "sidecar.properties" | nindent 4 }}
+  sidecars: {{ include "sidecar.properties" . | nindent 4 }}
   {{- end }}
   {{- if and .Values.serviceAccountName (ne .Values.serviceAccountName "") }}
   serviceAccountName: "{{ .Values.serviceAccountName }}"
diff --git a/charts/redis/Chart.yaml b/charts/redis/Chart.yaml
index c8e772101..e6fc0d3f6 100644
--- a/charts/redis/Chart.yaml
+++ b/charts/redis/Chart.yaml
@@ -2,8 +2,8 @@
 apiVersion: v2
 name: redis
 description: Provides easy redis setup definitions for Kubernetes services, and deployment.
-version: 0.16.2
-appVersion: "0.16.2"
+version: 0.16.3
+appVersion: "0.16.3"
 home: https://github.com/ot-container-kit/redis-operator
 sources:
   - https://github.com/ot-container-kit/redis-operator
diff --git a/charts/redis/templates/redis-standalone.yaml b/charts/redis/templates/redis-standalone.yaml
index ebfce6408..25f0bf234 100644
--- a/charts/redis/templates/redis-standalone.yaml
+++ b/charts/redis/templates/redis-standalone.yaml
@@ -80,10 +80,10 @@ spec:
     secretName: {{ .Values.acl.secret.secretName | quote }}
   {{- end }}
   {{- if and .Values.initContainer .Values.initContainer.enabled (ne .Values.initContainer.image "") }}
-  initContainer: {{ include "initContainer.properties" | nindent 4 }}
+  initContainer: {{ include "initContainer.properties" . | nindent 4 }}
   {{- end }}
   {{- if and .Values.sidecars (ne .Values.sidecars.name "") (ne .Values.sidecars.image "") }}
-  sidecars: {{ include "sidecar.properties" | nindent 4 }}
+  sidecars: {{ include "sidecar.properties" . | nindent 4 }}
   {{- end }}
   {{- if and .Values.serviceAccountName (ne .Values.serviceAccountName "") }}
   serviceAccountName: "{{ .Values.serviceAccountName }}"

From a5dd80ee4a9c8c36eddb976ca850eaeb6a9ca718 Mon Sep 17 00:00:00 2001
From: yangw
Date: Thu, 5 Dec 2024 16:19:50 +0800
Subject: [PATCH 13/28] fix: initContainer enabled properties not defined in template (#1152)

- Bumped version and appVersion for redis, redis-cluster, redis-replication, and redis-sentinel charts to 0.16.4, 0.16.4, 0.16.6, and 0.16.8 respectively.
- Enhanced initContainer properties in templates to include the 'enabled' field for better configuration management.
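
Concretely, the helper previously rendered the image settings but dropped the flag itself, so `initContainer.enabled` never reached the generated CR even when the values file enabled it. A before/after sketch with illustrative values (the image tag is assumed, not taken from the charts):

```yaml
# values.yaml (excerpt, illustrative)
initContainer:
  enabled: true
  image: busybox:1.36

# rendered fragment before the fix: the flag was lost
# initContainer:
#   image: busybox:1.36

# rendered fragment after the fix
initContainer:
  enabled: true
  image: busybox:1.36
```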
This update ensures consistency across the Redis charts and improves template rendering. Signed-off-by: drivebyer --- charts/redis-cluster/Chart.yaml | 4 ++-- charts/redis-cluster/templates/_helpers.tpl | 1 + charts/redis-replication/Chart.yaml | 4 ++-- charts/redis-replication/templates/_helpers.tpl | 1 + charts/redis-sentinel/Chart.yaml | 4 ++-- charts/redis-sentinel/templates/_helpers.tpl | 1 + charts/redis/Chart.yaml | 4 ++-- charts/redis/templates/_helpers.tpl | 1 + 8 files changed, 12 insertions(+), 8 deletions(-) diff --git a/charts/redis-cluster/Chart.yaml b/charts/redis-cluster/Chart.yaml index 7acf90f44..6238ab9cb 100644 --- a/charts/redis-cluster/Chart.yaml +++ b/charts/redis-cluster/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 name: redis-cluster description: Provides easy redis setup definitions for Kubernetes services, and deployment. -version: 0.16.3 -appVersion: "0.16.3" +version: 0.16.4 +appVersion: "0.16.4" home: https://github.com/ot-container-kit/redis-operator sources: - https://github.com/ot-container-kit/redis-operator diff --git a/charts/redis-cluster/templates/_helpers.tpl b/charts/redis-cluster/templates/_helpers.tpl index 0dd154eb4..352436973 100644 --- a/charts/redis-cluster/templates/_helpers.tpl +++ b/charts/redis-cluster/templates/_helpers.tpl @@ -64,6 +64,7 @@ env: {{- define "initContainer.properties" -}} {{- with .Values.initContainer }} {{- if .enabled }} +enabled: {{ .enabled }} image: {{ .image }} {{- if .imagePullPolicy }} imagePullPolicy: {{ .imagePullPolicy }} diff --git a/charts/redis-replication/Chart.yaml b/charts/redis-replication/Chart.yaml index b5d2bd4ac..800a0592f 100644 --- a/charts/redis-replication/Chart.yaml +++ b/charts/redis-replication/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 name: redis-replication description: Provides easy redis setup definitions for Kubernetes services, and deployment. -version: 0.16.5 -appVersion: "0.16.5" +version: 0.16.6 +appVersion: "0.16.6" type: application engine: gotpl maintainers: diff --git a/charts/redis-replication/templates/_helpers.tpl b/charts/redis-replication/templates/_helpers.tpl index cfc58681b..944ea732c 100644 --- a/charts/redis-replication/templates/_helpers.tpl +++ b/charts/redis-replication/templates/_helpers.tpl @@ -19,6 +19,7 @@ app.kubernetes.io/component: middleware {{- define "initContainer.properties" -}} {{- with .Values.initContainer }} {{- if .enabled }} +enabled: {{ .enabled }} image: {{ .image }} {{- if .imagePullPolicy }} imagePullPolicy: {{ .imagePullPolicy }} diff --git a/charts/redis-sentinel/Chart.yaml b/charts/redis-sentinel/Chart.yaml index e00ceedd5..8168667fd 100644 --- a/charts/redis-sentinel/Chart.yaml +++ b/charts/redis-sentinel/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 name: redis-sentinel description: Provides easy redis setup definitions for Kubernetes services, and deployment. 
-version: 0.16.7 -appVersion: "0.16.7" +version: 0.16.8 +appVersion: "0.16.8" home: https://github.com/ot-container-kit/redis-operator sources: - https://github.com/ot-container-kit/redis-operator diff --git a/charts/redis-sentinel/templates/_helpers.tpl b/charts/redis-sentinel/templates/_helpers.tpl index c918548fd..49e46984d 100644 --- a/charts/redis-sentinel/templates/_helpers.tpl +++ b/charts/redis-sentinel/templates/_helpers.tpl @@ -19,6 +19,7 @@ app.kubernetes.io/component: middleware {{- define "initContainer.properties" -}} {{- with .Values.initContainer }} {{- if .enabled }} +enabled: {{ .enabled }} image: {{ .image }} {{- if .imagePullPolicy }} imagePullPolicy: {{ .imagePullPolicy }} diff --git a/charts/redis/Chart.yaml b/charts/redis/Chart.yaml index e6fc0d3f6..8af3add95 100644 --- a/charts/redis/Chart.yaml +++ b/charts/redis/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: redis description: Provides easy redis setup definitions for Kubernetes services, and deployment. -version: 0.16.3 -appVersion: "0.16.3" +version: 0.16.4 +appVersion: "0.16.4" home: https://github.com/ot-container-kit/redis-operator sources: - https://github.com/ot-container-kit/redis-operator diff --git a/charts/redis/templates/_helpers.tpl b/charts/redis/templates/_helpers.tpl index 698c901ea..abdeb992a 100644 --- a/charts/redis/templates/_helpers.tpl +++ b/charts/redis/templates/_helpers.tpl @@ -19,6 +19,7 @@ app.kubernetes.io/component: middleware {{- define "initContainer.properties" -}} {{- with .Values.initContainer }} {{- if .enabled }} +enabled: {{ .enabled }} image: {{ .image }} {{- if .imagePullPolicy }} imagePullPolicy: {{ .imagePullPolicy }} From e9ec33f735e3f6fdb0179e932f9ed5202da88dc4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Dec 2024 16:32:53 +0800 Subject: [PATCH 14/28] chore(deps): bump kyverno/action-install-chainsaw from 0.2.11 to 0.2.12 (#1151) Bumps [kyverno/action-install-chainsaw](https://github.com/kyverno/action-install-chainsaw) from 0.2.11 to 0.2.12. - [Release notes](https://github.com/kyverno/action-install-chainsaw/releases) - [Commits](https://github.com/kyverno/action-install-chainsaw/compare/v0.2.11...v0.2.12) --- updated-dependencies: - dependency-name: kyverno/action-install-chainsaw dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 90f6adf25..3e403756e 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -216,7 +216,7 @@ jobs: - name: Install Cosign uses: sigstore/cosign-installer@v3.7.0 - name: Install chainsaw - uses: kyverno/action-install-chainsaw@v0.2.11 + uses: kyverno/action-install-chainsaw@v0.2.12 with: verify: true - name: Check install From 25aab8a8b91a19d1ddf389f37256fe7ea473fe43 Mon Sep 17 00:00:00 2001 From: drivebyer Date: Tue, 10 Dec 2024 17:05:16 +0800 Subject: [PATCH 15/28] feat: enhance RedisReplication controller and CRD with additional status columns and refactor reconciliation logic - Added new printer columns "Master" and "Age" to the RedisReplication CRD for better visibility of the master node and resource age. 
- Refactored the reconciliation logic in the RedisReplication controller to improve clarity and maintainability by introducing a reconciler struct for handling different reconciliation tasks. - Updated the e2e tests to validate the HA setup of Redis Replication and Sentinel, ensuring consistency in master IP across different sources. - Removed obsolete test files and replaced them with a new HA setup configuration. This update improves the usability and reliability of the Redis replication feature. Signed-off-by: drivebyer --- api/v1beta2/redisreplication_types.go | 2 + ...edis.opstreelabs.in_redisreplications.yaml | 9 +- .../redisreplication_controller.go | 154 +++++++++++++----- pkg/k8sutils/kube.go | 9 + .../v1beta2/setup/ha/chainsaw-test.yaml | 40 ++--- .../v1beta2/setup/ha/cli-pod.yaml | 15 -- .../setup/ha/{replication.yaml => ha.yaml} | 23 +++ .../v1beta2/setup/ha/sentinel.yaml | 23 --- 8 files changed, 174 insertions(+), 101 deletions(-) create mode 100644 pkg/k8sutils/kube.go delete mode 100644 tests/e2e-chainsaw/v1beta2/setup/ha/cli-pod.yaml rename tests/e2e-chainsaw/v1beta2/setup/ha/{replication.yaml => ha.yaml} (53%) delete mode 100644 tests/e2e-chainsaw/v1beta2/setup/ha/sentinel.yaml diff --git a/api/v1beta2/redisreplication_types.go b/api/v1beta2/redisreplication_types.go index 92915e76c..8b2510cc8 100644 --- a/api/v1beta2/redisreplication_types.go +++ b/api/v1beta2/redisreplication_types.go @@ -41,6 +41,8 @@ type RedisReplicationStatus struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Master",type="string",JSONPath=".status.masterNode" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" // Redis is the Schema for the redis API type RedisReplication struct { diff --git a/config/crd/bases/redis.redis.opstreelabs.in_redisreplications.yaml b/config/crd/bases/redis.redis.opstreelabs.in_redisreplications.yaml index 48ba4d320..38a26b931 100644 --- a/config/crd/bases/redis.redis.opstreelabs.in_redisreplications.yaml +++ b/config/crd/bases/redis.redis.opstreelabs.in_redisreplications.yaml @@ -4397,7 +4397,14 @@ spec: storage: false subresources: status: {} - - name: v1beta2 + - additionalPrinterColumns: + - jsonPath: .status.masterNode + name: Master + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 schema: openAPIV3Schema: description: Redis is the Schema for the redis API diff --git a/pkg/controllers/redisreplication/redisreplication_controller.go b/pkg/controllers/redisreplication/redisreplication_controller.go index 6b71bf56f..3959bd677 100644 --- a/pkg/controllers/redisreplication/redisreplication_controller.go +++ b/pkg/controllers/redisreplication/redisreplication_controller.go @@ -28,57 +28,37 @@ type Reconciler struct { } func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - logger := log.FromContext(ctx, "Request.Namespace", req.Namespace, "Request.Name", req.Name) instance := &redisv1beta2.RedisReplication{} - err := r.Client.Get(context.TODO(), req.NamespacedName, instance) + err := r.Client.Get(ctx, req.NamespacedName, instance) if err != nil { - return intctrlutil.RequeueWithErrorChecking(ctx, err, "") - } - if instance.ObjectMeta.GetDeletionTimestamp() != nil { - if err = k8sutils.HandleRedisReplicationFinalizer(ctx, r.Client, r.K8sClient, instance); err != nil { - return intctrlutil.RequeueWithError(ctx, err, "") - } - return intctrlutil.Reconciled() - 
} - if _, found := instance.ObjectMeta.GetAnnotations()["redisreplication.opstreelabs.in/skip-reconcile"]; found { - return intctrlutil.RequeueAfter(ctx, time.Second*10, "found skip reconcile annotation") - } - if err = k8sutils.AddFinalizer(ctx, instance, k8sutils.RedisReplicationFinalizer, r.Client); err != nil { - return intctrlutil.RequeueWithError(ctx, err, "") - } - err = k8sutils.CreateReplicationRedis(ctx, instance, r.K8sClient) - if err != nil { - return intctrlutil.RequeueWithError(ctx, err, "") - } - err = k8sutils.CreateReplicationService(ctx, instance, r.K8sClient) - if err != nil { - return intctrlutil.RequeueWithError(ctx, err, "") - } - if !r.IsStatefulSetReady(ctx, instance.Namespace, instance.Name) { - return intctrlutil.Reconciled() + return intctrlutil.RequeueWithErrorChecking(ctx, err, "failed to get RedisReplication instance") } - var realMaster string - masterNodes := k8sutils.GetRedisNodesByRole(ctx, r.K8sClient, instance, "master") - if len(masterNodes) > 1 { - logger.Info("Creating redis replication by executing replication creation commands") - slaveNodes := k8sutils.GetRedisNodesByRole(ctx, r.K8sClient, instance, "slave") - realMaster = k8sutils.GetRedisReplicationRealMaster(ctx, r.K8sClient, instance, masterNodes) - if len(slaveNodes) == 0 { - realMaster = masterNodes[0] + var reconcilers []reconciler + if k8sutils.IsDeleted(instance) { + reconcilers = []reconciler{ + {typ: "finalizer", rec: r.reconcileFinalizer}, } - if err = k8sutils.CreateMasterSlaveReplication(ctx, r.K8sClient, instance, masterNodes, realMaster); err != nil { - return intctrlutil.RequeueAfter(ctx, time.Second*60, "") + } else { + reconcilers = []reconciler{ + {typ: "annotation", rec: r.reconcileAnnotation}, + {typ: "statefulset", rec: r.reconcileStatefulSet}, + {typ: "service", rec: r.reconcileService}, + {typ: "redis", rec: r.reconcileRedis}, + {typ: "status", rec: r.reconcileStatus}, } } - realMaster = k8sutils.GetRedisReplicationRealMaster(ctx, r.K8sClient, instance, masterNodes) - if err = r.UpdateRedisReplicationMaster(ctx, instance, realMaster); err != nil { - return intctrlutil.RequeueWithError(ctx, err, "") - } - if err = r.UpdateRedisPodRoleLabel(ctx, instance, realMaster); err != nil { - return intctrlutil.RequeueWithError(ctx, err, "") + for _, reconciler := range reconcilers { + result, err := reconciler.rec(ctx, instance) + if err != nil { + return intctrlutil.RequeueWithError(ctx, err, "") + } + if result.Requeue { + return result, nil + } } + return intctrlutil.RequeueAfter(ctx, time.Second*10, "") } @@ -86,6 +66,13 @@ func (r *Reconciler) UpdateRedisReplicationMaster(ctx context.Context, instance if instance.Status.MasterNode == masterNode { return nil } + + if instance.Status.MasterNode != masterNode { + logger := log.FromContext(ctx) + logger.Info("Updating master node", + "previous", instance.Status.MasterNode, + "new", masterNode) + } instance.Status.MasterNode = masterNode if err := r.Client.Status().Update(ctx, instance); err != nil { return err @@ -118,6 +105,89 @@ func (r *Reconciler) UpdateRedisPodRoleLabel(ctx context.Context, cr *redisv1bet return nil } +type reconciler struct { + typ string + rec func(ctx context.Context, instance *redisv1beta2.RedisReplication) (ctrl.Result, error) +} + +func (r *Reconciler) reconcileFinalizer(ctx context.Context, instance *redisv1beta2.RedisReplication) (ctrl.Result, error) { + if k8sutils.IsDeleted(instance) { + if err := k8sutils.HandleRedisReplicationFinalizer(ctx, r.Client, r.K8sClient, instance); err != nil { + return 
intctrlutil.RequeueWithError(ctx, err, "") + } + return intctrlutil.Reconciled() + } + if err := k8sutils.AddFinalizer(ctx, instance, k8sutils.RedisReplicationFinalizer, r.Client); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "") + } + return intctrlutil.Reconciled() +} + +func (r *Reconciler) reconcileAnnotation(ctx context.Context, instance *redisv1beta2.RedisReplication) (ctrl.Result, error) { + if _, found := instance.ObjectMeta.GetAnnotations()["redisreplication.opstreelabs.in/skip-reconcile"]; found { + return intctrlutil.RequeueAfter(ctx, time.Second*10, "found skip reconcile annotation") + } + return intctrlutil.Reconciled() +} + +func (r *Reconciler) reconcileStatefulSet(ctx context.Context, instance *redisv1beta2.RedisReplication) (ctrl.Result, error) { + if err := k8sutils.CreateReplicationRedis(ctx, instance, r.K8sClient); err != nil { + return intctrlutil.RequeueAfter(ctx, time.Second*60, "") + } + return intctrlutil.Reconciled() +} + +func (r *Reconciler) reconcileService(ctx context.Context, instance *redisv1beta2.RedisReplication) (ctrl.Result, error) { + if err := k8sutils.CreateReplicationService(ctx, instance, r.K8sClient); err != nil { + return intctrlutil.RequeueAfter(ctx, time.Second*60, "") + } + return intctrlutil.Reconciled() +} + +func (r *Reconciler) reconcileRedis(ctx context.Context, instance *redisv1beta2.RedisReplication) (ctrl.Result, error) { + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + logger := log.FromContext(ctx) + + if !r.IsStatefulSetReady(ctx, instance.Namespace, instance.Name) { + logger.Info("StatefulSet not ready yet, requeuing", + "namespace", instance.Namespace, + "name", instance.Name) + return intctrlutil.RequeueAfter(ctx, time.Second*60, "") + } + + var realMaster string + masterNodes := k8sutils.GetRedisNodesByRole(ctx, r.K8sClient, instance, "master") + if len(masterNodes) > 1 { + log.FromContext(ctx).Info("Creating redis replication by executing replication creation commands") + slaveNodes := k8sutils.GetRedisNodesByRole(ctx, r.K8sClient, instance, "slave") + realMaster = k8sutils.GetRedisReplicationRealMaster(ctx, r.K8sClient, instance, masterNodes) + if len(slaveNodes) == 0 { + realMaster = masterNodes[0] + } + if err := k8sutils.CreateMasterSlaveReplication(ctx, r.K8sClient, instance, masterNodes, realMaster); err != nil { + return intctrlutil.RequeueAfter(ctx, time.Second*60, "") + } + } + return intctrlutil.Reconciled() +} + +// reconcileStatus update status and label. +func (r *Reconciler) reconcileStatus(ctx context.Context, instance *redisv1beta2.RedisReplication) (ctrl.Result, error) { + var err error + var realMaster string + + masterNodes := k8sutils.GetRedisNodesByRole(ctx, r.K8sClient, instance, "master") + realMaster = k8sutils.GetRedisReplicationRealMaster(ctx, r.K8sClient, instance, masterNodes) + if err = r.UpdateRedisReplicationMaster(ctx, instance, realMaster); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "") + } + if err = r.UpdateRedisPodRoleLabel(ctx, instance, realMaster); err != nil { + return intctrlutil.RequeueWithError(ctx, err, "") + } + return intctrlutil.Reconciled() +} + // SetupWithManager sets up the controller with the Manager. func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). 
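
The shape of the refactor above is a small dispatch loop: each phase is a named function returning a result, and the loop stops at the first phase that asks to requeue, so later phases (such as status) never run against a half-ready cluster. A condensed, runnable sketch of the pattern with simplified types (not the operator's exact code):

```go
package main

import (
	"context"
	"fmt"
)

// result stands in for ctrl.Result: a phase may ask the controller to requeue.
type result struct{ Requeue bool }

// phase pairs a label (useful in logs) with one reconciliation step.
type phase struct {
	typ string
	rec func(ctx context.Context) (result, error)
}

// reconcile runs the phases in order and stops at the first requeue or error.
func reconcile(ctx context.Context, phases []phase) (result, error) {
	for _, p := range phases {
		res, err := p.rec(ctx)
		if err != nil {
			return result{}, fmt.Errorf("phase %q: %w", p.typ, err)
		}
		if res.Requeue {
			return res, nil // remaining phases wait for the next reconcile round
		}
	}
	return result{}, nil
}

func main() {
	res, err := reconcile(context.Background(), []phase{
		{typ: "annotation", rec: func(context.Context) (result, error) { return result{}, nil }},
		{typ: "statefulset", rec: func(context.Context) (result, error) { return result{Requeue: true}, nil }},
		{typ: "status", rec: func(context.Context) (result, error) { return result{}, nil }},
	})
	fmt.Println(res.Requeue, err) // true <nil> — the "status" phase never ran this round
}
```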
diff --git a/pkg/k8sutils/kube.go b/pkg/k8sutils/kube.go
new file mode 100644
index 000000000..f70056fb5
--- /dev/null
+++ b/pkg/k8sutils/kube.go
@@ -0,0 +1,9 @@
+package k8sutils
+
+import (
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func IsDeleted(obj client.Object) bool {
+	return obj.GetDeletionTimestamp() != nil
+}
diff --git a/tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml b/tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml
index 050f59672..74205ce69 100644
--- a/tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml
+++ b/tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml
@@ -1,3 +1,11 @@
+# This case is to test the HA setup of the Redis Replication and Sentinel
+# It will create a Redis Replication and Sentinel, then terminate the Redis Replication master pod
+# and check if the Sentinel can promote a new master pod.
+#
+# It checks the same pod IP in three places:
+# 1. Status from RedisReplication
+# 2. Label from RedisReplication
+# 3. get-master-addr-by-name from Sentinel
 ---
 apiVersion: chainsaw.kyverno.io/v1alpha1
 kind: Test
@@ -7,25 +15,19 @@ spec:
   steps:
     - try:
         - apply:
-            file: replication.yaml
-        - apply:
-            file: sentinel.yaml
-        - create:
-            file: cli-pod.yaml
+            file: ha.yaml
 
-    - name: Sleep for 3 minutes
+    - name: Test Master IP consistency
       try:
         - sleep:
-            duration: 3m
-
-    - name: Test sentinel monitoring
-      try:
+            duration: 180s
         - script:
             timeout: 10s
             content: |
-              export MASTER_IP_FROM_SENTINEL=$(kubectl exec --namespace ${NAMESPACE} redis-sentinel-sentinel-0 -- redis-cli -p 26379 sentinel get-master-addr-by-name myMaster | head -n 1);
+              export MASTER_IP_FROM_STATUS=$(kubectl -n ${NAMESPACE} get pod $(kubectl -n ${NAMESPACE} get redisreplication redis-replication -o jsonpath='{.status.masterNode}') -o jsonpath='{.status.podIP}');
+              export MASTER_IP_FROM_SENTINEL=$(kubectl -n ${NAMESPACE} exec redis-sentinel-sentinel-0 -- redis-cli -p 26379 sentinel get-master-addr-by-name myMaster | head -n 1);
               export MASTER_IP_FROM_LABEL=$(kubectl -n ${NAMESPACE} get pod -l app=redis-replication,redis-role=master,redis_setup_type=replication -o jsonpath='{.items[0].status.podIP}');
-              if [ "$MASTER_IP_FROM_SENTINEL" = "$MASTER_IP_FROM_LABEL" ]; then echo "OK"; else echo "FAIL"; fi
+              if [ "$MASTER_IP_FROM_SENTINEL" = "$MASTER_IP_FROM_LABEL" ] && [ "$MASTER_IP_FROM_SENTINEL" = "$MASTER_IP_FROM_STATUS" ]; then echo "OK"; else echo "FAIL"; fi
             check:
               (contains($stdout, 'OK')): true
@@ -35,20 +37,18 @@ spec:
     - script:
         timeout: 10s
         content: |
-          kubectl --namespace ${NAMESPACE} delete pod redis-replication-0
-
-    - name: Sleep for 5 minutes
-      try:
+          kubectl -n ${NAMESPACE} delete pod redis-replication-0
         - sleep:
-            duration: 5m
+            duration: 30s
 
-    - name: Test sentinel monitoring
+    - name: Test Master IP consistency
       try:
         - script:
             timeout: 10s
             content: |
-              export MASTER_IP_FROM_SENTINEL=$(kubectl exec --namespace ${NAMESPACE} redis-sentinel-sentinel-0 -- redis-cli -p 26379 sentinel get-master-addr-by-name myMaster | head -n 1);
+              export MASTER_IP_FROM_STATUS=$(kubectl -n ${NAMESPACE} get pod $(kubectl -n ${NAMESPACE} get redisreplication redis-replication -o jsonpath='{.status.masterNode}') -o jsonpath='{.status.podIP}');
+              export MASTER_IP_FROM_SENTINEL=$(kubectl -n ${NAMESPACE} exec redis-sentinel-sentinel-0 -- redis-cli -p 26379 sentinel get-master-addr-by-name myMaster | head -n 1);
               export MASTER_IP_FROM_LABEL=$(kubectl -n ${NAMESPACE} get pod -l app=redis-replication,redis-role=master,redis_setup_type=replication -o jsonpath='{.items[0].status.podIP}');
-              if [ 
$MASTER_IP_FROM_SENTINEL = $MASTER_IP_FROM_LABEL ]; then echo "OK"; else echo "FAIL"; fi + if [ "$MASTER_IP_FROM_SENTINEL" = "$MASTER_IP_FROM_LABEL" ] && [ "$MASTER_IP_FROM_SENTINEL" = "$MASTER_IP_FROM_STATUS" ]; then echo "OK"; else echo "FAIL"; fi check: (contains($stdout, 'OK')): true diff --git a/tests/e2e-chainsaw/v1beta2/setup/ha/cli-pod.yaml b/tests/e2e-chainsaw/v1beta2/setup/ha/cli-pod.yaml deleted file mode 100644 index e3049d88b..000000000 --- a/tests/e2e-chainsaw/v1beta2/setup/ha/cli-pod.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -apiVersion: v1 -kind: Pod -metadata: - name: redis - labels: - app: redis -spec: - containers: - - name: redis - image: redis:alpine - resources: - limits: - cpu: 200m - memory: 500Mi diff --git a/tests/e2e-chainsaw/v1beta2/setup/ha/replication.yaml b/tests/e2e-chainsaw/v1beta2/setup/ha/ha.yaml similarity index 53% rename from tests/e2e-chainsaw/v1beta2/setup/ha/replication.yaml rename to tests/e2e-chainsaw/v1beta2/setup/ha/ha.yaml index bf7c7e7b4..1ccd498bc 100644 --- a/tests/e2e-chainsaw/v1beta2/setup/ha/replication.yaml +++ b/tests/e2e-chainsaw/v1beta2/setup/ha/ha.yaml @@ -25,3 +25,26 @@ spec: resources: requests: storage: 1Gi +--- +apiVersion: redis.redis.opstreelabs.in/v1beta2 +kind: RedisSentinel +metadata: + name: redis-sentinel +spec: + clusterSize: 1 + podSecurityContext: + runAsUser: 1000 + fsGroup: 1000 + redisSentinelConfig: + redisReplicationName: redis-replication + quorum: '1' + kubernetesConfig: + image: quay.io/opstree/redis-sentinel:latest + imagePullPolicy: Always + resources: + requests: + cpu: 101m + memory: 128Mi + limits: + cpu: 101m + memory: 128Mi diff --git a/tests/e2e-chainsaw/v1beta2/setup/ha/sentinel.yaml b/tests/e2e-chainsaw/v1beta2/setup/ha/sentinel.yaml deleted file mode 100644 index 994b5626a..000000000 --- a/tests/e2e-chainsaw/v1beta2/setup/ha/sentinel.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -apiVersion: redis.redis.opstreelabs.in/v1beta2 -kind: RedisSentinel -metadata: - name: redis-sentinel -spec: - clusterSize: 1 - podSecurityContext: - runAsUser: 1000 - fsGroup: 1000 - redisSentinelConfig: - redisReplicationName: redis-replication - quorum: '1' - kubernetesConfig: - image: quay.io/opstree/redis-sentinel:latest - imagePullPolicy: Always - resources: - requests: - cpu: 101m - memory: 128Mi - limits: - cpu: 101m - memory: 128Mi From a8092ef6fe8876689f1614a56213e8927e4e4ecc Mon Sep 17 00:00:00 2001 From: drivebyer Date: Tue, 10 Dec 2024 17:32:35 +0800 Subject: [PATCH 16/28] fix lint Signed-off-by: drivebyer --- tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml b/tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml index 74205ce69..8d367dfed 100644 --- a/tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml +++ b/tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml @@ -24,7 +24,8 @@ spec: - script: timeout: 10s content: | - export MASTER_IP_FROM_STATUS=$(kubectl -n ${NAMESPACE} get pod $(kubectl -n ${NAMESPACE} get redisreplication redis-replication -o jsonpath='{.status.masterNode}') -o jsonpath='{.status.podIP}'); + export MASTER_POD_FROM_STATUS=$(kubectl -n ${NAMESPACE} get redisreplication redis-replication -o jsonpath='{.status.masterNode}'); + export MASTER_IP_FROM_STATUS=$(kubectl -n ${NAMESPACE} get pod ${MASTER_POD_FROM_STATUS} -o jsonpath='{.status.podIP}'); export MASTER_IP_FROM_SENTINEL=$(kubectl -n ${NAMESPACE} exec redis-sentinel-sentinel-0 -- redis-cli -p 26379 sentinel 
get-master-addr-by-name myMaster | head -n 1);
             export MASTER_IP_FROM_LABEL=$(kubectl -n ${NAMESPACE} get pod -l app=redis-replication,redis-role=master,redis_setup_type=replication -o jsonpath='{.items[0].status.podIP}');
             if [ "$MASTER_IP_FROM_SENTINEL" = "$MASTER_IP_FROM_LABEL" ] && [ "$MASTER_IP_FROM_SENTINEL" = "$MASTER_IP_FROM_STATUS" ]; then echo "OK"; else echo "FAIL"; fi
@@ -46,7 +47,8 @@ spec:
       - script:
           timeout: 10s
          content: |
-            export MASTER_IP_FROM_STATUS=$(kubectl -n ${NAMESPACE} get pod $(kubectl -n ${NAMESPACE} get redisreplication redis-replication -o jsonpath='{.status.masterNode}') -o jsonpath='{.status.podIP}');
+            export MASTER_POD_FROM_STATUS=$(kubectl -n ${NAMESPACE} get redisreplication redis-replication -o jsonpath='{.status.masterNode}');
+            export MASTER_IP_FROM_STATUS=$(kubectl -n ${NAMESPACE} get pod ${MASTER_POD_FROM_STATUS} -o jsonpath='{.status.podIP}');
             export MASTER_IP_FROM_SENTINEL=$(kubectl -n ${NAMESPACE} exec redis-sentinel-sentinel-0 -- redis-cli -p 26379 sentinel get-master-addr-by-name myMaster | head -n 1);
             export MASTER_IP_FROM_LABEL=$(kubectl -n ${NAMESPACE} get pod -l app=redis-replication,redis-role=master,redis_setup_type=replication -o jsonpath='{.items[0].status.podIP}');
             if [ "$MASTER_IP_FROM_SENTINEL" = "$MASTER_IP_FROM_LABEL" ] && [ "$MASTER_IP_FROM_SENTINEL" = "$MASTER_IP_FROM_STATUS" ]; then echo "OK"; else echo "FAIL"; fi

From 79febd30d6292686bf2054011865b5131f8cbe62 Mon Sep 17 00:00:00 2001
From: drivebyer
Date: Tue, 10 Dec 2024 17:45:04 +0800
Subject: [PATCH 17/28] lint

Signed-off-by: drivebyer
---
 tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml b/tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml
index 8d367dfed..eeb088e31 100644
--- a/tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml
+++ b/tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml
@@ -1,7 +1,6 @@
 # This case is to test the HA setup of the Redis Replication and Sentinel
 # It will create a Redis Replication and Sentinel, then terminate the Redis Replication master pod
 # and check if the Sentinel can promote a new master pod.
-#
 # It checks the same pod IP in three places:
 # 1. Status from RedisReplication
 # 2. Label from RedisReplication

From 62995c50bce3ba90be4a166ad3aa53d72c2b1840 Mon Sep 17 00:00:00 2001
From: Abhishek Dubey
Date: Tue, 10 Dec 2024 18:55:01 +0530
Subject: [PATCH 18/28] Updated LICENSE today

---
 LICENSE | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/LICENSE b/LICENSE
index bbbec2b09..058ac3e7c 100644
--- a/LICENSE
+++ b/LICENSE
@@ -186,7 +186,7 @@
       same "printed page" as the copyright notice for easier
       identification within third-party archives.
 
-   Copyright [2020] [Opstree Solutions]
+   Copyright [2024] [Opstree Solutions]
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
From 2d5e45c958b6ab1b6736c4875602090157ea2ac3 Mon Sep 17 00:00:00 2001 From: drivebyer Date: Tue, 10 Dec 2024 21:54:15 +0800 Subject: [PATCH 19/28] upgrade go --- .github/workflows/ci.yaml | 2 +- .golangci.yml | 2 +- Dockerfile | 2 +- go.mod | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 3e403756e..d6f301ae4 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -11,7 +11,7 @@ permissions: contents: read env: - GOLANG_VERSION: 1.22 + GOLANG_VERSION: 1.23.4 APPLICATION_NAME: redis-operator DockerImagName: docker.io/opstree/redis-operator BuildDocs: true diff --git a/.golangci.yml b/.golangci.yml index 1ff28d4d5..8e6b32271 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -47,7 +47,7 @@ linters: run: timeout: 15m - go: "1.22" + go: "1.23.4" tests: true show-stats: true skip-files: diff --git a/Dockerfile b/Dockerfile index ae57deaee..b7554aa6e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Build the manager binary -FROM golang:1.22 as builder +FROM golang:1.23-alpine as builder ARG BUILDOS ARG BUILDPLATFORM ARG BUILDARCH diff --git a/go.mod b/go.mod index a10f233c7..3e91767ec 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/OT-CONTAINER-KIT/redis-operator -go 1.22 +go 1.23.4 require ( github.com/avast/retry-go v3.0.0+incompatible From f59817cb3877296cad8a3ddd2b447103526fe818 Mon Sep 17 00:00:00 2001 From: drivebyer Date: Tue, 10 Dec 2024 21:58:57 +0800 Subject: [PATCH 20/28] add --- pkg/controllers/redisreplication/redisreplication_controller.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/controllers/redisreplication/redisreplication_controller.go b/pkg/controllers/redisreplication/redisreplication_controller.go index 3959bd677..cca13842c 100644 --- a/pkg/controllers/redisreplication/redisreplication_controller.go +++ b/pkg/controllers/redisreplication/redisreplication_controller.go @@ -43,6 +43,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu } else { reconcilers = []reconciler{ {typ: "annotation", rec: r.reconcileAnnotation}, + {typ: "finalizer", rec: r.reconcileFinalizer}, {typ: "statefulset", rec: r.reconcileStatefulSet}, {typ: "service", rec: r.reconcileService}, {typ: "redis", rec: r.reconcileRedis}, From 72d0e991c21c3edb972858ec0b692d03642426c4 Mon Sep 17 00:00:00 2001 From: drivebyer Date: Tue, 10 Dec 2024 22:25:42 +0800 Subject: [PATCH 21/28] disable Signed-off-by: drivebyer --- .golangci.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.golangci.yml b/.golangci.yml index 8e6b32271..2776e5505 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -39,7 +39,6 @@ linters: - tenv - thelper - tparallel - - typecheck - unconvert - unused - wastedassign From 0a920ea4074404ffe4ff6dc9a1bb2e1c5d39f852 Mon Sep 17 00:00:00 2001 From: drivebyer Date: Tue, 10 Dec 2024 22:31:27 +0800 Subject: [PATCH 22/28] update Signed-off-by: drivebyer --- .github/workflows/ci.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index d6f301ae4..34d6eaf52 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -32,7 +32,7 @@ jobs: - name: Run GolangCI-Lint uses: golangci/golangci-lint-action@v6 with: - version: v1.54.0 + version: v1.62.2 gotest: needs: From 371f057cd1ab759d2a696e88b69769dd2bc931be Mon Sep 17 00:00:00 2001 From: drivebyer Date: Tue, 10 Dec 2024 22:38:41 +0800 Subject: [PATCH 23/28] disable Signed-off-by: drivebyer --- .golangci.yml | 
1 - 1 file changed, 1 deletion(-) diff --git a/.golangci.yml b/.golangci.yml index 2776e5505..ce2837880 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -24,7 +24,6 @@ linters: - gofmt - gofumpt - goprintffuncname - - gosec - gosimple - govet - grouper From a6454ab011a057a2d1ffc428dc2d7a0dbb89f1dd Mon Sep 17 00:00:00 2001 From: drivebyer Date: Tue, 10 Dec 2024 23:21:43 +0800 Subject: [PATCH 24/28] update Signed-off-by: drivebyer --- tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml b/tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml index eeb088e31..a97c96b03 100644 --- a/tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml +++ b/tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml @@ -39,7 +39,7 @@ spec: content: | kubectl -n ${NAMESPACE} delete pod redis-replication-0 - sleep: - duration: 30s + duration: 60s - name: Test Master IP consistency try: From 60ef5a22412895e4037e13d6a00b8f822e8988a7 Mon Sep 17 00:00:00 2001 From: drivebyer Date: Tue, 10 Dec 2024 23:22:03 +0800 Subject: [PATCH 25/28] update Signed-off-by: drivebyer --- tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml b/tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml index a97c96b03..2e4ee24e5 100644 --- a/tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml +++ b/tests/e2e-chainsaw/v1beta2/setup/ha/chainsaw-test.yaml @@ -39,7 +39,7 @@ spec: content: | kubectl -n ${NAMESPACE} delete pod redis-replication-0 - sleep: - duration: 60s + duration: 120s - name: Test Master IP consistency try: From b0fe42cc53a07e0ebff82030cf0caf6a708cbd13 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 01:43:09 +0000 Subject: [PATCH 26/28] chore(deps): bump github.com/onsi/ginkgo/v2 from 2.20.1 to 2.22.0 Bumps [github.com/onsi/ginkgo/v2](https://github.com/onsi/ginkgo) from 2.20.1 to 2.22.0. - [Release notes](https://github.com/onsi/ginkgo/releases) - [Changelog](https://github.com/onsi/ginkgo/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/ginkgo/compare/v2.20.1...v2.22.0) --- updated-dependencies: - dependency-name: github.com/onsi/ginkgo/v2 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 3e91767ec..a99745e3f 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/banzaicloud/k8s-objectmatcher v1.8.0 github.com/go-logr/logr v1.4.2 github.com/go-redis/redismock/v9 v9.2.0 - github.com/onsi/ginkgo/v2 v2.20.1 + github.com/onsi/ginkgo/v2 v2.22.0 github.com/onsi/gomega v1.35.1 github.com/pkg/errors v0.9.1 github.com/redis/go-redis/v9 v9.7.0 @@ -40,7 +40,7 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 // indirect + github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/imdario/mergo v0.3.16 // indirect @@ -67,7 +67,7 @@ require ( golang.org/x/term v0.25.0 // indirect golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/tools v0.26.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/protobuf v1.35.1 // indirect diff --git a/go.sum b/go.sum index ca3356138..c100e7740 100644 --- a/go.sum +++ b/go.sum @@ -100,8 +100,8 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA= -github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -153,8 +153,8 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.20.1 h1:YlVIbqct+ZmnEph770q9Q7NVAz4wwIiVNahee6JyUzo= -github.com/onsi/ginkgo/v2 v2.20.1/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= @@ -269,8 +269,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn 
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 683258976ebea6fd6e62528133211fdb948fd8b9 Mon Sep 17 00:00:00 2001 From: yangw Date: Wed, 11 Dec 2024 09:59:44 +0800 Subject: [PATCH 27/28] chore: update dependabot configuration to change update schedule from daily to monthly for gomod and github-actions Signed-off-by: yangw --- .github/dependabot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 0476c5b94..fe405aaaa 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -3,8 +3,8 @@ updates: - package-ecosystem: gomod directory: / schedule: - interval: daily + interval: monthly - package-ecosystem: github-actions directory: / schedule: - interval: daily \ No newline at end of file + interval: monthly \ No newline at end of file From cda99a4c01c002b2d9ee2be141c053e4b821f506 Mon Sep 17 00:00:00 2001 From: Husni Alhamdani Date: Mon, 16 Dec 2024 13:09:31 +0700 Subject: [PATCH 28/28] feat: support PDB in redisreplication (#1166) **Description** Support PDB in redisreplication **Type of change** * Bug fix (non-breaking change which fixes an issue) * New feature (non-breaking change which adds functionality) * Breaking change (fix or feature that would cause existing functionality to not work as expected) **Checklist** - [ ] Tests have been added/modified and all tests pass. - [ ] Functionality/bugs have been confirmed to be unchanged or fixed. - [x] I have performed a self-review of my own code. - [ ] Documentation has been updated or added where necessary. 
**Additional Context** --- api/v1beta2/redisreplication_types.go | 42 ++++++------- api/v1beta2/zz_generated.deepcopy.go | 5 ++ ...edis.opstreelabs.in_redisreplications.yaml | 13 ++++ .../redisreplication_controller.go | 8 +++ pkg/k8sutils/poddisruption.go | 59 +++++++++++++++++-- 5 files changed, 102 insertions(+), 25 deletions(-) diff --git a/api/v1beta2/redisreplication_types.go b/api/v1beta2/redisreplication_types.go index 8b2510cc8..0f19ce3a1 100644 --- a/api/v1beta2/redisreplication_types.go +++ b/api/v1beta2/redisreplication_types.go @@ -1,31 +1,33 @@ package v1beta2 import ( + common "github.com/OT-CONTAINER-KIT/redis-operator/api" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type RedisReplicationSpec struct { - Size *int32 `json:"clusterSize"` - KubernetesConfig KubernetesConfig `json:"kubernetesConfig"` - RedisExporter *RedisExporter `json:"redisExporter,omitempty"` - RedisConfig *RedisConfig `json:"redisConfig,omitempty"` - Storage *Storage `json:"storage,omitempty"` - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - PodSecurityContext *corev1.PodSecurityContext `json:"podSecurityContext,omitempty"` - SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"` - PriorityClassName string `json:"priorityClassName,omitempty"` - Affinity *corev1.Affinity `json:"affinity,omitempty"` - Tolerations *[]corev1.Toleration `json:"tolerations,omitempty"` - TLS *TLSConfig `json:"TLS,omitempty"` - ACL *ACLConfig `json:"acl,omitempty"` - ReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` - LivenessProbe *corev1.Probe `json:"livenessProbe,omitempty" protobuf:"bytes,12,opt,name=livenessProbe"` - InitContainer *InitContainer `json:"initContainer,omitempty"` - Sidecars *[]Sidecar `json:"sidecars,omitempty"` - ServiceAccountName *string `json:"serviceAccountName,omitempty"` - TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"` - EnvVars *[]corev1.EnvVar `json:"env,omitempty"` + Size *int32 `json:"clusterSize"` + KubernetesConfig KubernetesConfig `json:"kubernetesConfig"` + RedisExporter *RedisExporter `json:"redisExporter,omitempty"` + RedisConfig *RedisConfig `json:"redisConfig,omitempty"` + Storage *Storage `json:"storage,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + PodSecurityContext *corev1.PodSecurityContext `json:"podSecurityContext,omitempty"` + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"` + PriorityClassName string `json:"priorityClassName,omitempty"` + Affinity *corev1.Affinity `json:"affinity,omitempty"` + Tolerations *[]corev1.Toleration `json:"tolerations,omitempty"` + TLS *TLSConfig `json:"TLS,omitempty"` + PodDisruptionBudget *common.RedisPodDisruptionBudget `json:"pdb,omitempty"` + ACL *ACLConfig `json:"acl,omitempty"` + ReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` + LivenessProbe *corev1.Probe `json:"livenessProbe,omitempty" protobuf:"bytes,12,opt,name=livenessProbe"` + InitContainer *InitContainer `json:"initContainer,omitempty"` + Sidecars *[]Sidecar `json:"sidecars,omitempty"` + ServiceAccountName *string `json:"serviceAccountName,omitempty"` + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"` + EnvVars *[]corev1.EnvVar `json:"env,omitempty"` } func (cr 
*RedisReplicationSpec) GetReplicationCounts(t string) int32 { diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go index 55493e1d9..331905267 100644 --- a/api/v1beta2/zz_generated.deepcopy.go +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -593,6 +593,11 @@ func (in *RedisReplicationSpec) DeepCopyInto(out *RedisReplicationSpec) { *out = new(TLSConfig) (*in).DeepCopyInto(*out) } + if in.PodDisruptionBudget != nil { + in, out := &in.PodDisruptionBudget, &out.PodDisruptionBudget + *out = new(api.RedisPodDisruptionBudget) + (*in).DeepCopyInto(*out) + } if in.ACL != nil { in, out := &in.ACL, &out.ACL *out = new(ACLConfig) diff --git a/config/crd/bases/redis.redis.opstreelabs.in_redisreplications.yaml b/config/crd/bases/redis.redis.opstreelabs.in_redisreplications.yaml index 38a26b931..3dd86791c 100644 --- a/config/crd/bases/redis.redis.opstreelabs.in_redisreplications.yaml +++ b/config/crd/bases/redis.redis.opstreelabs.in_redisreplications.yaml @@ -6240,6 +6240,19 @@ spec: additionalProperties: type: string type: object + pdb: + description: RedisPodDisruptionBudget configure a PodDisruptionBudget + on the resource (leader/follower) + properties: + enabled: + type: boolean + maxUnavailable: + format: int32 + type: integer + minAvailable: + format: int32 + type: integer + type: object podSecurityContext: description: |- PodSecurityContext holds pod-level security attributes and common container settings. diff --git a/pkg/controllers/redisreplication/redisreplication_controller.go b/pkg/controllers/redisreplication/redisreplication_controller.go index cca13842c..0e3e0f433 100644 --- a/pkg/controllers/redisreplication/redisreplication_controller.go +++ b/pkg/controllers/redisreplication/redisreplication_controller.go @@ -46,6 +46,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu {typ: "finalizer", rec: r.reconcileFinalizer}, {typ: "statefulset", rec: r.reconcileStatefulSet}, {typ: "service", rec: r.reconcileService}, + {typ: "poddisruptionbudget", rec: r.reconcilePDB}, {typ: "redis", rec: r.reconcileRedis}, {typ: "status", rec: r.reconcileStatus}, } @@ -131,6 +132,13 @@ func (r *Reconciler) reconcileAnnotation(ctx context.Context, instance *redisv1b return intctrlutil.Reconciled() } +func (r *Reconciler) reconcilePDB(ctx context.Context, instance *redisv1beta2.RedisReplication) (ctrl.Result, error) { + if err := k8sutils.ReconcileReplicationPodDisruptionBudget(ctx, instance, instance.Spec.PodDisruptionBudget, r.K8sClient); err != nil { + return intctrlutil.RequeueAfter(ctx, time.Second*60, "") + } + return intctrlutil.Reconciled() +} + func (r *Reconciler) reconcileStatefulSet(ctx context.Context, instance *redisv1beta2.RedisReplication) (ctrl.Result, error) { if err := k8sutils.CreateReplicationRedis(ctx, instance, r.K8sClient); err != nil { return intctrlutil.RequeueAfter(ctx, time.Second*60, "") diff --git a/pkg/k8sutils/poddisruption.go b/pkg/k8sutils/poddisruption.go index 6dc97a970..69bad44cd 100644 --- a/pkg/k8sutils/poddisruption.go +++ b/pkg/k8sutils/poddisruption.go @@ -26,7 +26,7 @@ func ReconcileRedisPodDisruptionBudget(ctx context.Context, cr *redisv1beta2.Red return CreateOrUpdatePodDisruptionBudget(ctx, pdbDef, cl) } else { // Check if one exists, and delete it. 
-		_, err := GetPodDisruptionBudget(ctx, cr.Namespace, pdbName, cl)
+		_, err := getPodDisruptionBudget(ctx, cr.Namespace, pdbName, cl)
 		if err == nil {
 			return deletePodDisruptionBudget(ctx, cr.Namespace, pdbName, cl)
 		} else if err != nil && errors.IsNotFound(err) {
@@ -48,7 +48,29 @@ func ReconcileSentinelPodDisruptionBudget(ctx context.Context, cr *redisv1beta2.
 		return CreateOrUpdatePodDisruptionBudget(ctx, pdbDef, cl)
 	} else {
 		// Check if one exists, and delete it.
-		_, err := GetPodDisruptionBudget(ctx, cr.Namespace, pdbName, cl)
+		_, err := getPodDisruptionBudget(ctx, cr.Namespace, pdbName, cl)
+		if err == nil {
+			return deletePodDisruptionBudget(ctx, cr.Namespace, pdbName, cl)
+		} else if err != nil && errors.IsNotFound(err) {
+			log.FromContext(ctx).V(1).Info("Reconciliation Successful, no PodDisruptionBudget Found.")
+			// Its ok if its not found, as we're deleting anyway
+			return nil
+		}
+		return err
+	}
+}
+
+func ReconcileReplicationPodDisruptionBudget(ctx context.Context, cr *redisv1beta2.RedisReplication, pdbParams *commonapi.RedisPodDisruptionBudget, cl kubernetes.Interface) error {
+	pdbName := cr.ObjectMeta.Name + "-replication"
+	if pdbParams != nil && pdbParams.Enabled {
+		labels := getRedisLabels(cr.ObjectMeta.Name, replication, "replication", cr.GetObjectMeta().GetLabels())
+		annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations)
+		pdbMeta := generateObjectMetaInformation(pdbName, cr.Namespace, labels, annotations)
+		pdbDef := generateReplicationPodDisruptionBudgetDef(ctx, cr, "replication", pdbMeta, pdbParams)
+		return CreateOrUpdatePodDisruptionBudget(ctx, pdbDef, cl)
+	} else {
+		// Check if one exists, and delete it.
+		_, err := getPodDisruptionBudget(ctx, cr.Namespace, pdbName, cl)
 		if err == nil {
 			return deletePodDisruptionBudget(ctx, cr.Namespace, pdbName, cl)
 		} else if err != nil && errors.IsNotFound(err) {
@@ -87,6 +109,33 @@ func generatePodDisruptionBudgetDef(ctx context.Context, cr *redisv1beta2.RedisC
 	return pdbTemplate
 }
 
+// generateReplicationPodDisruptionBudgetDef will create a PodDisruptionBudget definition for RedisReplication
+func generateReplicationPodDisruptionBudgetDef(ctx context.Context, cr *redisv1beta2.RedisReplication, role string, pdbMeta metav1.ObjectMeta, pdbParams *commonapi.RedisPodDisruptionBudget) *policyv1.PodDisruptionBudget {
+	lblSelector := LabelSelectors(map[string]string{
+		"app":  fmt.Sprintf("%s-%s", cr.ObjectMeta.Name, role),
+		"role": role,
+	})
+	pdbTemplate := &policyv1.PodDisruptionBudget{
+		TypeMeta:   generateMetaInformation("PodDisruptionBudget", "policy/v1"),
+		ObjectMeta: pdbMeta,
+		Spec: policyv1.PodDisruptionBudgetSpec{
+			Selector: lblSelector,
+		},
+	}
+	if pdbParams.MinAvailable != nil {
+		pdbTemplate.Spec.MinAvailable = &intstr.IntOrString{Type: intstr.Int, IntVal: *pdbParams.MinAvailable}
+	}
+	if pdbParams.MaxUnavailable != nil {
+		pdbTemplate.Spec.MaxUnavailable = &intstr.IntOrString{Type: intstr.Int, IntVal: *pdbParams.MaxUnavailable}
+	}
+	// If we don't have a value for either, assume quorum: (N/2)+1
+	if pdbTemplate.Spec.MaxUnavailable == nil && pdbTemplate.Spec.MinAvailable == nil {
+		pdbTemplate.Spec.MinAvailable = &intstr.IntOrString{Type: intstr.Int, IntVal: (*cr.Spec.Size / 2) + 1}
+	}
+	AddOwnerRefToObject(pdbTemplate, redisReplicationAsOwner(cr))
+	return pdbTemplate
+}
+
 // generatePodDisruptionBudgetDef will create a PodDisruptionBudget definition
 func generateSentinelPodDisruptionBudgetDef(ctx context.Context, cr *redisv1beta2.RedisSentinel, role string, pdbMeta metav1.ObjectMeta, pdbParams 
*commonapi.RedisPodDisruptionBudget) *policyv1.PodDisruptionBudget { lblSelector := LabelSelectors(map[string]string{ @@ -116,7 +165,7 @@ func generateSentinelPodDisruptionBudgetDef(ctx context.Context, cr *redisv1beta // CreateOrUpdateService method will create or update Redis service func CreateOrUpdatePodDisruptionBudget(ctx context.Context, pdbDef *policyv1.PodDisruptionBudget, cl kubernetes.Interface) error { - storedPDB, err := GetPodDisruptionBudget(ctx, pdbDef.Namespace, pdbDef.Name, cl) + storedPDB, err := getPodDisruptionBudget(ctx, pdbDef.Namespace, pdbDef.Name, cl) if err != nil { if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(pdbDef); err != nil { //nolint log.FromContext(ctx).Error(err, "Unable to patch redis PodDisruptionBudget with comparison object") @@ -204,8 +253,8 @@ func deletePodDisruptionBudget(ctx context.Context, namespace string, pdbName st return nil } -// GetPodDisruptionBudget is a method to get PodDisruptionBudgets in Kubernetes -func GetPodDisruptionBudget(ctx context.Context, namespace string, pdb string, cl kubernetes.Interface) (*policyv1.PodDisruptionBudget, error) { +// getPodDisruptionBudget is a method to get PodDisruptionBudgets in Kubernetes +func getPodDisruptionBudget(ctx context.Context, namespace string, pdb string, cl kubernetes.Interface) (*policyv1.PodDisruptionBudget, error) { getOpts := metav1.GetOptions{ TypeMeta: generateMetaInformation("PodDisruptionBudget", "policy/v1"), }
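Usage sketch (editor's note, not part of the patch): the manifest below illustrates how the new `pdb` field on `RedisReplication` maps to the PodDisruptionBudget the operator generates. The resource name and image tag are placeholders, and `kubernetesConfig` is trimmed to one illustrative field; only `clusterSize` and `pdb` come directly from the schema added in this patch. With `enabled: true` and neither `minAvailable` nor `maxUnavailable` set, `generateReplicationPodDisruptionBudgetDef` falls back to quorum, `(N/2)+1` with integer division, so three replicas yield `minAvailable: 2`.

```yaml
# Hypothetical RedisReplication using the new pdb stanza (name and image are placeholders)
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
  name: redis-repl
spec:
  clusterSize: 3
  kubernetesConfig:
    image: quay.io/opstree/redis:v7.0.12   # assumed image/tag, adjust to your registry
  pdb:
    enabled: true   # no minAvailable/maxUnavailable set, so the quorum default applies
---
# PodDisruptionBudget the operator derives from the spec above (sketch)
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: redis-repl-replication        # cr.ObjectMeta.Name + "-replication"
spec:
  minAvailable: 2                     # (3/2)+1 == 2, the quorum fallback
  selector:
    matchLabels:
      app: redis-repl-replication    # fmt.Sprintf("%s-%s", name, "replication")
      role: replication
```

Setting `pdb.enabled: false` (or omitting `pdb`) takes the delete branch in `ReconcileReplicationPodDisruptionBudget`, so a previously created budget is cleaned up rather than left dangling.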