From cfe9d382523ae6ae0a911f306b796562334322b8 Mon Sep 17 00:00:00 2001 From: Samuel Vijaykumar M Date: Thu, 18 Aug 2022 14:36:04 +0530 Subject: [PATCH] TopologySpreadContraints, Custom Port, Ping User (#1) * Fixing tests * Create my branch * WIP: Changes to makefile * WIP: Update the redis-exporter images * Pass the right shutdown command * Adding the containersecuritycontext * WIP: Add topologySpreadConstraints * WIP: TopologySpreadConstraints * WIP: Port changes * WIP: Port change * WIP: Adding port to readiness script * WIP: New deployment * WIP: Port changes * WIP: Port change liveness and exporter * WIP: Ensure port is passed * WIP: Updated the image reference * WIP: Residual comment * Making PR Ready(WIP) --- .gitignore | 1 + api/redisfailover/v1/defaults.go | 6 +- api/redisfailover/v1/types.go | 89 +- api/redisfailover/v1/validate.go | 7 +- api/redisfailover/v1/validate_test.go | 1 + example/kustomize/base/kustomization.yaml | 5 + example/kustomize/base/rf.yaml | 68 ++ .../overlays/multiple/kustomization.yaml | 32 + example/kustomize/overlays/multiple/port.yaml | 8 + .../kustomize/overlays/multiple/replicas.yaml | 7 + .../overlays/multiple/resources.yaml | 13 + .../overlays/ports/kustomization.yaml | 30 + example/kustomize/overlays/ports/port.yaml | 8 + .../kustomize/overlays/ports/resources.yaml | 19 + .../overlays/resources/kustomization.yaml | 30 + .../overlays/resources/resources.yaml | 13 + example/operator/kustomization.yaml | 16 + ...atabases.spotahome.com_redisfailovers.yaml | 1027 +++++++++++++++++ mocks/service/redis/Client.go | 62 +- operator/redisfailover/checker.go | 15 +- operator/redisfailover/checker_test.go | 4 +- operator/redisfailover/service/check.go | 23 +- operator/redisfailover/service/check_test.go | 34 +- operator/redisfailover/service/generator.go | 153 ++- operator/redisfailover/service/heal.go | 22 +- operator/redisfailover/service/heal_test.go | 31 +- service/redis/client.go | 32 +- .../redisfailover/creation_test.go | 4 +- 28 
files changed, 1551 insertions(+), 209 deletions(-) create mode 100644 example/kustomize/base/kustomization.yaml create mode 100644 example/kustomize/base/rf.yaml create mode 100644 example/kustomize/overlays/multiple/kustomization.yaml create mode 100644 example/kustomize/overlays/multiple/port.yaml create mode 100644 example/kustomize/overlays/multiple/replicas.yaml create mode 100644 example/kustomize/overlays/multiple/resources.yaml create mode 100644 example/kustomize/overlays/ports/kustomization.yaml create mode 100644 example/kustomize/overlays/ports/port.yaml create mode 100644 example/kustomize/overlays/ports/resources.yaml create mode 100644 example/kustomize/overlays/resources/kustomization.yaml create mode 100644 example/kustomize/overlays/resources/resources.yaml create mode 100644 example/operator/kustomization.yaml diff --git a/.gitignore b/.gitignore index 692042ff4..6e2159f6d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ /bin .bash_history .vscode +/tmp \ No newline at end of file diff --git a/api/redisfailover/v1/defaults.go b/api/redisfailover/v1/defaults.go index c3df011d9..cc8259141 100644 --- a/api/redisfailover/v1/defaults.go +++ b/api/redisfailover/v1/defaults.go @@ -3,10 +3,10 @@ package v1 const ( defaultRedisNumber = 3 defaultSentinelNumber = 3 - defaultSentinelExporterImage = "quay.io/oliver006/redis_exporter:v1.33.0-alpine" - defaultExporterImage = "quay.io/oliver006/redis_exporter:v1.33.0-alpine" + defaultSentinelExporterImage = "quay.io/oliver006/redis_exporter:v1.43.0" + defaultExporterImage = "quay.io/oliver006/redis_exporter:v1.43.0" defaultImage = "redis:6.2.6-alpine" - defaultRedisPort = "6379" + defaultRedisPort = 6379 ) var ( diff --git a/api/redisfailover/v1/types.go b/api/redisfailover/v1/types.go index 157f036d1..fb298b776 100644 --- a/api/redisfailover/v1/types.go +++ b/api/redisfailover/v1/types.go @@ -37,53 +37,56 @@ type RedisCommandRename struct { // RedisSettings defines the specification of the redis 
cluster type RedisSettings struct { - Image string `json:"image,omitempty"` - ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` - Replicas int32 `json:"replicas,omitempty"` - Resources corev1.ResourceRequirements `json:"resources,omitempty"` - CustomConfig []string `json:"customConfig,omitempty"` - CustomCommandRenames []RedisCommandRename `json:"customCommandRenames,omitempty"` - Command []string `json:"command,omitempty"` - ShutdownConfigMap string `json:"shutdownConfigMap,omitempty"` - Storage RedisStorage `json:"storage,omitempty"` - Exporter RedisExporter `json:"exporter,omitempty"` - Affinity *corev1.Affinity `json:"affinity,omitempty"` - SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"` - ContainerSecurityContext *corev1.SecurityContext `json:"containerSecurityContext,omitempty"` - ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` - Tolerations []corev1.Toleration `json:"tolerations,omitempty"` - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - PodAnnotations map[string]string `json:"podAnnotations,omitempty"` - ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` - HostNetwork bool `json:"hostNetwork,omitempty"` - DNSPolicy corev1.DNSPolicy `json:"dnsPolicy,omitempty"` - PriorityClassName string `json:"priorityClassName,omitempty"` - ServiceAccountName string `json:"serviceAccountName,omitempty"` - TerminationGracePeriodSeconds int64 `json:"terminationGracePeriod,omitempty"` + Image string `json:"image,omitempty"` + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + Replicas int32 `json:"replicas,omitempty"` + Port int32 `json:"port,omitempty"` + Resources corev1.ResourceRequirements `json:"resources,omitempty"` + CustomConfig []string `json:"customConfig,omitempty"` + CustomCommandRenames []RedisCommandRename `json:"customCommandRenames,omitempty"` + Command []string `json:"command,omitempty"` + ShutdownConfigMap string 
`json:"shutdownConfigMap,omitempty"` + Storage RedisStorage `json:"storage,omitempty"` + Exporter RedisExporter `json:"exporter,omitempty"` + Affinity *corev1.Affinity `json:"affinity,omitempty"` + SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"` + ContainerSecurityContext *corev1.SecurityContext `json:"containerSecurityContext,omitempty"` + ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + PodAnnotations map[string]string `json:"podAnnotations,omitempty"` + ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` + HostNetwork bool `json:"hostNetwork,omitempty"` + DNSPolicy corev1.DNSPolicy `json:"dnsPolicy,omitempty"` + PriorityClassName string `json:"priorityClassName,omitempty"` + ServiceAccountName string `json:"serviceAccountName,omitempty"` + TerminationGracePeriodSeconds int64 `json:"terminationGracePeriod,omitempty"` } // SentinelSettings defines the specification of the sentinel cluster type SentinelSettings struct { - Image string `json:"image,omitempty"` - ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` - Replicas int32 `json:"replicas,omitempty"` - Resources corev1.ResourceRequirements `json:"resources,omitempty"` - CustomConfig []string `json:"customConfig,omitempty"` - Command []string `json:"command,omitempty"` - Affinity *corev1.Affinity `json:"affinity,omitempty"` - SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"` - ContainerSecurityContext *corev1.SecurityContext `json:"containerSecurityContext,omitempty"` - ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` - Tolerations []corev1.Toleration `json:"tolerations,omitempty"` - NodeSelector 
map[string]string `json:"nodeSelector,omitempty"` - PodAnnotations map[string]string `json:"podAnnotations,omitempty"` - ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` - Exporter SentinelExporter `json:"exporter,omitempty"` - ConfigCopy SentinelConfigCopy `json:"configCopy,omitempty"` - HostNetwork bool `json:"hostNetwork,omitempty"` - DNSPolicy corev1.DNSPolicy `json:"dnsPolicy,omitempty"` - PriorityClassName string `json:"priorityClassName,omitempty"` - ServiceAccountName string `json:"serviceAccountName,omitempty"` + Image string `json:"image,omitempty"` + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + Replicas int32 `json:"replicas,omitempty"` + Resources corev1.ResourceRequirements `json:"resources,omitempty"` + CustomConfig []string `json:"customConfig,omitempty"` + Command []string `json:"command,omitempty"` + Affinity *corev1.Affinity `json:"affinity,omitempty"` + SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"` + ContainerSecurityContext *corev1.SecurityContext `json:"containerSecurityContext,omitempty"` + ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + PodAnnotations map[string]string `json:"podAnnotations,omitempty"` + ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` + Exporter SentinelExporter `json:"exporter,omitempty"` + ConfigCopy SentinelConfigCopy `json:"configCopy,omitempty"` + HostNetwork bool `json:"hostNetwork,omitempty"` + DNSPolicy corev1.DNSPolicy `json:"dnsPolicy,omitempty"` + PriorityClassName string `json:"priorityClassName,omitempty"` + ServiceAccountName string `json:"serviceAccountName,omitempty"` } // AuthSettings contains settings about auth diff --git 
a/api/redisfailover/v1/validate.go b/api/redisfailover/v1/validate.go index 3b38d54ab..3f28b2557 100644 --- a/api/redisfailover/v1/validate.go +++ b/api/redisfailover/v1/validate.go @@ -3,6 +3,7 @@ package v1 import ( "errors" "fmt" + "strconv" ) const ( @@ -22,7 +23,7 @@ func (r *RedisFailover) Validate() error { } if r.Spec.BootstrapNode.Port == "" { - r.Spec.BootstrapNode.Port = defaultRedisPort + r.Spec.BootstrapNode.Port = strconv.Itoa(defaultRedisPort) } initialRedisCustomConfig = bootstrappingRedisCustomConfig } @@ -45,6 +46,10 @@ func (r *RedisFailover) Validate() error { r.Spec.Sentinel.Replicas = defaultSentinelNumber } + if r.Spec.Redis.Port <= 0 { + r.Spec.Redis.Port = defaultRedisPort + } + if r.Spec.Redis.Exporter.Image == "" { r.Spec.Redis.Exporter.Image = defaultExporterImage } diff --git a/api/redisfailover/v1/validate_test.go b/api/redisfailover/v1/validate_test.go index f130b59bf..0cc8302de 100644 --- a/api/redisfailover/v1/validate_test.go +++ b/api/redisfailover/v1/validate_test.go @@ -104,6 +104,7 @@ func TestValidate(t *testing.T) { Redis: RedisSettings{ Image: defaultImage, Replicas: defaultRedisNumber, + Port: defaultRedisPort, Exporter: RedisExporter{ Image: defaultExporterImage, }, diff --git a/example/kustomize/base/kustomization.yaml b/example/kustomize/base/kustomization.yaml new file mode 100644 index 000000000..64759d791 --- /dev/null +++ b/example/kustomize/base/kustomization.yaml @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: +- rf.yaml diff --git a/example/kustomize/base/rf.yaml b/example/kustomize/base/rf.yaml new file mode 100644 index 000000000..4c1c7992e --- /dev/null +++ b/example/kustomize/base/rf.yaml @@ -0,0 +1,68 @@ +# The base ensures the following +# - Redis has three replicas +# - Sentinel has three replicas +# - A label whitelist `redis.failover.io` to propogate additional info +# - Redis is distributed evenly across AZs(Ref: topologySpreadConstraints) +# - Sentinel is 
distributed evenly across AZs(Ref: topologySpreadConstraints) +# - Redis and their Sentinels do not at any given time coexist(Ref: affinity.podAntiAffinity) +--- +apiVersion: databases.spotahome.com/v1 +kind: RedisFailover +metadata: + name: redis + labels: + redis.failover.io/tenant: base +spec: + sentinel: + replicas: 3 + exporter: + enabled: true + topologySpreadConstraints: + - labelSelector: + matchLabels: + app.kubernetes.io/component: sentinel + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: DoNotSchedule + - labelSelector: + matchLabels: + app.kubernetes.io/component: sentinel + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: redis + redis.failover.io/tenant: base + topologyKey: "kubernetes.io/hostname" + redis: + replicas: 3 + exporter: + enabled: true + topologySpreadConstraints: + - labelSelector: + matchLabels: + app.kubernetes.io/component: redis + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: DoNotSchedule + - labelSelector: + matchLabels: + app.kubernetes.io/component: redis + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: sentinel + redis.failover.io/tenant: base + topologyKey: "kubernetes.io/hostname" + terminationGracePeriod: 100 + labelWhitelist: + - ^redis.failover.io.* diff --git a/example/kustomize/overlays/multiple/kustomization.yaml b/example/kustomize/overlays/multiple/kustomization.yaml new file mode 100644 index 000000000..882e00126 --- /dev/null +++ b/example/kustomize/overlays/multiple/kustomization.yaml @@ -0,0 +1,32 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +bases: +- "../../base" + 
+patches: +- resources.yaml +- replicas.yaml +- port.yaml + +commonLabels: + redis.failover.io/tenant: multiple + redis.failover.io/product: testing + +nameSuffix: "-multiple" + +namespace: multiple + +patchesJson6902: + - target: + group: databases.spotahome.com + version: v1 + kind: RedisFailover + name: redis + patch: |- + - op: replace + path: /spec/redis/affinity/podAntiAffinity/requiredDuringSchedulingIgnoredDuringExecution/0/labelSelector/matchLabels/redis.failover.io~1tenant + value: multiple + - op: replace + path: /spec/sentinel/affinity/podAntiAffinity/requiredDuringSchedulingIgnoredDuringExecution/0/labelSelector/matchLabels/redis.failover.io~1tenant + value: multiple diff --git a/example/kustomize/overlays/multiple/port.yaml b/example/kustomize/overlays/multiple/port.yaml new file mode 100644 index 000000000..3b3b49234 --- /dev/null +++ b/example/kustomize/overlays/multiple/port.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: databases.spotahome.com/v1 +kind: RedisFailover +metadata: + name: redis +spec: + redis: + port: 12345 diff --git a/example/kustomize/overlays/multiple/replicas.yaml b/example/kustomize/overlays/multiple/replicas.yaml new file mode 100644 index 000000000..23e69ba36 --- /dev/null +++ b/example/kustomize/overlays/multiple/replicas.yaml @@ -0,0 +1,7 @@ +apiVersion: databases.spotahome.com/v1 +kind: RedisFailover +metadata: + name: redis +spec: + redis: + replicas: 5 diff --git a/example/kustomize/overlays/multiple/resources.yaml b/example/kustomize/overlays/multiple/resources.yaml new file mode 100644 index 000000000..0d2e7cc24 --- /dev/null +++ b/example/kustomize/overlays/multiple/resources.yaml @@ -0,0 +1,13 @@ +apiVersion: databases.spotahome.com/v1 +kind: RedisFailover +metadata: + name: redis +spec: + redis: + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 200m + memory: 500Mi diff --git a/example/kustomize/overlays/ports/kustomization.yaml b/example/kustomize/overlays/ports/kustomization.yaml new file mode 100644 
index 000000000..9b0df282b --- /dev/null +++ b/example/kustomize/overlays/ports/kustomization.yaml @@ -0,0 +1,30 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +bases: +- "../../base" + +commonLabels: + redis.failover.io/tenant: ports + redis.failover.io/product: testing + +nameSuffix: "-ports" + +namespace: ports +patches: +- port.yaml +- resources.yaml + +patchesJson6902: + - target: + group: databases.spotahome.com + version: v1 + kind: RedisFailover + name: redis + patch: |- + - op: replace + path: /spec/redis/affinity/podAntiAffinity/requiredDuringSchedulingIgnoredDuringExecution/0/labelSelector/matchLabels/redis.failover.io~1tenant + value: ports + - op: replace + path: /spec/sentinel/affinity/podAntiAffinity/requiredDuringSchedulingIgnoredDuringExecution/0/labelSelector/matchLabels/redis.failover.io~1tenant + value: ports diff --git a/example/kustomize/overlays/ports/port.yaml b/example/kustomize/overlays/ports/port.yaml new file mode 100644 index 000000000..3b3b49234 --- /dev/null +++ b/example/kustomize/overlays/ports/port.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: databases.spotahome.com/v1 +kind: RedisFailover +metadata: + name: redis +spec: + redis: + port: 12345 diff --git a/example/kustomize/overlays/ports/resources.yaml b/example/kustomize/overlays/ports/resources.yaml new file mode 100644 index 000000000..cf85cee20 --- /dev/null +++ b/example/kustomize/overlays/ports/resources.yaml @@ -0,0 +1,19 @@ +apiVersion: databases.spotahome.com/v1 +kind: RedisFailover +metadata: + name: redis +spec: + sentinel: + resources: + requests: + cpu: 100m + limits: + memory: 100Mi + redis: + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 200m + memory: 500Mi diff --git a/example/kustomize/overlays/resources/kustomization.yaml b/example/kustomize/overlays/resources/kustomization.yaml new file mode 100644 index 000000000..738a79ff0 --- /dev/null +++ b/example/kustomize/overlays/resources/kustomization.yaml @@ -0,0 +1,30 @@ 
+apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +bases: +- "../../base" + +patches: +- resources.yaml + +commonLabels: + redis.failover.io/tenant: resources + redis.failover.io/product: testing + +nameSuffix: "-resources" + +namespace: resources + +patchesJson6902: + - target: + group: databases.spotahome.com + version: v1 + kind: RedisFailover + name: redis + patch: |- + - op: replace + path: /spec/redis/affinity/podAntiAffinity/requiredDuringSchedulingIgnoredDuringExecution/0/labelSelector/matchLabels/redis.failover.io~1tenant + value: resources + - op: replace + path: /spec/sentinel/affinity/podAntiAffinity/requiredDuringSchedulingIgnoredDuringExecution/0/labelSelector/matchLabels/redis.failover.io~1tenant + value: resources diff --git a/example/kustomize/overlays/resources/resources.yaml b/example/kustomize/overlays/resources/resources.yaml new file mode 100644 index 000000000..0d2e7cc24 --- /dev/null +++ b/example/kustomize/overlays/resources/resources.yaml @@ -0,0 +1,13 @@ +apiVersion: databases.spotahome.com/v1 +kind: RedisFailover +metadata: + name: redis +spec: + redis: + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 200m + memory: 500Mi diff --git a/example/operator/kustomization.yaml b/example/operator/kustomization.yaml new file mode 100644 index 000000000..448ead9ed --- /dev/null +++ b/example/operator/kustomization.yaml @@ -0,0 +1,16 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +# NOTE: For testing you local change you could uncomment and override the following +# images: +# - name: quay.io/spotahome/redis-operator +# newName: your-repo/redis-operator +# newTag: your-tag + +resources: +- "rolebinding.yaml" +- "roles.yaml" +- "operator.yaml" +- "serviceaccount.yaml" + +namespace: redis-failover \ No newline at end of file diff --git a/manifests/databases.spotahome.com_redisfailovers.yaml b/manifests/databases.spotahome.com_redisfailovers.yaml index 1c0990f50..ed40f69fb 100644 --- 
a/manifests/databases.spotahome.com_redisfailovers.yaml +++ b/manifests/databases.spotahome.com_redisfailovers.yaml @@ -945,6 +945,166 @@ spec: items: type: string type: array + containerSecurityContext: + description: SecurityContext holds security configuration that + will be applied to a container. Some fields are present in both + SecurityContext and PodSecurityContext. When both are set, + the values in SecurityContext take precedence. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a + process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the + container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in + privileged containers are essentially equivalent to root + on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use + for the containers. The default is DefaultProcMount which + uses the container runtime defaults for readonly paths and + masked paths. This requires the ProcMountType feature flag + to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. 
+ type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. 
+ If seccomp options are provided at both the pod & container + level, the container options override the pod options. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must + be preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a + profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile + should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is + alpha-level and will only be honored by components that + enable the WindowsHostProcessContainers feature flag. + Setting this field without the feature flag will result + in errors when validating the Pod. All of a Pod's containers + must have the same effective HostProcess value (it is + not allowed to have a mix of HostProcess containers + and non-HostProcess containers). 
In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object customCommandRenames: items: description: RedisCommandRename defines the specification of @@ -971,6 +1131,171 @@ spec: items: type: string type: array + containerSecurityContext: + description: SecurityContext holds security configuration + that will be applied to a container. Some fields are present + in both SecurityContext and PodSecurityContext. When both + are set, the values in SecurityContext take precedence. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag + will be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running + containers. Defaults to the default set of capabilities + granted by the container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent + to root on the host. Defaults to false. 
+ type: boolean + procMount: + description: procMount denotes the type of proc mount + to use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root + filesystem. Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as + a non-root user. If true, the Kubelet will validate + the image at runtime to ensure that it does not run + as UID 0 (root) and fail to start the container if it + does. If unset or false, no such validation will be + performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the + container. If unspecified, the container runtime will + allocate a random SELinux context for each container. May + also be set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + properties: + level: + description: Level is SELinux level label that applies + to the container. 
+ type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + properties: + localhostProfile: + description: localhostProfile indicates a profile + defined in a file on the node should be used. The + profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's + configured seccomp profile location. Must only be + set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp + profile will be applied. Valid options are: \n Localhost + - a profile defined in a file on the node should + be used. RuntimeDefault - the container runtime + default profile should be used. Unconfined - no + profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to + all containers. If unspecified, the options from the + PodSecurityContext will be used. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA + admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec + named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. 
+ type: string + hostProcess: + description: HostProcess determines if a container + should be run as a 'Host Process' container. This + field is alpha-level and will only be honored by + components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the feature + flag will result in errors when validating the Pod. + All of a Pod's containers must have the same effective + HostProcess value (it is not allowed to have a mix + of HostProcess containers and non-HostProcess containers). In + addition, if HostProcess is true then HostNetwork + must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object enabled: type: boolean env: @@ -1148,6 +1473,9 @@ spec: additionalProperties: type: string type: object + port: + format: int32 + type: integer priorityClassName: type: string replicas: @@ -1713,6 +2041,108 @@ spec: type: string type: object type: array + topologySpreadConstraints: + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine + the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. 
+ type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + maxSkew: + description: 'MaxSkew describes the degree to which pods + may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the number + of matching pods in the target topology and the global + minimum. For example, in a 3-zone cluster, MaxSkew is + set to 1, and pods with the same labelSelector spread + as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | + - if MaxSkew is 1, incoming pod can only be scheduled + to zone3 to become 1/1/1; scheduling it onto zone1(zone2) + would make the ActualSkew(2-0) on zone1(zone2) violate + MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled + onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies that + satisfy it. It''s a required field. Default value is 1 + and 0 is not allowed.' + format: int32 + type: integer + topologyKey: + description: TopologyKey is the key of node labels. Nodes + that have a label with this key and identical values are + considered to be in the same topology. 
We consider each + as a "bucket", and try to put balanced number + of pods into each bucket. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with + a pod if it doesn''t satisfy the spread constraint. - + DoNotSchedule (default) tells the scheduler not to schedule + it. - ScheduleAnyway tells the scheduler to schedule the + pod in any location, but giving higher precedence to + topologies that would help reduce the skew. A constraint + is considered "Unsatisfiable" for an incoming pod if and + only if every possible node assigment for that pod would + violate "MaxSkew" on some topology. For example, in a + 3-zone cluster, MaxSkew is set to 1, and pods with the + same labelSelector spread as 3/1/1: | zone1 | zone2 | + zone3 | | P P P | P | P | If WhenUnsatisfiable + is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) + on zone2(zone3) satisfies MaxSkew(1). In other words, + the cluster can still be imbalanced, but scheduler won''t + make it *more* imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array type: object sentinel: description: SentinelSettings defines the specification of the sentinel @@ -2587,6 +3017,336 @@ spec: items: type: string type: array + configCopy: + description: SentinelConfigCopy defines the specification for + the sentinel exporter + properties: + containerSecurityContext: + description: SecurityContext holds security configuration + that will be applied to a container. Some fields are present + in both SecurityContext and PodSecurityContext. When both + are set, the values in SecurityContext take precedence. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent process. 
+ This bool directly controls if the no_new_privs flag + will be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running + containers. Defaults to the default set of capabilities + granted by the container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent + to root on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount + to use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root + filesystem. Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as + a non-root user. If true, the Kubelet will validate + the image at runtime to ensure that it does not run + as UID 0 (root) and fail to start the container if it + does. If unset or false, no such validation will be + performed. May also be set in PodSecurityContext. 
If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the + container. If unspecified, the container runtime will + allocate a random SELinux context for each container. May + also be set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + properties: + localhostProfile: + description: localhostProfile indicates a profile + defined in a file on the node should be used. The + profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's + configured seccomp profile location. Must only be + set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp + profile will be applied. Valid options are: \n Localhost + - a profile defined in a file on the node should + be used. 
RuntimeDefault - the container runtime + default profile should be used. Unconfined - no + profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to + all containers. If unspecified, the options from the + PodSecurityContext will be used. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA + admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec + named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container + should be run as a 'Host Process' container. This + field is alpha-level and will only be honored by + components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the feature + flag will result in errors when validating the Pod. + All of a Pod's containers must have the same effective + HostProcess value (it is not allowed to have a mix + of HostProcess containers and non-HostProcess containers). In + addition, if HostProcess is true then HostNetwork + must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + type: object + containerSecurityContext: + description: SecurityContext holds security configuration that + will be applied to a container. 
Some fields are present in both + SecurityContext and PodSecurityContext. When both are set, + the values in SecurityContext take precedence. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a + process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the + container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in + privileged containers are essentially equivalent to root + on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use + for the containers. The default is DefaultProcMount which + uses the container runtime defaults for readonly paths and + masked paths. This requires the ProcMountType feature flag + to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. 
+ format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. 
The profile must + be preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a + profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile + should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is + alpha-level and will only be honored by components that + enable the WindowsHostProcessContainers feature flag. + Setting this field without the feature flag will result + in errors when validating the Pod. All of a Pod's containers + must have the same effective HostProcess value (it is + not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. 
Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object customConfig: items: type: string @@ -2602,6 +3362,171 @@ spec: items: type: string type: array + containerSecurityContext: + description: SecurityContext holds security configuration + that will be applied to a container. Some fields are present + in both SecurityContext and PodSecurityContext. When both + are set, the values in SecurityContext take precedence. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag + will be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running + containers. Defaults to the default set of capabilities + granted by the container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent + to root on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount + to use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. 
+ type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root + filesystem. Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as + a non-root user. If true, the Kubelet will validate + the image at runtime to ensure that it does not run + as UID 0 (root) and fail to start the container if it + does. If unset or false, no such validation will be + performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the + container. If unspecified, the container runtime will + allocate a random SELinux context for each container. May + also be set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. 
+ type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + properties: + localhostProfile: + description: localhostProfile indicates a profile + defined in a file on the node should be used. The + profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's + configured seccomp profile location. Must only be + set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp + profile will be applied. Valid options are: \n Localhost + - a profile defined in a file on the node should + be used. RuntimeDefault - the container runtime + default profile should be used. Unconfined - no + profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to + all containers. If unspecified, the options from the + PodSecurityContext will be used. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA + admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec + named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container + should be run as a 'Host Process' container. This + field is alpha-level and will only be honored by + components that enable the WindowsHostProcessContainers + feature flag. 
Setting this field without the feature + flag will result in errors when validating the Pod. + All of a Pod's containers must have the same effective + HostProcess value (it is not allowed to have a mix + of HostProcess containers and non-HostProcess containers). In + addition, if HostProcess is true then HostNetwork + must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object enabled: type: boolean env: @@ -3019,6 +3944,108 @@ spec: type: string type: object type: array + topologySpreadConstraints: + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine + the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. 
+ This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + maxSkew: + description: 'MaxSkew describes the degree to which pods + may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the number + of matching pods in the target topology and the global + minimum. For example, in a 3-zone cluster, MaxSkew is + set to 1, and pods with the same labelSelector spread + as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | + - if MaxSkew is 1, incoming pod can only be scheduled + to zone3 to become 1/1/1; scheduling it onto zone1(zone2) + would make the ActualSkew(2-0) on zone1(zone2) violate + MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled + onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies that + satisfy it. It''s a required field. Default value is 1 + and 0 is not allowed.' + format: int32 + type: integer + topologyKey: + description: TopologyKey is the key of node labels. Nodes + that have a label with this key and identical values are + considered to be in the same topology. We consider each + as a "bucket", and try to put balanced number + of pods into each bucket. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with + a pod if it doesn''t satisfy the spread constraint. - + DoNotSchedule (default) tells the scheduler not to schedule + it. 
- ScheduleAnyway tells the scheduler to schedule the + pod in any location, but giving higher precedence to + topologies that would help reduce the skew. A constraint + is considered "Unsatisfiable" for an incoming pod if and + only if every possible node assigment for that pod would + violate "MaxSkew" on some topology. For example, in a + 3-zone cluster, MaxSkew is set to 1, and pods with the + same labelSelector spread as 3/1/1: | zone1 | zone2 | + zone3 | | P P P | P | P | If WhenUnsatisfiable + is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) + on zone2(zone3) satisfies MaxSkew(1). In other words, + the cluster can still be imbalanced, but scheduler won''t + make it *more* imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array type: object type: object required: diff --git a/mocks/service/redis/Client.go b/mocks/service/redis/Client.go index b8c5ae460..b262bf868 100644 --- a/mocks/service/redis/Client.go +++ b/mocks/service/redis/Client.go @@ -79,20 +79,20 @@ func (_m *Client) GetSentinelMonitor(ip string) (string, string, error) { return r0, r1, r2 } -// GetSlaveOf provides a mock function with given fields: ip, password -func (_m *Client) GetSlaveOf(ip string, password string) (string, error) { - ret := _m.Called(ip, password) +// GetSlaveOf provides a mock function with given fields: ip, port, password +func (_m *Client) GetSlaveOf(ip string, port string, password string) (string, error) { + ret := _m.Called(ip, port, password) var r0 string - if rf, ok := ret.Get(0).(func(string, string) string); ok { - r0 = rf(ip, password) + if rf, ok := ret.Get(0).(func(string, string, string) string); ok { + r0 = rf(ip, port, password) } else { r0 = ret.Get(0).(string) } var r1 error - if rf, ok := ret.Get(1).(func(string, string) error); ok { - r1 = rf(ip, password) + if rf, ok := ret.Get(1).(func(string, 
string, string) error); ok { + r1 = rf(ip, port, password) } else { r1 = ret.Error(1) } @@ -100,20 +100,20 @@ func (_m *Client) GetSlaveOf(ip string, password string) (string, error) { return r0, r1 } -// IsMaster provides a mock function with given fields: ip, password -func (_m *Client) IsMaster(ip string, password string) (bool, error) { - ret := _m.Called(ip, password) +// IsMaster provides a mock function with given fields: ip, port, password +func (_m *Client) IsMaster(ip string, port string, password string) (bool, error) { + ret := _m.Called(ip, port, password) var r0 bool - if rf, ok := ret.Get(0).(func(string, string) bool); ok { - r0 = rf(ip, password) + if rf, ok := ret.Get(0).(func(string, string, string) bool); ok { + r0 = rf(ip, port, password) } else { r0 = ret.Get(0).(bool) } var r1 error - if rf, ok := ret.Get(1).(func(string, string) error); ok { - r1 = rf(ip, password) + if rf, ok := ret.Get(1).(func(string, string, string) error); ok { + r1 = rf(ip, port, password) } else { r1 = ret.Error(1) } @@ -121,13 +121,13 @@ func (_m *Client) IsMaster(ip string, password string) (bool, error) { return r0, r1 } -// MakeMaster provides a mock function with given fields: ip, password -func (_m *Client) MakeMaster(ip string, password string) error { - ret := _m.Called(ip, password) +// MakeMaster provides a mock function with given fields: ip, port, password +func (_m *Client) MakeMaster(ip string, port string, password string) error { + ret := _m.Called(ip, port, password) var r0 error - if rf, ok := ret.Get(0).(func(string, string) error); ok { - r0 = rf(ip, password) + if rf, ok := ret.Get(0).(func(string, string, string) error); ok { + r0 = rf(ip, port, password) } else { r0 = ret.Error(0) } @@ -205,13 +205,13 @@ func (_m *Client) ResetSentinel(ip string) error { return r0 } -// SetCustomRedisConfig provides a mock function with given fields: ip, configs, password -func (_m *Client) SetCustomRedisConfig(ip string, configs []string, password string) error 
{ - ret := _m.Called(ip, configs, password) +// SetCustomRedisConfig provides a mock function with given fields: ip, port, configs, password +func (_m *Client) SetCustomRedisConfig(ip string, port string, configs []string, password string) error { + ret := _m.Called(ip, port, configs, password) var r0 error - if rf, ok := ret.Get(0).(func(string, []string, string) error); ok { - r0 = rf(ip, configs, password) + if rf, ok := ret.Get(0).(func(string, string, []string, string) error); ok { + r0 = rf(ip, port, configs, password) } else { r0 = ret.Error(0) } @@ -233,20 +233,20 @@ func (_m *Client) SetCustomSentinelConfig(ip string, configs []string) error { return r0 } -// SlaveIsReady provides a mock function with given fields: ip, password -func (_m *Client) SlaveIsReady(ip string, password string) (bool, error) { - ret := _m.Called(ip, password) +// SlaveIsReady provides a mock function with given fields: ip, port, password +func (_m *Client) SlaveIsReady(ip string, port string, password string) (bool, error) { + ret := _m.Called(ip, port, password) var r0 bool - if rf, ok := ret.Get(0).(func(string, string) bool); ok { - r0 = rf(ip, password) + if rf, ok := ret.Get(0).(func(string, string, string) bool); ok { + r0 = rf(ip, port, password) } else { r0 = ret.Get(0).(bool) } var r1 error - if rf, ok := ret.Get(1).(func(string, string) error); ok { - r1 = rf(ip, password) + if rf, ok := ret.Get(1).(func(string, string, string) error); ok { + r1 = rf(ip, port, password) } else { r1 = ret.Error(1) } diff --git a/operator/redisfailover/checker.go b/operator/redisfailover/checker.go index e54991dce..583d74628 100644 --- a/operator/redisfailover/checker.go +++ b/operator/redisfailover/checker.go @@ -2,6 +2,7 @@ package redisfailover import ( "errors" + "strconv" "time" redisfailoverv1 "github.com/spotahome/redis-operator/api/redisfailover/v1" @@ -23,9 +24,9 @@ func (r *RedisFailoverHandler) UpdateRedisesPods(rf *redisfailoverv1.RedisFailov masterIP, _ = 
r.rfChecker.GetMasterIP(rf) } // No perform updates when nodes are syncing, still not connected, etc. - for _, rp := range redises { - if rp != masterIP { - ready, err := r.rfChecker.CheckRedisSlavesReady(rp, rf) + for _, rip := range redises { + if rip != masterIP { + ready, err := r.rfChecker.CheckRedisSlavesReady(rip, rf) if err != nil { return err } @@ -169,8 +170,10 @@ func (r *RedisFailoverHandler) CheckAndHeal(rf *redisfailoverv1.RedisFailover) e if err != nil { return err } + + port := getRedisPort(rf.Spec.Redis.Port) for _, sip := range sentinels { - if err := r.rfChecker.CheckSentinelMonitor(sip, master); err != nil { + if err := r.rfChecker.CheckSentinelMonitor(sip, master, port); err != nil { r.logger.Debug("Sentinel is not monitoring the correct master") if err := r.rfHealer.NewSentinelMonitor(sip, master, rf); err != nil { return err @@ -260,3 +263,7 @@ func (r *RedisFailoverHandler) checkAndHealSentinels(rf *redisfailoverv1.RedisFa } return nil } + +func getRedisPort(p int32) string { + return strconv.Itoa(int(p)) +} diff --git a/operator/redisfailover/checker_test.go b/operator/redisfailover/checker_test.go index d352fbf75..02031e724 100644 --- a/operator/redisfailover/checker_test.go +++ b/operator/redisfailover/checker_test.go @@ -344,14 +344,14 @@ func TestCheckAndHeal(t *testing.T) { if test.bootstrapping { mrfc.On("CheckSentinelMonitor", sentinel, bootstrapMaster, bootstrapMasterPort).Once().Return(nil) } else { - mrfc.On("CheckSentinelMonitor", sentinel, master).Once().Return(nil) + mrfc.On("CheckSentinelMonitor", sentinel, master, "0").Once().Return(nil) } } else { if test.bootstrapping { mrfc.On("CheckSentinelMonitor", sentinel, bootstrapMaster, bootstrapMasterPort).Once().Return(errors.New("")) mrfh.On("NewSentinelMonitorWithPort", sentinel, bootstrapMaster, bootstrapMasterPort, rf).Once().Return(nil) } else { - mrfc.On("CheckSentinelMonitor", sentinel, master).Once().Return(errors.New("")) + mrfc.On("CheckSentinelMonitor", sentinel, 
master, "0").Once().Return(errors.New("")) mrfh.On("NewSentinelMonitor", sentinel, master, rf).Once().Return(nil) } } diff --git a/operator/redisfailover/service/check.go b/operator/redisfailover/service/check.go index 17ffbe2f0..c6278cc6f 100644 --- a/operator/redisfailover/service/check.go +++ b/operator/redisfailover/service/check.go @@ -3,6 +3,7 @@ package service import ( "errors" "fmt" + "strconv" "time" appsv1 "k8s.io/api/apps/v1" @@ -86,8 +87,9 @@ func (r *RedisFailoverChecker) CheckAllSlavesFromMaster(master string, rf *redis return err } + rport := getRedisPort(rf.Spec.Redis.Port) for _, rip := range rips { - slave, err := r.redisClient.GetSlaveOf(rip, password) + slave, err := r.redisClient.GetSlaveOf(rip, rport, password) if err != nil { return err } @@ -150,8 +152,9 @@ func (r *RedisFailoverChecker) GetMasterIP(rf *redisfailoverv1.RedisFailover) (s } masters := []string{} + rport := getRedisPort(rf.Spec.Redis.Port) for _, rip := range rips { - master, err := r.redisClient.IsMaster(rip, password) + master, err := r.redisClient.IsMaster(rip, rport, password) if err != nil { return "", err } @@ -179,8 +182,9 @@ func (r *RedisFailoverChecker) GetNumberMasters(rf *redisfailoverv1.RedisFailove return nMasters, err } + rport := getRedisPort(rf.Spec.Redis.Port) for _, rip := range rips { - master, err := r.redisClient.IsMaster(rip, password) + master, err := r.redisClient.IsMaster(rip, rport, password) if err != nil { return nMasters, err } @@ -255,9 +259,10 @@ func (r *RedisFailoverChecker) GetRedisesSlavesPods(rf *redisfailoverv1.RedisFai return redises, err } + rport := getRedisPort(rf.Spec.Redis.Port) for _, rp := range rps.Items { if rp.Status.Phase == corev1.PodRunning && rp.DeletionTimestamp == nil { // Only work with running - master, err := r.redisClient.IsMaster(rp.Status.PodIP, password) + master, err := r.redisClient.IsMaster(rp.Status.PodIP, rport, password) if err != nil { return []string{}, err } @@ -281,9 +286,10 @@ func (r 
*RedisFailoverChecker) GetRedisesMasterPod(rFailover *redisfailoverv1.Re return "", err } + rport := getRedisPort(rFailover.Spec.Redis.Port) for _, rp := range rps.Items { if rp.Status.Phase == corev1.PodRunning && rp.DeletionTimestamp == nil { // Only work with running - master, err := r.redisClient.IsMaster(rp.Status.PodIP, password) + master, err := r.redisClient.IsMaster(rp.Status.PodIP, rport, password) if err != nil { return "", err } @@ -337,5 +343,10 @@ func (r *RedisFailoverChecker) CheckRedisSlavesReady(ip string, rFailover *redis return false, err } - return r.redisClient.SlaveIsReady(ip, password) + port := getRedisPort(rFailover.Spec.Redis.Port) + return r.redisClient.SlaveIsReady(ip, port, password) +} + +func getRedisPort(p int32) string { + return strconv.Itoa(int(p)) } diff --git a/operator/redisfailover/service/check_test.go b/operator/redisfailover/service/check_test.go index aca3ff0b1..fb6cef83f 100644 --- a/operator/redisfailover/service/check_test.go +++ b/operator/redisfailover/service/check_test.go @@ -182,7 +182,7 @@ func TestCheckAllSlavesFromMasterGetSlaveOfError(t *testing.T) { ms := &mK8SService.Services{} ms.On("GetStatefulSetPods", namespace, rfservice.GetRedisName(rf)).Once().Return(pods, nil) mr := &mRedisService.Client{} - mr.On("GetSlaveOf", "", "").Once().Return("", errors.New("")) + mr.On("GetSlaveOf", "", "0", "").Once().Return("", errors.New("")) checker := rfservice.NewRedisFailoverChecker(ms, mr, log.DummyLogger{}) @@ -209,7 +209,7 @@ func TestCheckAllSlavesFromMasterDifferentMaster(t *testing.T) { ms := &mK8SService.Services{} ms.On("GetStatefulSetPods", namespace, rfservice.GetRedisName(rf)).Once().Return(pods, nil) mr := &mRedisService.Client{} - mr.On("GetSlaveOf", "0.0.0.0", "").Once().Return("1.1.1.1", nil) + mr.On("GetSlaveOf", "0.0.0.0", "0", "").Once().Return("1.1.1.1", nil) checker := rfservice.NewRedisFailoverChecker(ms, mr, log.DummyLogger{}) @@ -236,7 +236,7 @@ func TestCheckAllSlavesFromMaster(t *testing.T) { 
ms := &mK8SService.Services{} ms.On("GetStatefulSetPods", namespace, rfservice.GetRedisName(rf)).Once().Return(pods, nil) mr := &mRedisService.Client{} - mr.On("GetSlaveOf", "0.0.0.0", "").Once().Return("1.1.1.1", nil) + mr.On("GetSlaveOf", "0.0.0.0", "0", "").Once().Return("1.1.1.1", nil) checker := rfservice.NewRedisFailoverChecker(ms, mr, log.DummyLogger{}) @@ -462,7 +462,7 @@ func TestGetMasterIPIsMasterError(t *testing.T) { ms := &mK8SService.Services{} ms.On("GetStatefulSetPods", namespace, rfservice.GetRedisName(rf)).Once().Return(pods, nil) mr := &mRedisService.Client{} - mr.On("IsMaster", "0.0.0.0", "").Once().Return(false, errors.New("")) + mr.On("IsMaster", "0.0.0.0", "0", "").Once().Return(false, errors.New("")) checker := rfservice.NewRedisFailoverChecker(ms, mr, log.DummyLogger{}) @@ -495,8 +495,8 @@ func TestGetMasterIPMultipleMastersError(t *testing.T) { ms := &mK8SService.Services{} ms.On("GetStatefulSetPods", namespace, rfservice.GetRedisName(rf)).Once().Return(pods, nil) mr := &mRedisService.Client{} - mr.On("IsMaster", "0.0.0.0", "").Once().Return(true, nil) - mr.On("IsMaster", "1.1.1.1", "").Once().Return(true, nil) + mr.On("IsMaster", "0.0.0.0", "0", "").Once().Return(true, nil) + mr.On("IsMaster", "1.1.1.1", "0", "").Once().Return(true, nil) checker := rfservice.NewRedisFailoverChecker(ms, mr, log.DummyLogger{}) @@ -529,8 +529,8 @@ func TestGetMasterIP(t *testing.T) { ms := &mK8SService.Services{} ms.On("GetStatefulSetPods", namespace, rfservice.GetRedisName(rf)).Once().Return(pods, nil) mr := &mRedisService.Client{} - mr.On("IsMaster", "0.0.0.0", "").Once().Return(true, nil) - mr.On("IsMaster", "1.1.1.1", "").Once().Return(false, nil) + mr.On("IsMaster", "0.0.0.0", "0", "").Once().Return(true, nil) + mr.On("IsMaster", "1.1.1.1", "0", "").Once().Return(false, nil) checker := rfservice.NewRedisFailoverChecker(ms, mr, log.DummyLogger{}) @@ -573,7 +573,7 @@ func TestGetNumberMastersIsMasterError(t *testing.T) { ms := &mK8SService.Services{} 
ms.On("GetStatefulSetPods", namespace, rfservice.GetRedisName(rf)).Once().Return(pods, nil) mr := &mRedisService.Client{} - mr.On("IsMaster", "0.0.0.0", "").Once().Return(true, errors.New("")) + mr.On("IsMaster", "0.0.0.0", "0", "").Once().Return(true, errors.New("")) checker := rfservice.NewRedisFailoverChecker(ms, mr, log.DummyLogger{}) @@ -606,8 +606,8 @@ func TestGetNumberMasters(t *testing.T) { ms := &mK8SService.Services{} ms.On("GetStatefulSetPods", namespace, rfservice.GetRedisName(rf)).Once().Return(pods, nil) mr := &mRedisService.Client{} - mr.On("IsMaster", "0.0.0.0", "").Once().Return(true, nil) - mr.On("IsMaster", "1.1.1.1", "").Once().Return(false, nil) + mr.On("IsMaster", "0.0.0.0", "0", "").Once().Return(true, nil) + mr.On("IsMaster", "1.1.1.1", "0", "").Once().Return(false, nil) checker := rfservice.NewRedisFailoverChecker(ms, mr, log.DummyLogger{}) @@ -641,8 +641,8 @@ func TestGetNumberMastersTwo(t *testing.T) { ms := &mK8SService.Services{} ms.On("GetStatefulSetPods", namespace, rfservice.GetRedisName(rf)).Once().Return(pods, nil) mr := &mRedisService.Client{} - mr.On("IsMaster", "0.0.0.0", "").Once().Return(true, nil) - mr.On("IsMaster", "1.1.1.1", "").Once().Return(true, nil) + mr.On("IsMaster", "0.0.0.0", "0", "").Once().Return(true, nil) + mr.On("IsMaster", "1.1.1.1", "0", "").Once().Return(true, nil) checker := rfservice.NewRedisFailoverChecker(ms, mr, log.DummyLogger{}) @@ -746,8 +746,8 @@ func TestGetRedisPodsNames(t *testing.T) { ms := &mK8SService.Services{} ms.On("GetStatefulSetPods", namespace, rfservice.GetRedisName(rf)).Once().Return(pods, nil) mr := &mRedisService.Client{} - mr.On("IsMaster", "0.0.0.0", "").Twice().Return(false, nil) - mr.On("IsMaster", "1.1.1.1", "").Once().Return(true, nil) + mr.On("IsMaster", "0.0.0.0", "0", "").Twice().Return(false, nil) + mr.On("IsMaster", "1.1.1.1", "0", "").Once().Return(true, nil) checker := rfservice.NewRedisFailoverChecker(ms, mr, log.DummyLogger{}) master, err := 
checker.GetRedisesMasterPod(rf) @@ -757,8 +757,8 @@ func TestGetRedisPodsNames(t *testing.T) { assert.Equal(master, "master") ms.On("GetStatefulSetPods", namespace, rfservice.GetRedisName(rf)).Once().Return(pods, nil) - mr.On("IsMaster", "0.0.0.0", "").Twice().Return(false, nil) - mr.On("IsMaster", "1.1.1.1", "").Once().Return(true, nil) + mr.On("IsMaster", "0.0.0.0", "0", "").Twice().Return(false, nil) + mr.On("IsMaster", "1.1.1.1", "0", "").Once().Return(true, nil) namePods, err := checker.GetRedisesSlavesPods(rf) diff --git a/operator/redisfailover/service/generator.go b/operator/redisfailover/service/generator.go index 9beab34ef..8f67d03be 100644 --- a/operator/redisfailover/service/generator.go +++ b/operator/redisfailover/service/generator.go @@ -22,13 +22,20 @@ import ( const ( redisConfigurationVolumeName = "redis-config" // Template used to build the Redis configuration - redisConfigTemplate = `slaveof 127.0.0.1 6379 + redisConfigTemplate = `slaveof 127.0.0.1 {{.Spec.Redis.Port}} +port {{.Spec.Redis.Port}} tcp-keepalive 60 save 900 1 save 300 10 +user pinger -@all +ping on >pingpass {{- range .Spec.Redis.CustomCommandRenames}} rename-command "{{.From}}" "{{.To}}" {{- end}} +` + sentinelConfigTemplate = `sentinel monitor mymaster 127.0.0.1 {{.Spec.Redis.Port}} 2 +sentinel down-after-milliseconds mymaster 1000 +sentinel failover-timeout mymaster 3000 +sentinel parallel-syncs mymaster 2 ` redisShutdownConfigurationVolumeName = "redis-shutdown-config" redisReadinessVolumeName = "redis-readiness-config" @@ -108,10 +115,18 @@ func generateSentinelConfigMap(rf *redisfailoverv1.RedisFailover, labels map[str namespace := rf.Namespace labels = util.MergeLabels(labels, generateSelectorLabels(sentinelRoleName, rf.Name)) - sentinelConfigFileContent := `sentinel monitor mymaster 127.0.0.1 6379 2 -sentinel down-after-milliseconds mymaster 1000 -sentinel failover-timeout mymaster 3000 -sentinel parallel-syncs mymaster 2` + + tmpl, err := 
template.New("sentinel").Parse(sentinelConfigTemplate) + if err != nil { + panic(err) + } + + var tplOutput bytes.Buffer + if err := tmpl.Execute(&tplOutput, rf); err != nil { + panic(err) + } + + sentinelConfigFileContent := tplOutput.String() return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ @@ -161,15 +176,23 @@ func generateRedisConfigMap(rf *redisfailoverv1.RedisFailover, labels map[string func generateRedisShutdownConfigMap(rf *redisfailoverv1.RedisFailover, labels map[string]string, ownerRefs []metav1.OwnerReference) *corev1.ConfigMap { name := GetRedisShutdownConfigMapName(rf) + port := rf.Spec.Redis.Port namespace := rf.Namespace - rfName := strings.ToUpper(rf.Name) + rfName := strings.Replace(strings.ToUpper(rf.Name), "-", "_", -1) labels = util.MergeLabels(labels, generateSelectorLabels(redisRoleName, rf.Name)) - shutdownContent := fmt.Sprintf(`master=$(redis-cli -h ${RFS_%[1]v_SERVICE_HOST} -p ${RFS_%[1]v_SERVICE_PORT_SENTINEL} --csv SENTINEL get-master-addr-by-name mymaster | tr ',' ' ' | tr -d '\"' |cut -d' ' -f1) -redis-cli SAVE + shutdownContent := fmt.Sprintf(`echo "shutdown in progress..." +master=$(redis-cli -h ${RFS_%[1]v_SERVICE_HOST} -p ${RFS_%[1]v_SERVICE_PORT_SENTINEL} --csv SENTINEL get-master-addr-by-name mymaster | tr ',' ' ' | tr -d '\"' |cut -d' ' -f1) if [ "$master" = "$(hostname -i)" ]; then + sleep 31 redis-cli -h ${RFS_%[1]v_SERVICE_HOST} -p ${RFS_%[1]v_SERVICE_PORT_SENTINEL} SENTINEL failover mymaster -fi`, rfName) +fi +cmd="redis-cli -p %[2]v" +if [ ! 
-z "${REDIS_PASSWORD}" ]; then + cmd="${cmd} --no-auth-warning -a \"${REDIS_PASSWORD}\"" +fi +save_command="${cmd} save" +eval $save_command`, rfName, port) return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ @@ -185,49 +208,50 @@ fi`, rfName) } func generateRedisReadinessConfigMap(rf *redisfailoverv1.RedisFailover, labels map[string]string, ownerRefs []metav1.OwnerReference) *corev1.ConfigMap { name := GetRedisReadinessName(rf) + port := rf.Spec.Redis.Port namespace := rf.Namespace labels = util.MergeLabels(labels, generateSelectorLabels(redisRoleName, rf.Name)) - readinessContent := `ROLE="role" - ROLE_MASTER="role:master" - ROLE_SLAVE="role:slave" - IN_SYNC="master_sync_in_progress:1" - NO_MASTER="master_host:127.0.0.1" - - cmd="redis-cli" - if [ ! -z "${REDIS_PASSWORD}" ]; then - cmd="${cmd} --no-auth-warning -a \"${REDIS_PASSWORD}\"" - fi - - cmd="${cmd} info replication" - - check_master(){ - exit 0 - } - - check_slave(){ - in_sync=$(echo "${cmd} | grep ${IN_SYNC} | tr -d \"\\r\" | tr -d \"\\n\"" | xargs -0 sh -c) - no_master=$(echo "${cmd} | grep ${NO_MASTER} | tr -d \"\\r\" | tr -d \"\\n\"" | xargs -0 sh -c) - - if [ -z "$in_sync" ] && [ -z "$no_master" ]; then - exit 0 - fi - - exit 1 - } - - role=$(echo "${cmd} | grep $ROLE | tr -d \"\\r\" | tr -d \"\\n\"" | xargs -0 sh -c) - case $role in - $ROLE_MASTER) - check_master - ;; - $ROLE_SLAVE) - check_slave - ;; - *) - echo "unespected" - exit 1 - esac` + readinessContent := fmt.Sprintf(`ROLE="role" +ROLE_MASTER="role:master" +ROLE_SLAVE="role:slave" +IN_SYNC="master_sync_in_progress:1" +NO_MASTER="master_host:127.0.0.1" + +cmd="redis-cli -p %[1]v" +if [ ! 
-z "${REDIS_PASSWORD}" ]; then + cmd="${cmd} --no-auth-warning -a \"${REDIS_PASSWORD}\"" +fi + +cmd="${cmd} info replication" + +check_master(){ + exit 0 +} + +check_slave(){ + in_sync=$(echo "${cmd} | grep ${IN_SYNC} | tr -d \"\\r\" | tr -d \"\\n\"" | xargs -0 sh -c) + no_master=$(echo "${cmd} | grep ${NO_MASTER} | tr -d \"\\r\" | tr -d \"\\n\"" | xargs -0 sh -c) + + if [ -z "$in_sync" ] && [ -z "$no_master" ]; then + exit 0 + fi + + exit 1 +} + +role=$(echo "${cmd} | grep $ROLE | tr -d \"\\r\" | tr -d \"\\n\"" | xargs -0 sh -c) +case $role in + $ROLE_MASTER) + check_master + ;; + $ROLE_SLAVE) + check_slave + ;; + *) + echo "unexpected" + exit 1 +esac`, port) return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ @@ -278,6 +302,7 @@ func generateRedisStatefulSet(rf *redisfailoverv1.RedisFailover, labels map[stri Spec: corev1.PodSpec{ Affinity: getAffinity(rf.Spec.Redis.Affinity, labels), Tolerations: rf.Spec.Redis.Tolerations, + TopologySpreadConstraints: rf.Spec.Redis.TopologySpreadConstraints, NodeSelector: rf.Spec.Redis.NodeSelector, SecurityContext: getSecurityContext(rf.Spec.Redis.SecurityContext), HostNetwork: rf.Spec.Redis.HostNetwork, @@ -295,7 +320,7 @@ func generateRedisStatefulSet(rf *redisfailoverv1.RedisFailover, labels map[stri Ports: []corev1.ContainerPort{ { Name: "redis", - ContainerPort: 6379, + ContainerPort: rf.Spec.Redis.Port, Protocol: corev1.ProtocolTCP, }, }, @@ -320,7 +345,7 @@ func generateRedisStatefulSet(rf *redisfailoverv1.RedisFailover, labels map[stri Command: []string{ "sh", "-c", - "redis-cli -h $(hostname) ping", + fmt.Sprintf("redis-cli -h $(hostname) -p %[1]v --user pinger --pass pingpass --no-auth-warning ping", rf.Spec.Redis.Port), }, }, }, @@ -414,15 +439,16 @@ func generateSentinelDeployment(rf *redisfailoverv1.RedisFailover, labels map[st Annotations: rf.Spec.Sentinel.PodAnnotations, }, Spec: corev1.PodSpec{ - Affinity: getAffinity(rf.Spec.Sentinel.Affinity, labels), - Tolerations: rf.Spec.Sentinel.Tolerations, - 
NodeSelector: rf.Spec.Sentinel.NodeSelector, - SecurityContext: getSecurityContext(rf.Spec.Sentinel.SecurityContext), - HostNetwork: rf.Spec.Sentinel.HostNetwork, - DNSPolicy: getDnsPolicy(rf.Spec.Sentinel.DNSPolicy), - ImagePullSecrets: rf.Spec.Sentinel.ImagePullSecrets, - PriorityClassName: rf.Spec.Sentinel.PriorityClassName, - ServiceAccountName: rf.Spec.Sentinel.ServiceAccountName, + Affinity: getAffinity(rf.Spec.Sentinel.Affinity, labels), + Tolerations: rf.Spec.Sentinel.Tolerations, + TopologySpreadConstraints: rf.Spec.Sentinel.TopologySpreadConstraints, + NodeSelector: rf.Spec.Sentinel.NodeSelector, + SecurityContext: getSecurityContext(rf.Spec.Sentinel.SecurityContext), + HostNetwork: rf.Spec.Sentinel.HostNetwork, + DNSPolicy: getDnsPolicy(rf.Spec.Sentinel.DNSPolicy), + ImagePullSecrets: rf.Spec.Sentinel.ImagePullSecrets, + PriorityClassName: rf.Spec.Sentinel.PriorityClassName, + ServiceAccountName: rf.Spec.Sentinel.ServiceAccountName, InitContainers: []corev1.Container{ { Name: "sentinel-config-copy", @@ -607,6 +633,13 @@ func createRedisExporterContainer(rf *redisfailoverv1.RedisFailover) corev1.Cont } + if rf.Spec.Redis.Port != 6379 { + container.Env = append(container.Env, corev1.EnvVar{ + Name: "REDIS_ADDR", + Value: fmt.Sprintf("redis://localhost:%[1]v", rf.Spec.Redis.Port), + }) + } + return container } diff --git a/operator/redisfailover/service/heal.go b/operator/redisfailover/service/heal.go index a0caf34b7..8bdec8089 100644 --- a/operator/redisfailover/service/heal.go +++ b/operator/redisfailover/service/heal.go @@ -47,7 +47,8 @@ func (r *RedisFailoverHealer) MakeMaster(ip string, rf *redisfailoverv1.RedisFai return err } - return r.redisClient.MakeMaster(ip, password) + port := getRedisPort(rf.Spec.Redis.Port) + return r.redisClient.MakeMaster(ip, port, password) } // SetOldestAsMaster puts all redis to the same master, choosen by order of appearance @@ -70,17 +71,18 @@ func (r *RedisFailoverHealer) SetOldestAsMaster(rf 
*redisfailoverv1.RedisFailove return err } + port := getRedisPort(rf.Spec.Redis.Port) newMasterIP := "" for _, pod := range ssp.Items { if newMasterIP == "" { newMasterIP = pod.Status.PodIP r.logger.Debugf("New master is %s with ip %s", pod.Name, newMasterIP) - if err := r.redisClient.MakeMaster(newMasterIP, password); err != nil { + if err := r.redisClient.MakeMaster(newMasterIP, port, password); err != nil { return err } } else { r.logger.Debugf("Making pod %s slave of %s", pod.Name, newMasterIP) - if err := r.redisClient.MakeSlaveOf(pod.Status.PodIP, newMasterIP, password); err != nil { + if err := r.redisClient.MakeSlaveOfWithPort(pod.Status.PodIP, newMasterIP, port, password); err != nil { return err } } @@ -100,15 +102,18 @@ func (r *RedisFailoverHealer) SetMasterOnAll(masterIP string, rf *redisfailoverv return err } + port := getRedisPort(rf.Spec.Redis.Port) for _, pod := range ssp.Items { + //TODO: Review this logic + // This might be slightly dangerous if pod.Status.PodIP == masterIP { r.logger.Debugf("Ensure pod %s is master", pod.Name) - if err := r.redisClient.MakeMaster(masterIP, password); err != nil { + if err := r.redisClient.MakeMaster(masterIP, port, password); err != nil { return err } } else { r.logger.Debugf("Making pod %s slave of %s", pod.Name, masterIP) - if err := r.redisClient.MakeSlaveOf(pod.Status.PodIP, masterIP, password); err != nil { + if err := r.redisClient.MakeSlaveOfWithPort(pod.Status.PodIP, masterIP, port, password); err != nil { return err } } @@ -148,8 +153,8 @@ func (r *RedisFailoverHealer) NewSentinelMonitor(ip string, monitor string, rf * if err != nil { return err } - - return r.redisClient.MonitorRedis(ip, monitor, quorum, password) + rport := getRedisPort(rf.Spec.Redis.Port) + return r.redisClient.MonitorRedisWithPort(ip, monitor, rport, quorum, password) } // NewSentinelMonitorWithPort changes the master that Sentinel has to monitor by the provided IP and Port @@ -186,7 +191,8 @@ func (r *RedisFailoverHealer) 
SetRedisCustomConfig(ip string, rf *redisfailoverv return err } - return r.redisClient.SetCustomRedisConfig(ip, rf.Spec.Redis.CustomConfig, password) + port := getRedisPort(rf.Spec.Redis.Port) + return r.redisClient.SetCustomRedisConfig(ip, port, rf.Spec.Redis.CustomConfig, password) } //DeletePod delete a failing pod so kubernetes relaunch it again diff --git a/operator/redisfailover/service/heal_test.go b/operator/redisfailover/service/heal_test.go index 61eff590a..95f358011 100644 --- a/operator/redisfailover/service/heal_test.go +++ b/operator/redisfailover/service/heal_test.go @@ -33,7 +33,7 @@ func TestSetOldestAsMasterNewMasterError(t *testing.T) { ms := &mK8SService.Services{} ms.On("GetStatefulSetPods", namespace, rfservice.GetRedisName(rf)).Once().Return(pods, nil) mr := &mRedisService.Client{} - mr.On("MakeMaster", "0.0.0.0", "").Once().Return(errors.New("")) + mr.On("MakeMaster", "0.0.0.0", "0", "").Once().Return(errors.New("")) healer := rfservice.NewRedisFailoverHealer(ms, mr, log.DummyLogger{}) @@ -59,7 +59,7 @@ func TestSetOldestAsMaster(t *testing.T) { ms := &mK8SService.Services{} ms.On("GetStatefulSetPods", namespace, rfservice.GetRedisName(rf)).Once().Return(pods, nil) mr := &mRedisService.Client{} - mr.On("MakeMaster", "0.0.0.0", "").Once().Return(nil) + mr.On("MakeMaster", "0.0.0.0", "0", "").Once().Return(nil) healer := rfservice.NewRedisFailoverHealer(ms, mr, log.DummyLogger{}) @@ -90,8 +90,8 @@ func TestSetOldestAsMasterMultiplePodsMakeSlaveOfError(t *testing.T) { ms := &mK8SService.Services{} ms.On("GetStatefulSetPods", namespace, rfservice.GetRedisName(rf)).Once().Return(pods, nil) mr := &mRedisService.Client{} - mr.On("MakeMaster", "0.0.0.0", "").Once().Return(nil) - mr.On("MakeSlaveOf", "1.1.1.1", "0.0.0.0", "").Once().Return(errors.New("")) + mr.On("MakeMaster", "0.0.0.0", "0", "").Once().Return(nil) + mr.On("MakeSlaveOfWithPort", "1.1.1.1", "0.0.0.0", "0", "").Once().Return(errors.New("")) healer := 
rfservice.NewRedisFailoverHealer(ms, mr, log.DummyLogger{}) @@ -122,8 +122,8 @@ func TestSetOldestAsMasterMultiplePods(t *testing.T) { ms := &mK8SService.Services{} ms.On("GetStatefulSetPods", namespace, rfservice.GetRedisName(rf)).Once().Return(pods, nil) mr := &mRedisService.Client{} - mr.On("MakeMaster", "0.0.0.0", "").Once().Return(nil) - mr.On("MakeSlaveOf", "1.1.1.1", "0.0.0.0", "").Once().Return(nil) + mr.On("MakeMaster", "0.0.0.0", "0", "").Once().Return(nil) + mr.On("MakeSlaveOfWithPort", "1.1.1.1", "0.0.0.0", "0", "").Once().Return(nil) healer := rfservice.NewRedisFailoverHealer(ms, mr, log.DummyLogger{}) @@ -164,8 +164,8 @@ func TestSetOldestAsMasterOrdering(t *testing.T) { ms := &mK8SService.Services{} ms.On("GetStatefulSetPods", namespace, rfservice.GetRedisName(rf)).Once().Return(pods, nil) mr := &mRedisService.Client{} - mr.On("MakeMaster", "1.1.1.1", "").Once().Return(nil) - mr.On("MakeSlaveOf", "0.0.0.0", "1.1.1.1", "").Once().Return(nil) + mr.On("MakeMaster", "1.1.1.1", "0", "").Once().Return(nil) + mr.On("MakeSlaveOfWithPort", "0.0.0.0", "1.1.1.1", "0", "").Once().Return(nil) healer := rfservice.NewRedisFailoverHealer(ms, mr, log.DummyLogger{}) @@ -196,7 +196,7 @@ func TestSetMasterOnAllMakeMasterError(t *testing.T) { ms := &mK8SService.Services{} ms.On("GetStatefulSetPods", namespace, rfservice.GetRedisName(rf)).Once().Return(pods, nil) mr := &mRedisService.Client{} - mr.On("MakeMaster", "0.0.0.0", "").Once().Return(errors.New("")) + mr.On("MakeMaster", "0.0.0.0", "0", "").Once().Return(errors.New("")) healer := rfservice.NewRedisFailoverHealer(ms, mr, log.DummyLogger{}) @@ -227,8 +227,8 @@ func TestSetMasterOnAllMakeSlaveOfError(t *testing.T) { ms := &mK8SService.Services{} ms.On("GetStatefulSetPods", namespace, rfservice.GetRedisName(rf)).Once().Return(pods, nil) mr := &mRedisService.Client{} - mr.On("MakeMaster", "0.0.0.0", "").Once().Return(nil) - mr.On("MakeSlaveOf", "1.1.1.1", "0.0.0.0", "").Once().Return(errors.New("")) + 
mr.On("MakeMaster", "0.0.0.0", "0", "").Once().Return(nil) + mr.On("MakeSlaveOfWithPort", "1.1.1.1", "0.0.0.0", "0", "").Once().Return(errors.New("")) healer := rfservice.NewRedisFailoverHealer(ms, mr, log.DummyLogger{}) @@ -259,8 +259,8 @@ func TestSetMasterOnAll(t *testing.T) { ms := &mK8SService.Services{} ms.On("GetStatefulSetPods", namespace, rfservice.GetRedisName(rf)).Once().Return(pods, nil) mr := &mRedisService.Client{} - mr.On("MakeMaster", "0.0.0.0", "").Once().Return(nil) - mr.On("MakeSlaveOf", "1.1.1.1", "0.0.0.0", "").Once().Return(nil) + mr.On("MakeMaster", "0.0.0.0", "0", "").Once().Return(nil) + mr.On("MakeSlaveOfWithPort", "1.1.1.1", "0.0.0.0", "0", "").Once().Return(nil) healer := rfservice.NewRedisFailoverHealer(ms, mr, log.DummyLogger{}) @@ -363,12 +363,11 @@ func TestNewSentinelMonitor(t *testing.T) { ms := &mK8SService.Services{} mr := &mRedisService.Client{} errorExpected := false - if test.errorOnMonitorRedis { errorExpected = true - mr.On("MonitorRedis", "0.0.0.0", "1.1.1.1", "2", "").Once().Return(errors.New("")) + mr.On("MonitorRedisWithPort", "0.0.0.0", "1.1.1.1", "0", "2", "").Once().Return(errors.New("")) } else { - mr.On("MonitorRedis", "0.0.0.0", "1.1.1.1", "2", "").Once().Return(nil) + mr.On("MonitorRedisWithPort", "0.0.0.0", "1.1.1.1", "0", "2", "").Once().Return(nil) } healer := rfservice.NewRedisFailoverHealer(ms, mr, log.DummyLogger{}) diff --git a/service/redis/client.go b/service/redis/client.go index 036bfebe3..1c3ccafe6 100644 --- a/service/redis/client.go +++ b/service/redis/client.go @@ -16,17 +16,17 @@ type Client interface { GetNumberSentinelsInMemory(ip string) (int32, error) GetNumberSentinelSlavesInMemory(ip string) (int32, error) ResetSentinel(ip string) error - GetSlaveOf(ip, password string) (string, error) - IsMaster(ip, password string) (bool, error) + GetSlaveOf(ip, port, password string) (string, error) + IsMaster(ip, port, password string) (bool, error) MonitorRedis(ip, monitor, quorum, password string) error 
MonitorRedisWithPort(ip, monitor, port, quorum, password string) error - MakeMaster(ip, password string) error + MakeMaster(ip, port, password string) error MakeSlaveOf(ip, masterIP, password string) error MakeSlaveOfWithPort(ip, masterIP, masterPort, password string) error GetSentinelMonitor(ip string) (string, string, error) SetCustomSentinelConfig(ip string, configs []string) error - SetCustomRedisConfig(ip string, configs []string, password string) error - SlaveIsReady(ip, password string) (bool, error) + SetCustomRedisConfig(ip string, port string, configs []string, password string) error + SlaveIsReady(ip, port, password string) (bool, error) } type client struct { @@ -142,9 +142,9 @@ func (c *client) ResetSentinel(ip string) error { } // GetSlaveOf returns the master of the given redis, or nil if it's master -func (c *client) GetSlaveOf(ip, password string) (string, error) { +func (c *client) GetSlaveOf(ip, port, password string) (string, error) { options := &rediscli.Options{ - Addr: net.JoinHostPort(ip, redisPort), + Addr: net.JoinHostPort(ip, port), Password: password, DB: 0, } @@ -161,9 +161,9 @@ func (c *client) GetSlaveOf(ip, password string) (string, error) { return match[1], nil } -func (c *client) IsMaster(ip, password string) (bool, error) { +func (c *client) IsMaster(ip, port, password string) (bool, error) { options := &rediscli.Options{ - Addr: net.JoinHostPort(ip, redisPort), + Addr: net.JoinHostPort(ip, port), Password: password, DB: 0, } @@ -215,9 +215,9 @@ func (c *client) MonitorRedisWithPort(ip, monitor, port, quorum, password string return nil } -func (c *client) MakeMaster(ip string, password string) error { +func (c *client) MakeMaster(ip string, port string, password string) error { options := &rediscli.Options{ - Addr: net.JoinHostPort(ip, redisPort), + Addr: net.JoinHostPort(ip, port), Password: password, DB: 0, } @@ -235,7 +235,7 @@ func (c *client) MakeSlaveOf(ip, masterIP, password string) error { func (c *client) 
MakeSlaveOfWithPort(ip, masterIP, masterPort, password string) error { options := &rediscli.Options{ - Addr: net.JoinHostPort(ip, redisPort), // this is IP and Port for the RedisFailover redis + Addr: net.JoinHostPort(ip, masterPort), // this is IP and Port for the RedisFailover redis Password: password, DB: 0, } @@ -290,9 +290,9 @@ func (c *client) SetCustomSentinelConfig(ip string, configs []string) error { return nil } -func (c *client) SetCustomRedisConfig(ip string, configs []string, password string) error { +func (c *client) SetCustomRedisConfig(ip string, port string, configs []string, password string) error { options := &rediscli.Options{ - Addr: net.JoinHostPort(ip, redisPort), + Addr: net.JoinHostPort(ip, port), Password: password, DB: 0, } @@ -333,9 +333,9 @@ func (c *client) getConfigParameters(config string) (parameter string, value str return s[0], strings.Join(s[1:], " "), nil } -func (c *client) SlaveIsReady(ip, password string) (bool, error) { +func (c *client) SlaveIsReady(ip, port, password string) (bool, error) { options := &rediscli.Options{ - Addr: net.JoinHostPort(ip, redisPort), + Addr: net.JoinHostPort(ip, port), Password: password, DB: 0, } diff --git a/test/integration/redisfailover/creation_test.go b/test/integration/redisfailover/creation_test.go index 46ae8cbee..27acc4aaa 100644 --- a/test/integration/redisfailover/creation_test.go +++ b/test/integration/redisfailover/creation_test.go @@ -216,7 +216,7 @@ func (c *clients) testRedisMaster(t *testing.T) { for _, pod := range redisPodList.Items { ip := pod.Status.PodIP - if ok, _ := c.redisClient.IsMaster(ip, testPass); ok { + if ok, _ := c.redisClient.IsMaster(ip, "6379", testPass); ok { masters = append(masters, ip) } } @@ -247,7 +247,7 @@ func (c *clients) testSentinelMonitoring(t *testing.T) { assert.Equal(masters[0], masterIP, "all master ip monitoring should equal") } - isMaster, err := c.redisClient.IsMaster(masters[0], testPass) + isMaster, err := 
c.redisClient.IsMaster(masters[0], "6379", testPass) assert.NoError(err) assert.True(isMaster, "Sentinel should monitor the Redis master") }