diff --git a/Dockerfile b/Dockerfile index 6f3eda380..2c1b59080 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,8 @@ ARG DOCKER_REGISTRY -ARG ALPINE_VERSION=3.15 +ARG DISTROLESS_DOCKER_REGISTRY +ARG ALPINE_VERSION=3.17 ARG BUILDPLATFORM=linux/amd64 -FROM --platform=$BUILDPLATFORM ${DOCKER_REGISTRY:+$DOCKER_REGISTRY/}golang:1.18-alpine${ALPINE_VERSION} as go-builder +FROM --platform=$BUILDPLATFORM ${DOCKER_REGISTRY:+$DOCKER_REGISTRY/}golang:1.19-alpine${ALPINE_VERSION} as go-builder ARG PROJECT_NAME=zookeeper-operator ARG REPO_PATH=github.com/pravega/$PROJECT_NAME @@ -32,13 +33,10 @@ COPY controllers/ controllers/ RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} CGO_ENABLED=0 go build -o /src/${PROJECT_NAME} \ -ldflags "-X ${REPO_PATH}/pkg/version.Version=${VERSION} -X ${REPO_PATH}/pkg/version.GitSHA=${GIT_SHA}" main.go -FROM ${DOCKER_REGISTRY:+$DOCKER_REGISTRY/}alpine:${ALPINE_VERSION} AS final +FROM ${DISTROLESS_DOCKER_REGISTRY:-gcr.io/}distroless/static-debian11:nonroot AS final ARG PROJECT_NAME=zookeeper-operator COPY --from=go-builder /src/${PROJECT_NAME} /usr/local/bin/${PROJECT_NAME} -RUN adduser -D ${PROJECT_NAME} -USER ${PROJECT_NAME} - ENTRYPOINT ["/usr/local/bin/zookeeper-operator"] diff --git a/Makefile b/Makefile index d8ea937b6..55e0d9498 100644 --- a/Makefile +++ b/Makefile @@ -130,10 +130,11 @@ build-go: -o bin/$(EXPORTER_NAME)-windows-amd64.exe cmd/exporter/main.go build-image: - docker build --build-arg VERSION=$(VERSION) --build-arg DOCKER_REGISTRY=$(DOCKER_REGISTRY) --build-arg GIT_SHA=$(GIT_SHA) -t $(REPO):$(VERSION) . + docker build --build-arg VERSION=$(VERSION) --build-arg DOCKER_REGISTRY=$(DOCKER_REGISTRY) --build-arg DISTROLESS_DOCKER_REGISTRY=$(DISTROLESS_DOCKER_REGISTRY) --build-arg GIT_SHA=$(GIT_SHA) -t $(REPO):$(VERSION) . 
docker tag $(REPO):$(VERSION) $(REPO):latest build-zk-image: + docker build --build-arg VERSION=$(VERSION) --build-arg DOCKER_REGISTRY=$(DOCKER_REGISTRY) --build-arg GIT_SHA=$(GIT_SHA) -t $(APP_REPO):$(VERSION) ./docker docker tag $(APP_REPO):$(VERSION) $(APP_REPO):latest diff --git a/api/v1beta1/deepcopy_test.go b/api/v1beta1/deepcopy_test.go index 52edca265..5743a5af1 100644 --- a/api/v1beta1/deepcopy_test.go +++ b/api/v1beta1/deepcopy_test.go @@ -173,6 +173,9 @@ var _ = Describe("ZookeeperCluster DeepCopy", func() { It("value of Tol should be example", func() { Ω(z2.Spec.Pod.Tolerations[0].Key).To(Equal("tol")) }) + It("have empty topologySpreadConstraints", func() { + Ω(z2.Spec.Pod.TopologySpreadConstraints).To(HaveLen(0)) + }) It("checking status conditions", func() { Ω(z2.Status.Conditions[0].Reason).To(Equal(z1.Status.Conditions[0].Reason)) }) @@ -242,6 +245,14 @@ var _ = Describe("ZookeeperCluster DeepCopy", func() { podpolicy2 := podpolicy.DeepCopy() Ω(podpolicy2).To(BeNil()) }) + It("checking for deepcopy podpolicy.topologyspreadconstraints", func() { + t := v1.TopologySpreadConstraint{} + podpolicy := v1beta1.PodPolicy{ + TopologySpreadConstraints: []v1.TopologySpreadConstraint{t}, + } + podpolicy2 := podpolicy.DeepCopy() + Ω(podpolicy2.TopologySpreadConstraints).To(HaveLen(1)) + }) It("checking for nil zookeepercluster", func() { var zk *v1beta1.ZookeeperCluster zk2 := zk.DeepCopy() diff --git a/api/v1beta1/zookeepercluster_types.go b/api/v1beta1/zookeepercluster_types.go index 3f5da83b4..f64474b4d 100644 --- a/api/v1beta1/zookeepercluster_types.go +++ b/api/v1beta1/zookeepercluster_types.go @@ -26,7 +26,7 @@ const ( // DefaultZkContainerVersion is the default tag used for for the zookeeper // container - DefaultZkContainerVersion = "0.2.14" + DefaultZkContainerVersion = "0.2.15" // DefaultZkContainerPolicy is the default container pull policy used DefaultZkContainerPolicy = "Always" @@ -82,8 +82,9 @@ type ZookeeperClusterSpec struct { // Image is the container image. default is zookeeper:0.2.10 Image ContainerImage `json:"image,omitempty"` - // Labels specifies the labels to attach to pods the operator creates for - // the zookeeper cluster. + // Labels specifies the labels to attach to all resources the operator + // creates for the zookeeper cluster, including StatefulSet, Pod, + // PersistentVolumeClaim, Service, ConfigMap, et al. Labels map[string]string `json:"labels,omitempty"` // Replicas is the expected size of the zookeeper cluster. @@ -433,8 +434,8 @@ func (c *ContainerImage) ToString() string { // PodPolicy defines the common pod configuration for Pods, including when used // in deployments, stateful-sets, etc. type PodPolicy struct { - // Labels specifies the labels to attach to pods the operator creates for - // the zookeeper cluster. + // Labels specifies the labels to attach to pods the operator creates for the + // zookeeper cluster. Overrides any values specified in Spec.Labels. Labels map[string]string `json:"labels,omitempty"` // NodeSelector specifies a map of key-value pairs. For the pod to be @@ -445,6 +446,9 @@ type PodPolicy struct { // The scheduling constraints on pods. Affinity *v1.Affinity `json:"affinity,omitempty"` + // TopologySpreadConstraints to apply to the pods + TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + // Resources is the resource requirements for the container. // This field cannot be updated once the cluster is created. 
Resources v1.ResourceRequirements `json:"resources,omitempty"` diff --git a/api/v1beta1/zookeepercluster_types_test.go b/api/v1beta1/zookeepercluster_types_test.go index c098cbd67..c8d59fb0b 100644 --- a/api/v1beta1/zookeepercluster_types_test.go +++ b/api/v1beta1/zookeepercluster_types_test.go @@ -79,7 +79,7 @@ var _ = Describe("ZookeeperCluster Types", func() { }) It("Checking tostring() function", func() { - Ω(z.Spec.Image.ToString()).To(Equal("pravega/zookeeper:0.2.14")) + Ω(z.Spec.Image.ToString()).To(Equal("pravega/zookeeper:0.2.15")) }) }) diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index 409d989b1..e712f6cba 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -192,6 +192,13 @@ func (in *PodPolicy) DeepCopyInto(out *PodPolicy) { *out = new(v1.Affinity) (*in).DeepCopyInto(*out) } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]v1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } in.Resources.DeepCopyInto(&out.Resources) if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations diff --git a/charts/zookeeper-operator/Chart.yaml b/charts/zookeeper-operator/Chart.yaml index 9fafe472a..5ed411662 100644 --- a/charts/zookeeper-operator/Chart.yaml +++ b/charts/zookeeper-operator/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 name: zookeeper-operator description: Zookeeper Operator Helm chart for Kubernetes -version: 0.2.14 -appVersion: 0.2.14 +version: 0.2.15 +appVersion: 0.2.15 keywords: - zookeeper - storage diff --git a/charts/zookeeper-operator/README.md b/charts/zookeeper-operator/README.md index 3175a2ceb..4497ecf29 100644 --- a/charts/zookeeper-operator/README.md +++ b/charts/zookeeper-operator/README.md @@ -55,7 +55,7 @@ The following table lists the configurable parameters of the zookeeper-operator | `hooks.image.tag` | Image tag for batch jobs | `"v1.16.10"` | | `image.pullPolicy` | Image pull policy | `IfNotPresent` | | `image.repository` | Image repository | `pravega/zookeeper-operator` | -| `image.tag` | Image tag | `0.2.14` | +| `image.tag` | Image tag | `0.2.15` | | `labels` | Operator pod labels | `{}` | | `nodeSelector` | Map of key-value pairs to be present as labels in the node in which the pod should run | `{}` | | `rbac.create` | Create RBAC resources | `true` | diff --git a/charts/zookeeper-operator/templates/pre-delete-hooks.yaml b/charts/zookeeper-operator/templates/pre-delete-hooks.yaml index 0156d4849..77fa3895a 100644 --- a/charts/zookeeper-operator/templates/pre-delete-hooks.yaml +++ b/charts/zookeeper-operator/templates/pre-delete-hooks.yaml @@ -115,4 +115,16 @@ spec: configMap: name: {{ template "zookeeper-operator.fullname" . 
}}-pre-delete defaultMode: 0555 + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} {{- end }} diff --git a/charts/zookeeper-operator/templates/zookeeper.pravega.io_zookeeperclusters_crd.yaml b/charts/zookeeper-operator/templates/zookeeper.pravega.io_zookeeperclusters_crd.yaml index 03555d8d9..c65974754 100644 --- a/charts/zookeeper-operator/templates/zookeeper.pravega.io_zookeeperclusters_crd.yaml +++ b/charts/zookeeper-operator/templates/zookeeper.pravega.io_zookeeperclusters_crd.yaml @@ -2684,8 +2684,9 @@ spec: labels: additionalProperties: type: string - description: Labels specifies the labels to attach to pods the operator - creates for the zookeeper cluster. + description: Labels specifies the labels to attach to all resources + the operator creates for the zookeeper cluster, including StatefulSet, + Pod, PersistentVolumeClaim, Service, ConfigMap, et al. type: object maxUnavailableReplicas: description: MaxUnavailableReplicas defines the MaxUnavailable Replicas @@ -3875,7 +3876,8 @@ spec: additionalProperties: type: string description: Labels specifies the labels to attach to pods the - operator creates for the zookeeper cluster. + operator creates for the zookeeper cluster. Overrides any values + specified in Spec.Labels. type: object nodeSelector: additionalProperties: @@ -4133,6 +4135,144 @@ spec: type: string type: object type: array + topologySpreadConstraints: + description: TopologySpreadConstraints to apply to the pods + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine + the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + maxSkew: + description: 'MaxSkew describes the degree to which pods + may be unevenly distributed. 
When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the number + of matching pods in the target topology and the global + minimum. The global minimum is the minimum number of matching + pods in an eligible domain or zero if the number of eligible + domains is less than MinDomains. For example, in a 3-zone + cluster, MaxSkew is set to 1, and pods with the same labelSelector + spread as 2/2/1: In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | | P P | P P | P | - + if MaxSkew is 1, incoming pod can only be scheduled to + zone3 to become 2/2/2; scheduling it onto zone1(zone2) + would make the ActualSkew(3-1) on zone1(zone2) violate + MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled + onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies that + satisfy it. It''s a required field. Default value is 1 + and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number of eligible + domains. When the number of eligible domains with matching + topology keys is less than minDomains, Pod Topology Spread + treats \"global minimum\" as 0, and then the calculation + of Skew is performed. And when the number of eligible + domains with matching topology keys equals or greater + than minDomains, this value has no effect on scheduling. + As a result, when the number of eligible domains is less + than minDomains, scheduler won't schedule more than maxSkew + Pods to those domains. If value is nil, the constraint + behaves as if MinDomains is equal to 1. Valid values are + integers greater than 0. When value is not nil, WhenUnsatisfiable + must be DoNotSchedule. \n For example, in a 3-zone cluster, + MaxSkew is set to 2, MinDomains is set to 5 and pods with + the same labelSelector spread as 2/2/2: | zone1 | zone2 + | zone3 | | P P | P P | P P | The number of domains + is less than 5(MinDomains), so \"global minimum\" is treated + as 0. In this situation, new pod with the same labelSelector + cannot be scheduled, because computed skew will be 3(3 + - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. \n This is an alpha field and + requires enabling MinDomainsInPodTopologySpread feature + gate." + format: int32 + type: integer + topologyKey: + description: TopologyKey is the key of node labels. Nodes + that have a label with this key and identical values are + considered to be in the same topology. We consider each <key, value> + as a "bucket", and try to put balanced number + of pods into each bucket. We define a domain as a particular + instance of a topology. Also, we define an eligible domain + as a domain whose nodes match the node selector. e.g. + If TopologyKey is "kubernetes.io/hostname", each Node + is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", + each zone is a domain of that topology. It's a required + field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with + a pod if it doesn''t satisfy the spread constraint. - + DoNotSchedule (default) tells the scheduler not to schedule + it. - ScheduleAnyway tells the scheduler to schedule the + pod in any location, but giving higher precedence to topologies + that would help reduce the skew. A constraint is considered + "Unsatisfiable" for an incoming pod if and only if every + possible node assignment for that pod would violate "MaxSkew" + on some topology.
For example, in a 3-zone cluster, MaxSkew + is set to 1, and pods with the same labelSelector spread + as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming + pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) + as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). + In other words, the cluster can still be imbalanced, but + scheduler won''t make it *more* imbalanced. It''s a required + field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array type: object ports: items: diff --git a/charts/zookeeper-operator/values.yaml b/charts/zookeeper-operator/values.yaml index eeef7645a..afc8397e3 100644 --- a/charts/zookeeper-operator/values.yaml +++ b/charts/zookeeper-operator/values.yaml @@ -9,7 +9,7 @@ global: image: repository: pravega/zookeeper-operator - tag: 0.2.14 + tag: 0.2.15 pullPolicy: IfNotPresent securityContext: {} diff --git a/charts/zookeeper/Chart.yaml b/charts/zookeeper/Chart.yaml index 3ba08e5a7..594d7de3d 100644 --- a/charts/zookeeper/Chart.yaml +++ b/charts/zookeeper/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 name: zookeeper description: Zookeeper Helm chart for Kubernetes -version: 0.2.14 -appVersion: 0.2.14 +version: 0.2.15 +appVersion: 0.2.15 keywords: - zookeeper - storage diff --git a/charts/zookeeper/README.md b/charts/zookeeper/README.md index 8d0417bdb..95027a3ad 100644 --- a/charts/zookeeper/README.md +++ b/charts/zookeeper/README.md @@ -57,7 +57,7 @@ The following table lists the configurable parameters of the zookeeper chart and | `maxUnavailableReplicas` | Max unavailable replicas in pdb | `1` | | `triggerRollingRestart` | If true, the zookeeper cluster is restarted. After the restart is triggered, this value is auto-reverted to false. 
| `false` | | `image.repository` | Image repository | `pravega/zookeeper` | -| `image.tag` | Image tag | `0.2.14` | +| `image.tag` | Image tag | `0.2.15` | | `image.pullPolicy` | Image pull policy | `IfNotPresent` | | `domainName` | External host name appended for dns annotation | | | `kubernetesClusterDomain` | Domain of the kubernetes cluster | `cluster.local` | diff --git a/charts/zookeeper/templates/zookeeper.yaml b/charts/zookeeper/templates/zookeeper.yaml index 6a877a8c1..902440e61 100644 --- a/charts/zookeeper/templates/zookeeper.yaml +++ b/charts/zookeeper/templates/zookeeper.yaml @@ -76,6 +76,10 @@ spec: {{- if .Values.pod.affinity }} affinity: {{ toYaml .Values.pod.affinity | indent 6 }} + {{- end }} + {{- if .Values.pod.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.pod.topologySpreadConstraints | indent 6 }} {{- end }} {{- if .Values.pod.resources }} resources: diff --git a/charts/zookeeper/values.yaml b/charts/zookeeper/values.yaml index 0209149a8..8a449f0b9 100644 --- a/charts/zookeeper/values.yaml +++ b/charts/zookeeper/values.yaml @@ -3,7 +3,7 @@ maxUnavailableReplicas: image: repository: pravega/zookeeper - tag: 0.2.14 + tag: 0.2.15 pullPolicy: IfNotPresent triggerRollingRestart: false @@ -36,6 +36,7 @@ pod: # labels: {} # nodeSelector: {} # affinity: {} + # topologySpreadConstraints: {} # resources: {} # tolerations: [] # env: [] diff --git a/config/crd/bases/zookeeper.pravega.io_zookeeperclusters.yaml b/config/crd/bases/zookeeper.pravega.io_zookeeperclusters.yaml index 46f114d39..44b0d742c 100644 --- a/config/crd/bases/zookeeper.pravega.io_zookeeperclusters.yaml +++ b/config/crd/bases/zookeeper.pravega.io_zookeeperclusters.yaml @@ -2683,8 +2683,9 @@ spec: labels: additionalProperties: type: string - description: Labels specifies the labels to attach to pods the operator - creates for the zookeeper cluster. + description: Labels specifies the labels to attach to all resources + the operator creates for the zookeeper cluster, including StatefulSet, + Pod, PersistentVolumeClaim, Service, ConfigMap, et al. type: object maxUnavailableReplicas: description: MaxUnavailableReplicas defines the MaxUnavailable Replicas @@ -3874,7 +3875,8 @@ spec: additionalProperties: type: string description: Labels specifies the labels to attach to pods the - operator creates for the zookeeper cluster. + operator creates for the zookeeper cluster. Overrides any values + specified in Spec.Labels. type: object nodeSelector: additionalProperties: @@ -4132,6 +4134,144 @@ spec: type: string type: object type: array + topologySpreadConstraints: + description: TopologySpreadConstraints to apply to the pods + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine + the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + maxSkew: + description: 'MaxSkew describes the degree to which pods + may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the number + of matching pods in the target topology and the global + minimum. The global minimum is the minimum number of matching + pods in an eligible domain or zero if the number of eligible + domains is less than MinDomains. For example, in a 3-zone + cluster, MaxSkew is set to 1, and pods with the same labelSelector + spread as 2/2/1: In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | | P P | P P | P | - + if MaxSkew is 1, incoming pod can only be scheduled to + zone3 to become 2/2/2; scheduling it onto zone1(zone2) + would make the ActualSkew(3-1) on zone1(zone2) violate + MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled + onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies that + satisfy it. It''s a required field. Default value is 1 + and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number of eligible + domains. When the number of eligible domains with matching + topology keys is less than minDomains, Pod Topology Spread + treats \"global minimum\" as 0, and then the calculation + of Skew is performed. And when the number of eligible + domains with matching topology keys equals or greater + than minDomains, this value has no effect on scheduling. + As a result, when the number of eligible domains is less + than minDomains, scheduler won't schedule more than maxSkew + Pods to those domains. If value is nil, the constraint + behaves as if MinDomains is equal to 1. Valid values are + integers greater than 0. When value is not nil, WhenUnsatisfiable + must be DoNotSchedule. \n For example, in a 3-zone cluster, + MaxSkew is set to 2, MinDomains is set to 5 and pods with + the same labelSelector spread as 2/2/2: | zone1 | zone2 + | zone3 | | P P | P P | P P | The number of domains + is less than 5(MinDomains), so \"global minimum\" is treated + as 0. In this situation, new pod with the same labelSelector + cannot be scheduled, because computed skew will be 3(3 + - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. \n This is an alpha field and + requires enabling MinDomainsInPodTopologySpread feature + gate." + format: int32 + type: integer + topologyKey: + description: TopologyKey is the key of node labels. Nodes + that have a label with this key and identical values are + considered to be in the same topology. We consider each <key, value> + as a "bucket", and try to put balanced number + of pods into each bucket. We define a domain as a particular + instance of a topology.
Also, we define an eligible domain + as a domain whose nodes match the node selector. e.g. + If TopologyKey is "kubernetes.io/hostname", each Node + is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", + each zone is a domain of that topology. It's a required + field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with + a pod if it doesn''t satisfy the spread constraint. - + DoNotSchedule (default) tells the scheduler not to schedule + it. - ScheduleAnyway tells the scheduler to schedule the + pod in any location, but giving higher precedence to topologies + that would help reduce the skew. A constraint is considered + "Unsatisfiable" for an incoming pod if and only if every + possible node assignment for that pod would violate "MaxSkew" + on some topology. For example, in a 3-zone cluster, MaxSkew + is set to 1, and pods with the same labelSelector spread + as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming + pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) + as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). + In other words, the cluster can still be imbalanced, but + scheduler won''t make it *more* imbalanced. It''s a required + field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array type: object ports: items: diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 718a8b499..18163b4c5 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -16,7 +16,7 @@ spec: containers: - name: zookeeper-operator # Replace this with the built image name - image: pravega/zookeeper-operator:0.2.14 + image: pravega/zookeeper-operator:0.2.15 ports: - containerPort: 60000 name: metrics diff --git a/config/samples/ECS/zookeeper_v1beta1_zookeepercluster_cr.yaml b/config/samples/ECS/zookeeper_v1beta1_zookeepercluster_cr.yaml index 26faab41d..8130f7533 100644 --- a/config/samples/ECS/zookeeper_v1beta1_zookeepercluster_cr.yaml +++ b/config/samples/ECS/zookeeper_v1beta1_zookeepercluster_cr.yaml @@ -6,7 +6,7 @@ spec: replicas: 3 image: repository: pravega/zookeeper - tag: 0.2.14 + tag: 0.2.15 storageType: persistence persistence: reclaimPolicy: Retain diff --git a/config/samples/pravega/zookeeper_v1beta1_zookeepercluster_cr.yaml b/config/samples/pravega/zookeeper_v1beta1_zookeepercluster_cr.yaml index b28a34fda..d3520411c 100644 --- a/config/samples/pravega/zookeeper_v1beta1_zookeepercluster_cr.yaml +++ b/config/samples/pravega/zookeeper_v1beta1_zookeepercluster_cr.yaml @@ -6,7 +6,7 @@ spec: replicas: 3 image: repository: pravega/zookeeper - tag: 0.2.14 + tag: 0.2.15 storageType: persistence persistence: reclaimPolicy: Delete diff --git a/controllers/zookeepercluster_controller.go b/controllers/zookeepercluster_controller.go index 934f4050c..eb6241359 100644 --- a/controllers/zookeepercluster_controller.go +++ b/controllers/zookeepercluster_controller.go @@ -15,19 +15,19 @@ import ( "strconv" "time" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "github.com/pravega/zookeeper-operator/pkg/controller/config" "github.com/pravega/zookeeper-operator/pkg/utils" "github.com/pravega/zookeeper-operator/pkg/yamlexporter" "github.com/pravega/zookeeper-operator/pkg/zk" - "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - 
"sigs.k8s.io/controller-runtime/pkg/predicate" "github.com/go-logr/logr" - zookeeperv1beta1 "github.com/pravega/zookeeper-operator/api/v1beta1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - policyv1beta1 "k8s.io/api/policy/v1beta1" + policyv1 "k8s.io/api/policy/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -38,6 +38,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" + + zookeeperv1beta1 "github.com/pravega/zookeeper-operator/api/v1beta1" ) // ReconcileTime is the delay between reconciliations @@ -467,7 +469,7 @@ func (r *ZookeeperClusterReconciler) reconcilePodDisruptionBudget(instance *zook if err = controllerutil.SetControllerReference(instance, pdb, r.Scheme); err != nil { return err } - foundPdb := &policyv1beta1.PodDisruptionBudget{} + foundPdb := &policyv1.PodDisruptionBudget{} err = r.Client.Get(context.TODO(), types.NamespacedName{ Name: pdb.Name, Namespace: pdb.Namespace, diff --git a/controllers/zookeepercluster_controller_test.go b/controllers/zookeepercluster_controller_test.go index fe530dbd7..712cccf1b 100644 --- a/controllers/zookeepercluster_controller_test.go +++ b/controllers/zookeepercluster_controller_test.go @@ -16,12 +16,9 @@ import ( "testing" "time" - "github.com/pravega/zookeeper-operator/api/v1beta1" - "github.com/pravega/zookeeper-operator/pkg/controller/config" - "github.com/pravega/zookeeper-operator/pkg/zk" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - policyv1beta1 "k8s.io/api/policy/v1beta1" + policyv1 "k8s.io/api/policy/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" @@ -29,6 +26,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "github.com/pravega/zookeeper-operator/api/v1beta1" + "github.com/pravega/zookeeper-operator/pkg/controller/config" + "github.com/pravega/zookeeper-operator/pkg/zk" + . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" ) @@ -185,7 +186,7 @@ var _ = Describe("ZookeeperCluster Controller", func() { }) It("should create a pdb", func() { - foundPdb := &policyv1beta1.PodDisruptionBudget{} + foundPdb := &policyv1.PodDisruptionBudget{} err = cl.Get(context.TODO(), req.NamespacedName, foundPdb) Ω(err).To(BeNil()) }) diff --git a/docker/Dockerfile b/docker/Dockerfile index baaf42b3c..1ef1fbdc7 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -21,4 +21,4 @@ RUN chmod +x /usr/local/bin/* COPY --from=0 /zu/build/libs/zu.jar /opt/libs/ RUN apt-get -q update && \ - apt-get install -y dnsutils curl procps + apt-get install -y dnsutils curl procps socat diff --git a/docker/bin/zookeeperLive.sh b/docker/bin/zookeeperLive.sh index 9291c0e5f..9e6d6e02f 100755 --- a/docker/bin/zookeeperLive.sh +++ b/docker/bin/zookeeperLive.sh @@ -13,7 +13,7 @@ set -ex source /conf/env.sh -OK=$(echo ruok | nc 127.0.0.1 $CLIENT_PORT) +OK=$(echo ruok | socat stdio tcp:localhost:$CLIENT_PORT) # Check to see if zookeeper service answers if [[ "$OK" == "imok" ]]; then diff --git a/docker/bin/zookeeperMetrics.sh b/docker/bin/zookeeperMetrics.sh index a5225ba83..bd0c875e3 100755 --- a/docker/bin/zookeeperMetrics.sh +++ b/docker/bin/zookeeperMetrics.sh @@ -13,4 +13,4 @@ set -ex source /conf/env.sh -echo mntr | nc localhost $CLIENT_PORT >& 1 +echo mntr | socat stdio tcp:localhost:$CLIENT_PORT >& 1 diff --git a/docker/bin/zookeeperReady.sh b/docker/bin/zookeeperReady.sh index e31469e6a..e9583e5b6 100755 --- a/docker/bin/zookeeperReady.sh +++ b/docker/bin/zookeeperReady.sh @@ -20,7 +20,7 @@ MYID_FILE=$DATA_DIR/myid LOG4J_CONF=/conf/log4j-quiet.properties STATIC_CONFIG=/data/conf/zoo.cfg -OK=$(echo ruok | nc 127.0.0.1 $CLIENT_PORT) +OK=$(echo ruok | socat stdio tcp:localhost:$CLIENT_PORT) # Check to see if zookeeper service answers if [[ "$OK" == "imok" ]]; then diff --git a/docker/bin/zookeeperTeardown.sh b/docker/bin/zookeeperTeardown.sh index 85af30fbb..9237dafa5 100755 --- a/docker/bin/zookeeperTeardown.sh +++ b/docker/bin/zookeeperTeardown.sh @@ -20,8 +20,8 @@ LOG4J_CONF=/conf/log4j-quiet.properties # Wait for client connections to drain. Kubernetes will wait until the confiugred # "terminationGracePeriodSeconds" before focibly killing the container -for (( i = 0; i < 6; i++ )); do - CONN_COUNT=`echo cons | nc localhost 2181 | grep -v "^$" |grep -v "/127.0.0.1:" | wc -l` +for ((i = 0; i < 6; i++)); do + CONN_COUNT=$(echo cons | socat stdio tcp:localhost:$CLIENT_PORT | grep -v "^$" | grep -v "/127.0.0.1:" | wc -l) if [[ "$CONN_COUNT" -gt 0 ]]; then echo "$CONN_COUNT non-local connections still connected." sleep 5 @@ -35,10 +35,10 @@ done set +e ZKURL=$(zkConnectionString) set -e -MYID=`cat $MYID_FILE` +MYID=$(cat $MYID_FILE) ZNODE_PATH="/zookeeper-operator/$CLUSTER_NAME" -CLUSTERSIZE=`java -Dlog4j.configuration=file:"$LOG4J_CONF" -jar /opt/libs/zu.jar sync $ZKURL $ZNODE_PATH` +CLUSTERSIZE=$(java -Dlog4j.configuration=file:"$LOG4J_CONF" -jar /opt/libs/zu.jar sync $ZKURL $ZNODE_PATH) echo "CLUSTER_SIZE=$CLUSTERSIZE, MyId=$MYID" if [[ -n "$CLUSTERSIZE" && "$CLUSTERSIZE" -lt "$MYID" ]]; then # If ClusterSize < MyId, this server is being permanantly removed. 
diff --git a/go.mod b/go.mod index 1f60fd171..def2cc2b8 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/pravega/zookeeper-operator -go 1.18 +go 1.19 require ( github.com/ghodss/yaml v1.0.0 @@ -22,7 +22,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful v2.15.0+incompatible // indirect + github.com/emicklei/go-restful v2.16.0+incompatible // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect github.com/go-logr/zapr v1.2.3 // indirect @@ -53,11 +53,11 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect - golang.org/x/net v0.0.0-20220526153639-5463443f8c37 // indirect + golang.org/x/net v0.7.0 // indirect golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401 // indirect - golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect - golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect - golang.org/x/text v0.3.7 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/term v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect golang.org/x/time v0.0.0-20220411224347-583f2d630306 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/go.sum b/go.sum index 2388f754b..31eb30954 100644 --- a/go.sum +++ b/go.sum @@ -133,8 +133,8 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.15.0+incompatible h1:8KpYO/Xl/ZudZs5RNOEhWMBY4hmzlZhhRd9cu+jrZP4= -github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.16.0+incompatible h1:rgqiKNjTnFQA6kkhFe16D8epTksy9HQ1MyrbDXSdYhM= +github.com/emicklei/go-restful v2.16.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -657,8 +657,8 @@ golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220526153639-5463443f8c37 h1:lUkvobShwKsOesNfWWlCS5q7fnbG1MEliIzwu886fn8= -golang.org/x/net v0.0.0-20220526153639-5463443f8c37/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -768,12 +768,12 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM= -golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -782,8 +782,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/pkg/zk/generators.go b/pkg/zk/generators.go index 25da7f9a6..f13ae37bb 100644 --- a/pkg/zk/generators.go +++ b/pkg/zk/generators.go @@ -16,12 +16,13 @@ import ( "strconv" "strings" - "github.com/pravega/zookeeper-operator/api/v1beta1" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" - policyv1beta1 "k8s.io/api/policy/v1beta1" + policyv1 "k8s.io/api/policy/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/pravega/zookeeper-operator/api/v1beta1" ) const ( @@ -91,6 +92,7 @@ func MakeStatefulSet(z *v1beta1.ZookeeperCluster) *appsv1.StatefulSet { GenerateName: z.GetName(), 
Labels: mergeLabels( z.Spec.Labels, + z.Spec.Pod.Labels, map[string]string{ "app": z.GetName(), "kind": "ZookeeperMember", @@ -171,9 +173,10 @@ func makeZkPodSpec(z *v1beta1.ZookeeperCluster, volumes []v1.Volume) v1.PodSpec zkContainer.Env = append(zkContainer.Env, z.Spec.Pod.Env...) podSpec := v1.PodSpec{ - Containers: append(z.Spec.Containers, zkContainer), - Affinity: z.Spec.Pod.Affinity, - Volumes: append(z.Spec.Volumes, volumes...), + Containers: append(z.Spec.Containers, zkContainer), + Affinity: z.Spec.Pod.Affinity, + TopologySpreadConstraints: z.Spec.Pod.TopologySpreadConstraints, + Volumes: append(z.Spec.Volumes, volumes...), } if !reflect.DeepEqual(v1.PodSecurityContext{}, z.Spec.Pod.SecurityContext) { podSpec.SecurityContext = z.Spec.Pod.SecurityContext @@ -352,19 +355,19 @@ func makeService(name string, ports []v1.ServicePort, clusterIP bool, external b } // MakePodDisruptionBudget returns a pdb for the zookeeper cluster -func MakePodDisruptionBudget(z *v1beta1.ZookeeperCluster) *policyv1beta1.PodDisruptionBudget { +func MakePodDisruptionBudget(z *v1beta1.ZookeeperCluster) *policyv1.PodDisruptionBudget { pdbCount := intstr.FromInt(int(z.Spec.MaxUnavailableReplicas)) - return &policyv1beta1.PodDisruptionBudget{ + return &policyv1.PodDisruptionBudget{ TypeMeta: metav1.TypeMeta{ Kind: "PodDisruptionBudget", - APIVersion: "policy/v1beta1", + APIVersion: "policy/v1", }, ObjectMeta: metav1.ObjectMeta{ Name: z.GetName(), Namespace: z.Namespace, Labels: z.Spec.Labels, }, - Spec: policyv1beta1.PodDisruptionBudgetSpec{ + Spec: policyv1.PodDisruptionBudgetSpec{ MaxUnavailable: &pdbCount, Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ diff --git a/pkg/zk/generators_test.go b/pkg/zk/generators_test.go index 7fdda6aba..f545ddb55 100644 --- a/pkg/zk/generators_test.go +++ b/pkg/zk/generators_test.go @@ -12,18 +12,18 @@ package zk_test import ( "fmt" + "strings" log "github.com/sirupsen/logrus" + policyv1 "k8s.io/api/policy/v1" - "strings" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/pravega/zookeeper-operator/api/v1beta1" "github.com/pravega/zookeeper-operator/pkg/utils" "github.com/pravega/zookeeper-operator/pkg/zk" - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - policyv1beta1 "k8s.io/api/policy/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -209,6 +209,10 @@ var _ = Describe("Generators Spec", func() { "exampleLabel", "exampleValue")) }) + + It("should have blank topologySpreadConstraints", func() { + Ω(sts.Spec.Template.Spec.TopologySpreadConstraints).To(HaveLen(0)) + }) }) Context("with pod policy annotations", func() { @@ -560,7 +564,7 @@ var _ = Describe("Generators Spec", func() { }) Context("#MakePodDisruptionBudget", func() { - var pdb *policyv1beta1.PodDisruptionBudget + var pdb *policyv1.PodDisruptionBudget var domainName string var zkClusterName string
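Taken together, the changes above expose pod topology spread constraints end to end: the new PodPolicy.TopologySpreadConstraints field, its generated deepcopy, the CRD schema, the Helm chart passthrough, and the makeZkPodSpec plumbing into the StatefulSet pod template. A minimal usage sketch follows; the spec.pod.topologySpreadConstraints path and the app pod label come from this diff, while the cluster name, topology key, and skew values are illustrative:

    # Illustrative CR. The operator labels member pods with app: <cluster name>,
    # so this selector matches a cluster named "zookeeper".
    apiVersion: zookeeper.pravega.io/v1beta1
    kind: ZookeeperCluster
    metadata:
      name: zookeeper
    spec:
      replicas: 3
      pod:
        topologySpreadConstraints:
          - maxSkew: 1
            topologyKey: topology.kubernetes.io/zone
            whenUnsatisfiable: ScheduleAnyway
            labelSelector:
              matchLabels:
                app: zookeeper

ScheduleAnyway keeps the constraint best-effort, so scheduling is not blocked in clusters with fewer zones than replicas; DoNotSchedule would enforce the skew strictly.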