From d88ce7c8d61b58ff7c2ac1fd4e99842cce32a78a Mon Sep 17 00:00:00 2001 From: Prabhaker24 <54395361+Prabhaker24@users.noreply.github.com> Date: Fri, 14 Aug 2020 12:01:22 +0530 Subject: [PATCH] Issue 63: Added support for ephemeral storage (#215) * added support for ephemeral storage Signed-off-by: prabhaker24 * changed emepheral ete Signed-off-by: prabhaker24 * fixed test Signed-off-by: prabhaker24 * fixed format Signed-off-by: prabhaker24 * doc changes and ut added Signed-off-by: prabhaker24 * fixed doc Signed-off-by: prabhaker24 * changed charts Signed-off-by: prabhaker24 * addressed review comments Signed-off-by: prabhaker24 * fixed ete Signed-off-by: prabhaker24 * changed readme file Signed-off-by: prabhaker24 * addressed comments Signed-off-by: prabhaker24 * fixed charts Signed-off-by: prabhaker24 * addressed comments Signed-off-by: prabhaker24 * changed default storage to persistence Signed-off-by: prabhaker24 * changed doc Signed-off-by: prabhaker24 * changed in zookeeper.yaml Signed-off-by: prabhaker24 * addressed comments Signed-off-by: prabhaker24 * addressed comments Signed-off-by: prabhaker24 * changed structure for ephemeral storage Signed-off-by: prabhaker24 * doc added Signed-off-by: prabhaker24 * fixed readme Signed-off-by: prabhaker24 * addressed comments Signed-off-by: prabhaker24 * addressed comment Signed-off-by: prabhaker24 Co-authored-by: prabhaker24 --- README.md | 33 +++++++ charts/zookeeper/README.md | 3 + charts/zookeeper/templates/zookeeper.yaml | 12 +++ charts/zookeeper/values.yaml | 12 +++ ...zookeeper_v1beta1_zookeepercluster_cr.yaml | 17 ++++ ...zookeeper_v1beta1_zookeepercluster_cr.yaml | 1 + go.mod | 2 + go.sum | 7 ++ .../v1beta1/zookeepercluster_types.go | 45 +++++++--- .../v1beta1/zookeepercluster_types_test.go | 13 +++ .../v1beta1/zz_generated.deepcopy.go | 5 +- .../zookeepercluster_controller.go | 2 +- pkg/test/e2e/e2eutil/spec_util.go | 8 ++ pkg/zk/generators.go | 63 +++++++------ pkg/zk/generators_test.go | 28 ++++++ 
test/e2e/ephemeral_test.go | 88 +++++++++++++++++++ test/e2e/zookeepercluster_test.go | 1 + 17 files changed, 299 insertions(+), 41 deletions(-) create mode 100644 deploy/cr/ECS/zookeeper_v1beta1_zookeepercluster_cr.yaml rename deploy/{crds => cr/pravega}/zookeeper_v1beta1_zookeepercluster_cr.yaml (92%) create mode 100644 test/e2e/ephemeral_test.go diff --git a/README.md b/README.md index 7ad80d09c..78377a60c 100644 --- a/README.md +++ b/README.md @@ -11,6 +11,7 @@ The project is currently alpha. While no breaking API changes are currently plan * [Usage](#usage) * [Installation of the Operator](#install-the-operator) * [Deploy a sample Zookeeper Cluster](#deploy-a-sample-zookeeper-cluster) + * [Deploy a sample ZooKeeper Cluster with Ephemeral Storage](#Deploy-a-sample-zookeeper-cluster-with-ephemeral-storage) * [Deploy a sample Zookeeper Cluster to a cluster using Istio](#deploy-a-sample-zookeeper-cluster-with-istio) * [Upgrade a Zookeeper Cluster](#upgrade-a-zookeeper-cluster) * [Uninstall the Zookeeper Cluster](#uninstall-the-zookeeper-cluster) @@ -155,6 +156,38 @@ svc/zookeeper-client ClusterIP 10.31.243.173 2181/TCP svc/zookeeper-headless ClusterIP None 2888/TCP,3888/TCP 2m ``` +### Deploy a sample Zookeeper cluster with Ephemeral storage + +Create a Yaml file called `zk.yaml` with the following content to install a 3-node Zookeeper cluster. + +```yaml +apiVersion: "zookeeper.pravega.io/v1beta1" +kind: "ZookeeperCluster" +metadata: + name: "example" +spec: + replicas: 3 + storageType: ephemeral +``` + +``` +$ kubectl create -f zk.yaml +``` + +After a couple of minutes, all cluster members should become ready. 
+ +``` +$ kubectl get zk + +NAME REPLICAS READY REPLICAS VERSION DESIRED VERSION INTERNAL ENDPOINT EXTERNAL ENDPOINT AGE +example 3 3 0.2.7 0.2.7 10.100.200.18:2181 N/A 94s +``` +>Note: The user should provide a value for only one of the fields persistence or ephemeral in the spec; if neither is specified, the default is persistence + +>Note: In case of ephemeral storage, the cluster may not be able to come back up if more than quorum number of nodes are restarted simultaneously. + +>Note: In case of ephemeral storage, data will be lost when a node gets restarted. + ### Deploy a sample Zookeeper cluster with Istio Create a Yaml file called `zk-with-istio.yaml` with the following content to install a 3-node Zookeeper cluster. diff --git a/charts/zookeeper/README.md index 681acb5b7..8166e9ff2 100644 --- a/charts/zookeeper/README.md +++ b/charts/zookeeper/README.md @@ -59,6 +59,9 @@ The following table lists the configurable parameters of the Zookeeper chart and | `config.tickTime` | Length of a single tick which is the basic time unit used by Zookeeper (measured in milliseconds) | `2000` | | `config.syncLimit` | Amount of time (in ticks) to allow followers to sync with Zookeeper | `2` | | `config.quorumListenOnAllIPs` | Whether Zookeeper server will listen for connections from its peers on all available IP addresses | `false` | +| `storageType` | Type of storage to be used; it can take either ephemeral or persistence as value | `persistence` | | `persistence.reclaimPolicy` | Reclaim policy for persistent volumes | `Delete` | | `persistence.storageClassName` | Storage class for persistent volumes | `standard` | | `persistence.volumeSize` | Size of the volume requested for persistent volumes | `20Gi` | +| `ephemeral.emptydirvolumesource.medium` | What type of storage medium should back the directory. | `""` | +| `ephemeral.emptydirvolumesource.sizeLimit` | Total amount of local storage required for the EmptyDir volume.
| | diff --git a/charts/zookeeper/templates/zookeeper.yaml b/charts/zookeeper/templates/zookeeper.yaml index b6d52104a..33740b674 100644 --- a/charts/zookeeper/templates/zookeeper.yaml +++ b/charts/zookeeper/templates/zookeeper.yaml @@ -43,6 +43,17 @@ spec: syncLimit: {{ .Values.config.syncLimit }} quorumListenOnAllIPs: {{ .Values.config.quorumListenOnAllIPs }} {{- end }} + storageType: {{ .Values.storageType }} + {{- if eq .Values.storageType "ephemeral" }} + ephemeral: + {{- if .Values.ephemeral.emptydirvolumesource }} + emptydirvolumesource: + medium: {{ .Values.ephemeral.emptydirvolumesource.medium }} + {{- if .Values.ephemeral.emptydirvolumesource.sizeLimit }} + sizeLimit: {{ .Values.ephemeral.emptydirvolumesource.sizeLimit }} + {{- end }} + {{- end }} + {{- else }} persistence: reclaimPolicy: {{ .Values.persistence.reclaimPolicy }} spec: @@ -50,3 +61,4 @@ spec: resources: requests: storage: {{ .Values.persistence.volumeSize }} + {{- end }} diff --git a/charts/zookeeper/values.yaml b/charts/zookeeper/values.yaml index 2bcca92e2..3d80d7372 100644 --- a/charts/zookeeper/values.yaml +++ b/charts/zookeeper/values.yaml @@ -26,6 +26,11 @@ config: {} # syncLimit: 2 # quorumListenOnAllIPs: false +## configure the storage type +## accepted values : persistence/ephemeral +## default option is persistence +storageType: persistence + persistence: storageClassName: standard ## specifying reclaim policy for PersistentVolumes @@ -33,6 +38,13 @@ persistence: reclaimPolicy: Delete volumeSize: 20Gi +ephemeral: + emptydirvolumesource: + ## specifying Medium for emptydirvolumesource + ## accepted values - ""/Memory + #medium: "" + #sizeLimit: 20Gi + hooks: image: repository: lachlanevenson/k8s-kubectl diff --git a/deploy/cr/ECS/zookeeper_v1beta1_zookeepercluster_cr.yaml b/deploy/cr/ECS/zookeeper_v1beta1_zookeepercluster_cr.yaml new file mode 100644 index 000000000..fa55d183d --- /dev/null +++ b/deploy/cr/ECS/zookeeper_v1beta1_zookeepercluster_cr.yaml @@ -0,0 +1,17 @@ +apiVersion: 
zookeeper.pravega.io/v1beta1 +kind: ZookeeperCluster +metadata: + name: zookeeper +spec: + replicas: 3 + image: + repository: pravega/zookeeper + tag: 0.2.8 + storageType: persistence + persistence: + reclaimPolicy: Retain + spec: + storageClassName: "standard" + resources: + requests: + storage: 20Gi diff --git a/deploy/crds/zookeeper_v1beta1_zookeepercluster_cr.yaml b/deploy/cr/pravega/zookeeper_v1beta1_zookeepercluster_cr.yaml similarity index 92% rename from deploy/crds/zookeeper_v1beta1_zookeepercluster_cr.yaml rename to deploy/cr/pravega/zookeeper_v1beta1_zookeepercluster_cr.yaml index 8bde8c380..be0dafdde 100644 --- a/deploy/crds/zookeeper_v1beta1_zookeepercluster_cr.yaml +++ b/deploy/cr/pravega/zookeeper_v1beta1_zookeepercluster_cr.yaml @@ -7,6 +7,7 @@ spec: image: repository: pravega/zookeeper tag: 0.2.8 + storageType: persistence persistence: reclaimPolicy: Delete spec: diff --git a/go.mod b/go.mod index bf906b492..9be7beb59 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,8 @@ require ( github.com/operator-framework/operator-sdk v0.17.0 github.com/pkg/errors v0.9.1 github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da + golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f // indirect + golang.org/x/tools v0.0.0-20200331202046-9d5940d49312 // indirect k8s.io/api v0.17.5 k8s.io/apimachinery v0.17.5 k8s.io/client-go v12.0.0+incompatible diff --git a/go.sum b/go.sum index 2097c0410..d5d559f1e 100644 --- a/go.sum +++ b/go.sum @@ -474,6 +474,7 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8 github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0 h1:reN85Pxc5larApoH1keMBiu2GWtPqXQ1nc9gx+jOU+E= github.com/kisielk/errcheck v1.2.0/go.mod 
h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -778,6 +779,7 @@ github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSf github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= @@ -852,6 +854,8 @@ golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= 
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -1004,9 +1008,12 @@ golang.org/x/tools v0.0.0-20191111182352-50fa39b762bc/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200115044656-831fdb1e1868/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200327195553-82bb89366a1e h1:qCZ8SbsZMjT0OuDPCEBxgLZic4NMj8Gj4vNXiTVRAaA= golang.org/x/tools v0.0.0-20200327195553-82bb89366a1e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200331202046-9d5940d49312 h1:2PHG+Ia3gK1K2kjxZnSylizb//eyaMG8gDFbOG7wLV8= +golang.org/x/tools v0.0.0-20200331202046-9d5940d49312/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= diff --git a/pkg/apis/zookeeper/v1beta1/zookeepercluster_types.go b/pkg/apis/zookeeper/v1beta1/zookeepercluster_types.go index b884f7156..18d14f753 100644 --- a/pkg/apis/zookeeper/v1beta1/zookeepercluster_types.go +++ b/pkg/apis/zookeeper/v1beta1/zookeepercluster_types.go @@ -12,6 +12,7 @@ package v1beta1 import ( "fmt" + "strings" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -59,13 +60,19 @@ type ZookeeperClusterSpec struct { Ports []v1.ContainerPort 
`json:"ports,omitempty"` // Pod defines the policy to create pod for the zookeeper cluster. - // // Updating the Pod does not take effect on any existing pods. Pod PodPolicy `json:"pod,omitempty"` + // StorageType is used to tell which type of storage we will be using + // It can take either ephemeral or persistence + // Default StorageType is persistence storage + StorageType string `json:"storageType,omitempty"` // Persistence is the configuration for zookeeper persistent layer. // PersistentVolumeClaimSpec and VolumeReclaimPolicy can be specified in here. Persistence *Persistence `json:"persistence,omitempty"` + // Ephemeral is the configuration which helps create ephemeral storage + // At any point only one of Persistence or Ephemeral should be present in the manifest + Ephemeral *Ephemeral `json:"ephemeral,omitempty"` // Conf is the zookeeper configuration, which will be used to generate the // static zookeeper configuration. If no configuration is provided required @@ -160,18 +167,27 @@ func (s *ZookeeperClusterSpec) withDefaults(z *ZookeeperCluster) (changed bool) if s.Pod.withDefaults(z) { changed = true } - if s.Persistence == nil { - s.Persistence = &Persistence{} - changed = true - } - if s.Persistence.withDefaults() { - changed = true + if strings.EqualFold(s.StorageType, "ephemeral") { + if s.Ephemeral == nil { + s.Ephemeral = &Ephemeral{} + s.Ephemeral.EmptyDirVolumeSource = v1.EmptyDirVolumeSource{} + changed = true + } + } else { + if s.Persistence == nil { + s.StorageType = "persistence" + s.Persistence = &Persistence{} + changed = true + } + if s.Persistence.withDefaults() { + s.StorageType = "persistence" + changed = true + } } return changed } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - // ZookeeperCluster is the Schema for the zookeeperclusters API // +k8s:openapi-gen=true type ZookeeperCluster struct { @@ -400,17 +416,24 @@ type Persistence struct { // The default value is Retain.
VolumeReclaimPolicy VolumeReclaimPolicy `json:"reclaimPolicy,omitempty"` // PersistentVolumeClaimSpec is the spec to describe PVC for the container - // This field is optional. If no PVC spec, stateful containers will use - // emptyDir as volume. + // This field is optional. If no PVC is specified default persistentvolume + // will get created. PersistentVolumeClaimSpec v1.PersistentVolumeClaimSpec `json:"spec,omitempty"` } +type Ephemeral struct { + //EmptyDirVolumeSource is optional and this will create the emptydir volume + //It has two parameters Medium and SizeLimit which are optional as well + //Medium specifies What type of storage medium should back this directory. + //SizeLimit specifies Total amount of local storage required for this EmptyDir volume. + EmptyDirVolumeSource v1.EmptyDirVolumeSource `json:"emptydirvolumesource,omitempty"` +} + func (p *Persistence) withDefaults() (changed bool) { if !p.VolumeReclaimPolicy.isValid() { changed = true p.VolumeReclaimPolicy = VolumeReclaimPolicyRetain } - p.PersistentVolumeClaimSpec.AccessModes = []v1.PersistentVolumeAccessMode{ v1.ReadWriteOnce, } diff --git a/pkg/apis/zookeeper/v1beta1/zookeepercluster_types_test.go b/pkg/apis/zookeeper/v1beta1/zookeepercluster_types_test.go index 772e28019..cc956525d 100644 --- a/pkg/apis/zookeeper/v1beta1/zookeepercluster_types_test.go +++ b/pkg/apis/zookeeper/v1beta1/zookeepercluster_types_test.go @@ -99,6 +99,19 @@ var _ = Describe("ZookeeperCluster Types", func() { }) }) + Context(" Ephemeral Storage", func() { + var z1 v1beta1.ZookeeperCluster + BeforeEach(func() { + z1 = *z.DeepCopy() + z1.Spec.StorageType = "ephemeral" + z1.WithDefaults() + }) + + It("should set the ephemeralstorage and value for EmptyDirVolumeSource.Medium to ''", func() { + Ω(fmt.Sprintf("%s", z1.Spec.Ephemeral.EmptyDirVolumeSource.Medium)).To(Equal("")) + }) + }) + Context("Conf", func() { var c v1beta1.ZookeeperConfig diff --git a/pkg/apis/zookeeper/v1beta1/zz_generated.deepcopy.go 
b/pkg/apis/zookeeper/v1beta1/zz_generated.deepcopy.go index 75152243c..b1c12aa9a 100644 --- a/pkg/apis/zookeeper/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/zookeeper/v1beta1/zz_generated.deepcopy.go @@ -225,9 +225,8 @@ func (in *ZookeeperClusterSpec) DeepCopyInto(out *ZookeeperClusterSpec) { } in.Pod.DeepCopyInto(&out.Pod) if in.Persistence != nil { - in, out := &in.Persistence, &out.Persistence - *out = new(Persistence) - (*in).DeepCopyInto(*out) + in, out := *in.Persistence, *out.Persistence + (in).DeepCopyInto(&out) } out.Conf = in.Conf return diff --git a/pkg/controller/zookeepercluster/zookeepercluster_controller.go b/pkg/controller/zookeepercluster/zookeepercluster_controller.go index 2d6f7ba80..4bf432e1e 100644 --- a/pkg/controller/zookeepercluster/zookeepercluster_controller.go +++ b/pkg/controller/zookeepercluster/zookeepercluster_controller.go @@ -610,7 +610,7 @@ func (r *ReconcileZookeeperCluster) yamlConfigMap(instance *zookeeperv1beta1.Zoo } func (r *ReconcileZookeeperCluster) reconcileFinalizers(instance *zookeeperv1beta1.ZookeeperCluster) (err error) { - if instance.Spec.Persistence.VolumeReclaimPolicy != zookeeperv1beta1.VolumeReclaimPolicyDelete { + if instance.Spec.Persistence != nil && instance.Spec.Persistence.VolumeReclaimPolicy != zookeeperv1beta1.VolumeReclaimPolicyDelete { return nil } if instance.DeletionTimestamp.IsZero() { diff --git a/pkg/test/e2e/e2eutil/spec_util.go b/pkg/test/e2e/e2eutil/spec_util.go index 72c429bdd..9594ef9e2 100644 --- a/pkg/test/e2e/e2eutil/spec_util.go +++ b/pkg/test/e2e/e2eutil/spec_util.go @@ -40,3 +40,11 @@ func NewClusterWithVersion(namespace, version string) *api.ZookeeperCluster { } return cluster } + +func NewClusterWithEmptyDir(namespace string) *api.ZookeeperCluster { + cluster := NewDefaultCluster(namespace) + cluster.Spec = api.ZookeeperClusterSpec{ + StorageType: "ephemeral", + } + return cluster +} diff --git a/pkg/zk/generators.go b/pkg/zk/generators.go index 32b78c764..79b88c6f4 100644 --- 
a/pkg/zk/generators.go +++ b/pkg/zk/generators.go @@ -37,8 +37,29 @@ func headlessSvcName(z *v1beta1.ZookeeperCluster) string { return fmt.Sprintf("%s-headless", z.GetName()) } +var zkDataVolume = "data" + // MakeStatefulSet return a zookeeper stateful set from the zk spec func MakeStatefulSet(z *v1beta1.ZookeeperCluster) *appsv1.StatefulSet { + extraVolumes := []v1.Volume{} + persistence := z.Spec.Persistence + pvcs := []v1.PersistentVolumeClaim{} + if strings.EqualFold(z.Spec.StorageType, "ephemeral") { + extraVolumes = append(extraVolumes, v1.Volume{ + Name: zkDataVolume, + VolumeSource: v1.VolumeSource{ + EmptyDir: &z.Spec.Ephemeral.EmptyDirVolumeSource, + }, + }) + } else { + pvcs = append(pvcs, v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: zkDataVolume, + Labels: map[string]string{"app": z.GetName()}, + }, + Spec: persistence.PersistentVolumeClaimSpec, + }) + } return &appsv1.StatefulSet{ TypeMeta: metav1.TypeMeta{ Kind: "StatefulSet", @@ -69,23 +90,14 @@ func MakeStatefulSet(z *v1beta1.ZookeeperCluster) *appsv1.StatefulSet { "kind": "ZookeeperMember", }, }, - Spec: makeZkPodSpec(z), - }, - VolumeClaimTemplates: []v1.PersistentVolumeClaim{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "data", - Namespace: z.Namespace, - Labels: map[string]string{"app": z.GetName()}, - }, - Spec: z.Spec.Persistence.PersistentVolumeClaimSpec, - }, + Spec: makeZkPodSpec(z, extraVolumes), }, + VolumeClaimTemplates: pvcs, }, } } -func makeZkPodSpec(z *v1beta1.ZookeeperCluster) v1.PodSpec { +func makeZkPodSpec(z *v1beta1.ZookeeperCluster, volumes []v1.Volume) v1.PodSpec { zkContainer := v1.Container{ Name: "zookeeper", Image: z.Spec.Image.ToString(), @@ -131,30 +143,29 @@ func makeZkPodSpec(z *v1beta1.ZookeeperCluster) v1.PodSpec { if z.Spec.Pod.Resources.Limits != nil || z.Spec.Pod.Resources.Requests != nil { zkContainer.Resources = z.Spec.Pod.Resources } + volumes = append(volumes, v1.Volume{ + Name: "conf", + VolumeSource: v1.VolumeSource{ + ConfigMap: 
&v1.ConfigMapVolumeSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: z.ConfigMapName(), + }, + }, + }, + }) + zkContainer.Env = append(zkContainer.Env, z.Spec.Pod.Env...) podSpec := v1.PodSpec{ Containers: []v1.Container{zkContainer}, Affinity: z.Spec.Pod.Affinity, - Volumes: []v1.Volume{ - { - Name: "conf", - VolumeSource: v1.VolumeSource{ - ConfigMap: &v1.ConfigMapVolumeSource{ - LocalObjectReference: v1.LocalObjectReference{ - Name: z.ConfigMapName(), - }, - }, - }, - }, - }, - TerminationGracePeriodSeconds: &z.Spec.Pod.TerminationGracePeriodSeconds, + Volumes: volumes, } if reflect.DeepEqual(v1.PodSecurityContext{}, z.Spec.Pod.SecurityContext) { podSpec.SecurityContext = z.Spec.Pod.SecurityContext } podSpec.NodeSelector = z.Spec.Pod.NodeSelector podSpec.Tolerations = z.Spec.Pod.Tolerations - + podSpec.TerminationGracePeriodSeconds = &z.Spec.Pod.TerminationGracePeriodSeconds return podSpec } diff --git a/pkg/zk/generators_test.go b/pkg/zk/generators_test.go index 183bde683..dc3c20567 100644 --- a/pkg/zk/generators_test.go +++ b/pkg/zk/generators_test.go @@ -11,11 +11,14 @@ package zk_test import ( + "fmt" + "strings" "testing" "github.com/pravega/zookeeper-operator/pkg/apis/zookeeper/v1beta1" "github.com/pravega/zookeeper-operator/pkg/utils" "github.com/pravega/zookeeper-operator/pkg/zk" + appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -149,6 +152,31 @@ var _ = Describe("Generators Spec", func() { }) }) + Context("#MakeStatefulSet with Ephemeral storage", func() { + var sts *appsv1.StatefulSet + + Context("with defaults", func() { + + BeforeEach(func() { + z := &v1beta1.ZookeeperCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example", + Namespace: "default", + }, + Spec: v1beta1.ZookeeperClusterSpec{}, + } + z.Spec = v1beta1.ZookeeperClusterSpec{ + StorageType: "ephemeral", + } + z.WithDefaults() + sts = zk.MakeStatefulSet(z) + }) + It("Checking the 
sts spec contains volumesource as EmptyDir", func() { + Ω(strings.ContainsAny(fmt.Sprintf("%v", sts.Spec.Template.Spec.Volumes), "EmptyDirVolumeSource")).Should(Equal(true)) + }) + }) + }) + Context("#MakeClientService", func() { var s *v1.Service var domainName string diff --git a/test/e2e/ephemeral_test.go b/test/e2e/ephemeral_test.go new file mode 100644 index 000000000..74eb0dc7b --- /dev/null +++ b/test/e2e/ephemeral_test.go @@ -0,0 +1,88 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package e2e + +import ( + "testing" + + . "github.com/onsi/gomega" + framework "github.com/operator-framework/operator-sdk/pkg/test" + zk_e2eutil "github.com/pravega/zookeeper-operator/pkg/test/e2e/e2eutil" +) + +// Test creating, scaling, and deleting a Zookeeper cluster backed by ephemeral storage +func testEphemeralStorage(t *testing.T) { + g := NewGomegaWithT(t) + + doCleanup := true + ctx := framework.NewTestCtx(t) + defer func() { + if doCleanup { + ctx.Cleanup() + } + }() + + namespace, err := ctx.GetNamespace() + g.Expect(err).NotTo(HaveOccurred()) + f := framework.Global + + defaultCluster := zk_e2eutil.NewClusterWithEmptyDir(namespace) + defaultCluster.WithDefaults() + defaultCluster.Status.Init() + + zk, err := zk_e2eutil.CreateCluster(t, f, ctx, defaultCluster) + g.Expect(err).NotTo(HaveOccurred()) + + // A default Zookeeper cluster should have 3 replicas + podSize := 3 + err = zk_e2eutil.WaitForClusterToBecomeReady(t, f, ctx, zk, podSize) + g.Expect(err).NotTo(HaveOccurred()) + + // This is to get the latest zk cluster object + zk, err = zk_e2eutil.GetCluster(t, f, ctx, zk) + g.Expect(err).NotTo(HaveOccurred()) + + // Scale up zk cluster, increase replicas to 5 + + zk.Spec.Replicas = 5 + podSize = 5 + + err = 
zk_e2eutil.UpdateCluster(t, f, ctx, zk) + g.Expect(err).NotTo(HaveOccurred()) + + err = zk_e2eutil.WaitForClusterToBecomeReady(t, f, ctx, zk, podSize) + g.Expect(err).NotTo(HaveOccurred()) + + // This is to get the latest zk cluster object + zk, err = zk_e2eutil.GetCluster(t, f, ctx, zk) + g.Expect(err).NotTo(HaveOccurred()) + + // Scale down zk cluster back to default + zk.Spec.Replicas = 3 + podSize = 3 + + err = zk_e2eutil.UpdateCluster(t, f, ctx, zk) + g.Expect(err).NotTo(HaveOccurred()) + + err = zk_e2eutil.WaitForClusterToBecomeReady(t, f, ctx, zk, podSize) + g.Expect(err).NotTo(HaveOccurred()) + + // Delete cluster + err = zk_e2eutil.DeleteCluster(t, f, ctx, zk) + g.Expect(err).NotTo(HaveOccurred()) + + // No need to do cleanup since the cluster CR has already been deleted + doCleanup = false + + err = zk_e2eutil.WaitForClusterToTerminate(t, f, ctx, zk) + g.Expect(err).NotTo(HaveOccurred()) + +} diff --git a/test/e2e/zookeepercluster_test.go b/test/e2e/zookeepercluster_test.go index b4262f5ae..b708c79f5 100644 --- a/test/e2e/zookeepercluster_test.go +++ b/test/e2e/zookeepercluster_test.go @@ -62,6 +62,7 @@ func testZookeeperCluster(t *testing.T) { "testUpgradeCluster": testUpgradeCluster, "testCreateRecreateCluster": testCreateRecreateCluster, "testScaleCluster": testScaleCluster, + "testEphemeralStorage": testEphemeralStorage, } for name, f := range testFuncs {