diff --git a/GNUmakefile b/GNUmakefile index 947e22e85a..655e4f5bd6 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -72,7 +72,7 @@ ifeq (${IMAGE_TAG}, ) endif ifeq (${TRAVIS_TAG}, ) - BASE_TAG = ci + BASE_TAG = v1.0.x-ci export BASE_TAG else BASE_TAG = ${TRAVIS_TAG} diff --git a/cmd/provisioner-localpv/app/config.go b/cmd/provisioner-localpv/app/config.go index 0894467f1f..c53eebb40c 100644 --- a/cmd/provisioner-localpv/app/config.go +++ b/cmd/provisioner-localpv/app/config.go @@ -190,3 +190,12 @@ func GetStorageClassName(pvc *v1.PersistentVolumeClaim) *string { } return pvc.Spec.StorageClassName } + +// GetLocalPVType extracts the Local PV Type from PV +func GetLocalPVType(pv *v1.PersistentVolume) string { + casType, found := pv.Labels[string(mconfig.CASTypeKey)] + if found { + return casType + } + return "" +} diff --git a/cmd/provisioner-localpv/app/helper.go b/cmd/provisioner-localpv/app/helper.go deleted file mode 100644 index f2764fbcf9..0000000000 --- a/cmd/provisioner-localpv/app/helper.go +++ /dev/null @@ -1,163 +0,0 @@ -/* -Copyright 2019 The OpenEBS Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
- -This code was taken from https://github.com/rancher/local-path-provisioner -and modified to work with the configuration options used by OpenEBS -*/ - -package app - -import ( - "fmt" - "path/filepath" - //"strings" - "time" - - "github.com/golang/glog" - //"github.com/pkg/errors" - - hostpath "github.com/openebs/maya/pkg/hostpath/v1alpha1" - - mContainer "github.com/openebs/maya/pkg/kubernetes/container/v1alpha1" - mPod "github.com/openebs/maya/pkg/kubernetes/pod/v1alpha1" - mVolume "github.com/openebs/maya/pkg/kubernetes/volume/v1alpha1" - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var ( - //CmdTimeoutCounts specifies the duration to wait for cleanup pod to be launched. - CmdTimeoutCounts = 120 -) - -// getPathAndNodeForPV inspects the PV spec to determine the host path used -// and the node (via the NodeAffinity) on which host path exists. -func (p *Provisioner) getPathAndNodeForPV(pv *v1.PersistentVolume) (path, node string, err error) { - hostPath := pv.Spec.PersistentVolumeSource.HostPath - if hostPath == nil { - return "", "", fmt.Errorf("no HostPath set") - } - path = hostPath.Path - - nodeAffinity := pv.Spec.NodeAffinity - if nodeAffinity == nil { - return "", "", fmt.Errorf("no NodeAffinity set") - } - required := nodeAffinity.Required - if required == nil { - return "", "", fmt.Errorf("no NodeAffinity.Required set") - } - - node = "" - for _, selectorTerm := range required.NodeSelectorTerms { - for _, expression := range selectorTerm.MatchExpressions { - if expression.Key == KeyNode && expression.Operator == v1.NodeSelectorOpIn { - if len(expression.Values) != 1 { - return "", "", fmt.Errorf("multiple values for the node affinity") - } - node = expression.Values[0] - break - } - } - if node != "" { - break - } - } - if node == "" { - return "", "", fmt.Errorf("cannot find affinited node") - } - return path, node, nil -} - -// createCleanupPod launches a helper(busybox) pod, to delete the host path. 
-// This provisioner expects that the host paths are created using -// an unique PV path - under a given BasePath. From the absolute path, -// it extracts the base path and the PV path. The helper pod is then launched -// by mounting the base path - and performing a delete on the unique PV path. -func (p *Provisioner) createCleanupPod(pOpts *HelperPodOptions) (err error) { - //func (p *Provisioner) createCleanupPod(cmdsForPath []string, name, path, node string) (err error) { - if pOpts.name == "" || pOpts.path == "" || pOpts.nodeName == "" { - return fmt.Errorf("invalid empty name or path or node") - } - - // Initialize HostPath builder and validate that - // non-root directories are not passed for delete - // Extract the base path and the volume unique path. - parentDir, volumeDir, vErr := hostpath.NewBuilder().WithPath(pOpts.path). - WithCheckf(hostpath.IsNonRoot(), "path should not be a root directory: path %v", pOpts.path). - ExtractSubPath() - if vErr != nil { - return vErr - } - - conObj, _ := mContainer.Builder(). - WithName("local-path-cleanup"). - WithImage(p.helperImage). - WithCommand(append(pOpts.cmdsForPath, filepath.Join("/data/", volumeDir))). - WithVolumeMounts([]v1.VolumeMount{ - { - Name: "data", - ReadOnly: false, - MountPath: "/data/", - }, - }). - Build() - containers := []v1.Container{conObj} - - volObj, _ := mVolume.NewBuilder(). - WithName("data"). - WithHostDirectory(parentDir). - Build() - volumes := []v1.Volume{*volObj} - - helperPod, _ := mPod.NewBuilder(). - WithName("cleanup-" + pOpts.name). - WithRestartPolicy(v1.RestartPolicyNever). - WithNodeName(pOpts.nodeName). - WithContainers(containers). - WithVolumes(volumes). - Build() - - //Launch the cleanup pod. 
- pod, err := p.kubeClient.CoreV1().Pods(p.namespace).Create(helperPod) - if err != nil { - return err - } - - defer func() { - e := p.kubeClient.CoreV1().Pods(p.namespace).Delete(pod.Name, &metav1.DeleteOptions{}) - if e != nil { - glog.Errorf("unable to delete the helper pod: %v", e) - } - }() - - //Wait for the cleanup pod to complete it job and exit - completed := false - for i := 0; i < CmdTimeoutCounts; i++ { - if pod, err := p.kubeClient.CoreV1().Pods(p.namespace).Get(pod.Name, metav1.GetOptions{}); err != nil { - return err - } else if pod.Status.Phase == v1.PodSucceeded { - completed = true - break - } - time.Sleep(1 * time.Second) - } - if !completed { - return fmt.Errorf("create process timeout after %v seconds", CmdTimeoutCounts) - } - - glog.Infof("Volume %v has been cleaned on %v:%v", pOpts.name, pOpts.nodeName, pOpts.path) - return nil -} diff --git a/cmd/provisioner-localpv/app/helper_blockdevice.go b/cmd/provisioner-localpv/app/helper_blockdevice.go new file mode 100644 index 0000000000..b6b1da5296 --- /dev/null +++ b/cmd/provisioner-localpv/app/helper_blockdevice.go @@ -0,0 +1,220 @@ +/* +Copyright 2019 The OpenEBS Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +This code was taken from https://github.com/rancher/local-path-provisioner +and modified to work with the configuration options used by OpenEBS +*/ + +package app + +import ( + //"fmt" + //"path/filepath" + //"strings" + "time" + + "github.com/golang/glog" + //"github.com/pkg/errors" + errors "github.com/openebs/maya/pkg/errors/v1alpha1" + + //hostpath "github.com/openebs/maya/pkg/hostpath/v1alpha1" + + //container "github.com/openebs/maya/pkg/kubernetes/container/v1alpha1" + //pod "github.com/openebs/maya/pkg/kubernetes/pod/v1alpha1" + //volume "github.com/openebs/maya/pkg/kubernetes/volume/v1alpha1" + blockdevice "github.com/openebs/maya/pkg/blockdevice/v1alpha2" + blockdeviceclaim "github.com/openebs/maya/pkg/blockdeviceclaim/v1alpha1" + corev1 "k8s.io/api/core/v1" + //ndmv1alpha1 "github.com/openebs/maya/pkg/apis/openebs.io/ndm/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + bdcStorageClassAnnotation = "local.openebs.io/blockdeviceclaim" +) + +//TODO +var ( + //WaitForBDTimeoutCounts specifies the duration to wait for BDC to be associated with a BD + //The duration is the value specified here multiplied by 5 + WaitForBDTimeoutCounts = 12 +) + +// HelperBlockDeviceOptions contains the options that +// will launch a BDC on a specific node (nodeName) +type HelperBlockDeviceOptions struct { + nodeName string + name string + capacity string + // deviceType string + bdcName string +} + +// validate checks that the required fields to create BDC +// are available +func (blkDevOpts *HelperBlockDeviceOptions) validate() error { + glog.Infof("Validate Block Device Options") + if blkDevOpts.name == "" || blkDevOpts.nodeName == "" { + return errors.Errorf("invalid empty name or node") + } + return nil +} + +// hasBDC checks if the bdcName has already been determined +func (blkDevOpts *HelperBlockDeviceOptions) hasBDC() bool { + glog.Infof("Already has BDC %t", blkDevOpts.bdcName != "") + return blkDevOpts.bdcName != "" +} + +// 
setBlockDeviceClaimFromPV inspects the PV and fetches the BDC associated
+// with the Local PV.
+func (blkDevOpts *HelperBlockDeviceOptions) setBlockDeviceClaimFromPV(pv *corev1.PersistentVolume) {
+	glog.Infof("Setting Block Device Claim From PV")
+	bdc, found := pv.Annotations[bdcStorageClassAnnotation]
+	if found {
+		blkDevOpts.bdcName = bdc
+	}
+}
+
+// createBlockDeviceClaim creates a new BlockDeviceClaim for a given
+// Local PV
+func (p *Provisioner) createBlockDeviceClaim(blkDevOpts *HelperBlockDeviceOptions) error {
+	glog.Infof("Creating Block Device Claim")
+	if err := blkDevOpts.validate(); err != nil {
+		return err
+	}
+
+	//Create a BDC for this PV (of type device). NDM will
+	//look for the device matching the capacity and node on which
+	//pod is being scheduled. Since this BDC is specific to a PV
+	//use the name of the bdc to be: "bdc-"
+	//TODO: Look into setting the labels and owner references
+	//on BDC with PV/PVC details.
+	bdcName := "bdc-" + blkDevOpts.name
+
+	//Check if the BDC is already created. This can happen
+	//if the previous reconciliation of PVC-PV, resulted in
+	//creating a BDC, but BD was not yet available for 60+ seconds
+	_, err := blockdeviceclaim.NewKubeClient().
+		WithNamespace(p.namespace).
+		Get(bdcName, metav1.GetOptions{})
+	if err == nil {
+		blkDevOpts.bdcName = bdcName
+		glog.Infof("Volume %v has been initialized with BDC:%v", blkDevOpts.name, bdcName)
+		return nil
+	}
+
+	bdcObj, err := blockdeviceclaim.NewBuilder().
+		WithNamespace(p.namespace).
+		WithName(bdcName).
+		WithHostName(blkDevOpts.nodeName).
+		WithCapacity(blkDevOpts.capacity).
+		Build()
+
+	if err != nil {
+		//TODO : Need to relook at this error
+		return errors.Wrapf(err, "unable to build BDC")
+	}
+
+	_, err = blockdeviceclaim.NewKubeClient().
+		WithNamespace(p.namespace).
+ Create(bdcObj.Object) + + if err != nil { + //TODO : Need to relook at this error + //If the error is about BDC being already present, then return nil + return errors.Wrapf(err, "failed to create BDC{%v}", bdcName) + } + + blkDevOpts.bdcName = bdcName + + return nil +} + +// getBlockDevicePath fetches the BDC associated with this Local PV +// or creates one. From the BDC, fetch the BD and get the path +func (p *Provisioner) getBlockDevicePath(blkDevOpts *HelperBlockDeviceOptions) (string, string, error) { + + glog.Infof("Getting Block Device Path") + if !blkDevOpts.hasBDC() { + err := p.createBlockDeviceClaim(blkDevOpts) + if err != nil { + return "", "", err + } + } + + //TODO + glog.Infof("Getting Block Device Path from BDC %v", blkDevOpts.bdcName) + bdName := "" + //Check if the BDC is created + for i := 0; i < WaitForBDTimeoutCounts; i++ { + + bdc, err := blockdeviceclaim.NewKubeClient(). + WithNamespace(p.namespace). + Get(blkDevOpts.bdcName, metav1.GetOptions{}) + if err != nil { + //TODO : Need to relook at this error + //If the error is about BDC being already present, then return nil + return "", "", errors.Errorf("unable to get BDC %v associated with PV:%v %v", blkDevOpts.bdcName, blkDevOpts.name, err) + } + + bdName = bdc.Spec.BlockDeviceName + //Check if the BDC is associated with a BD + if bdName == "" { + time.Sleep(5 * time.Second) + } else { + break + } + } + + //Get the BD Path. + bd, err := blockdevice.NewKubeClient(). + WithNamespace(p.namespace). + Get(bdName, metav1.GetOptions{}) + if err != nil { + //TODO : Need to relook at this error + //If the error is about BDC being already present, then return nil + return "", "", errors.Errorf("unable to find BD:%v for BDC:%v associated with PV:%v", bdName, blkDevOpts.bdcName, blkDevOpts.name) + } + + path := bd.Spec.FileSystem.Mountpoint + blkPath := bd.Spec.Path + if len(bd.Spec.DevLinks) > 0 { + //TODO : Iterate and get the first path by id. 
+ blkPath = bd.Spec.DevLinks[0].Links[0] + } + + return path, blkPath, nil +} + +// deleteBlockDeviceClaim deletes the BlockDeviceClaim associated with the +// PV being deleted. +func (p *Provisioner) deleteBlockDeviceClaim(blkDevOpts *HelperBlockDeviceOptions) error { + glog.Infof("Delete Block Device Claim") + if !blkDevOpts.hasBDC() { + return nil + } + + //TODO: Issue a delete BDC request + err := blockdeviceclaim.NewKubeClient(). + WithNamespace(p.namespace). + Delete(blkDevOpts.bdcName, &metav1.DeleteOptions{}) + + if err != nil { + //TODO : Need to relook at this error + return errors.Errorf("unable to delete BDC %v associated with PV:%v", blkDevOpts.bdcName, blkDevOpts.name) + } + return nil +} diff --git a/cmd/provisioner-localpv/app/helper_hostpath.go b/cmd/provisioner-localpv/app/helper_hostpath.go new file mode 100644 index 0000000000..e4f4d2cde1 --- /dev/null +++ b/cmd/provisioner-localpv/app/helper_hostpath.go @@ -0,0 +1,285 @@ +/* +Copyright 2019 The OpenEBS Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +This code was taken from https://github.com/rancher/local-path-provisioner +and modified to work with the configuration options used by OpenEBS +*/ + +package app + +import ( + //"fmt" + "path/filepath" + //"strings" + "time" + + "github.com/golang/glog" + //"github.com/pkg/errors" + errors "github.com/openebs/maya/pkg/errors/v1alpha1" + + hostpath "github.com/openebs/maya/pkg/hostpath/v1alpha1" + + container "github.com/openebs/maya/pkg/kubernetes/container/v1alpha1" + pod "github.com/openebs/maya/pkg/kubernetes/pod/v1alpha1" + volume "github.com/openebs/maya/pkg/kubernetes/volume/v1alpha1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + //CmdTimeoutCounts specifies the duration to wait for cleanup pod to be launched. + CmdTimeoutCounts = 120 +) + +// HelperPodOptions contains the options that +// will launch a Pod on a specific node (nodeName) +// to execute a command (cmdsForPath) on a given +// volume path (path) +type HelperPodOptions struct { + //nodeName represents the host where pod should be launched. + nodeName string + //name is the name of the PV for which the pod is being launched + name string + //cmdsForPath represent either create (mkdir) or delete(rm) + //commands that need to be executed on the volume path. + cmdsForPath []string + //path is the volume hostpath directory + path string +} + +// validate checks that the required fields to launch +// helper pods are valid. helper pods are used to either +// create or delete a directory (path) on a given node (nodeName). +// name refers to the volume being created or deleted. +func (pOpts *HelperPodOptions) validate() error { + if pOpts.name == "" || pOpts.path == "" || pOpts.nodeName == "" { + return errors.Errorf("invalid empty name or path or node") + } + return nil +} + +// getPathAndNodeForPV inspects the PV spec to determine the hostpath +// and the node of OpenEBS Local PV. 
Both types of OpenEBS Local PV +// (storage type = hostpath and device) use: +// - LocalVolumeSource to specify the path and +// - NodeAffinity to specify the node. +// Note: This function also takes care of deleting OpenEBS Local PVs +// provisioned in 0.9, which were using HostPathVolumeSource to +// specify the path. +func (p *Provisioner) getPathAndNodeForPV(pv *corev1.PersistentVolume) (string, string, error) { + path := "" + local := pv.Spec.PersistentVolumeSource.Local + if local == nil { + //Handle the case of Local PV created in 0.9 using HostPathVolumeSource + hostPath := pv.Spec.PersistentVolumeSource.HostPath + if hostPath == nil { + return "", "", errors.Errorf("no HostPath set") + } + path = hostPath.Path + } else { + path = local.Path + } + + nodeAffinity := pv.Spec.NodeAffinity + if nodeAffinity == nil { + return "", "", errors.Errorf("no NodeAffinity set") + } + required := nodeAffinity.Required + if required == nil { + return "", "", errors.Errorf("no NodeAffinity.Required set") + } + + node := "" + for _, selectorTerm := range required.NodeSelectorTerms { + for _, expression := range selectorTerm.MatchExpressions { + if expression.Key == KeyNode && expression.Operator == corev1.NodeSelectorOpIn { + if len(expression.Values) != 1 { + return "", "", errors.Errorf("multiple values for the node affinity") + } + node = expression.Values[0] + break + } + } + if node != "" { + break + } + } + if node == "" { + return "", "", errors.Errorf("cannot find affinited node") + } + return path, node, nil +} + +// createInitPod launches a helper(busybox) pod, to create the host path. +// The local pv expect the hostpath to be already present before mounting +// into pod. Validate that the local pv host path is not created under root. 
+func (p *Provisioner) createInitPod(pOpts *HelperPodOptions) error { + //err := pOpts.validate() + if err := pOpts.validate(); err != nil { + return err + } + + // Initialize HostPath builder and validate that + // volume directory is not directly under root. + // Extract the base path and the volume unique path. + parentDir, volumeDir, vErr := hostpath.NewBuilder().WithPath(pOpts.path). + WithCheckf(hostpath.IsNonRoot(), "volume directory {%v} should not be under root directory", pOpts.path). + ExtractSubPath() + if vErr != nil { + return vErr + } + + conObj, _ := container.NewBuilder(). + WithName("local-path-init"). + WithImage(p.helperImage). + WithCommand(append(pOpts.cmdsForPath, filepath.Join("/data/", volumeDir))). + WithVolumeMounts([]corev1.VolumeMount{ + { + Name: "data", + ReadOnly: false, + MountPath: "/data/", + }, + }). + Build() + //containers := []v1.Container{conObj} + + volObj, _ := volume.NewBuilder(). + WithName("data"). + WithHostDirectory(parentDir). + Build() + //volumes := []v1.Volume{*volObj} + + helperPod, _ := pod.NewBuilder(). + WithName("init-" + pOpts.name). + WithRestartPolicy(corev1.RestartPolicyNever). + WithNodeName(pOpts.nodeName). + WithContainer(conObj). + WithVolume(*volObj). + Build() + + //Launch the init pod. 
+ hPod, err := p.kubeClient.CoreV1().Pods(p.namespace).Create(helperPod) + if err != nil { + return err + } + + defer func() { + e := p.kubeClient.CoreV1().Pods(p.namespace).Delete(hPod.Name, &metav1.DeleteOptions{}) + if e != nil { + glog.Errorf("unable to delete the helper pod: %v", e) + } + }() + + //Wait for the cleanup pod to complete it job and exit + completed := false + for i := 0; i < CmdTimeoutCounts; i++ { + checkPod, err := p.kubeClient.CoreV1().Pods(p.namespace).Get(hPod.Name, metav1.GetOptions{}) + if err != nil { + return err + } else if checkPod.Status.Phase == corev1.PodSucceeded { + completed = true + break + } + time.Sleep(1 * time.Second) + } + if !completed { + return errors.Errorf("create process timeout after %v seconds", CmdTimeoutCounts) + } + + //glog.Infof("Volume %v has been initialized on %v:%v", pOpts.name, pOpts.nodeName, pOpts.path) + return nil +} + +// createCleanupPod launches a helper(busybox) pod, to delete the host path. +// This provisioner expects that the host paths are created using +// an unique PV path - under a given BasePath. From the absolute path, +// it extracts the base path and the PV path. The helper pod is then launched +// by mounting the base path - and performing a delete on the unique PV path. +func (p *Provisioner) createCleanupPod(pOpts *HelperPodOptions) error { + //err := pOpts.validate() + if err := pOpts.validate(); err != nil { + return err + } + + // Initialize HostPath builder and validate that + // volume directory is not directly under root. + // Extract the base path and the volume unique path. + parentDir, volumeDir, vErr := hostpath.NewBuilder().WithPath(pOpts.path). + WithCheckf(hostpath.IsNonRoot(), "volume directory {%v} should not be under root directory", pOpts.path). + ExtractSubPath() + if vErr != nil { + return vErr + } + + conObj, _ := container.NewBuilder(). + WithName("local-path-cleanup"). + WithImage(p.helperImage). 
+ WithCommand(append(pOpts.cmdsForPath, filepath.Join("/data/", volumeDir))). + WithVolumeMounts([]corev1.VolumeMount{ + { + Name: "data", + ReadOnly: false, + MountPath: "/data/", + }, + }). + Build() + //containers := []v1.Container{conObj} + + volObj, _ := volume.NewBuilder(). + WithName("data"). + WithHostDirectory(parentDir). + Build() + //volumes := []v1.Volume{*volObj} + + helperPod, _ := pod.NewBuilder(). + WithName("cleanup-" + pOpts.name). + WithRestartPolicy(corev1.RestartPolicyNever). + WithNodeName(pOpts.nodeName). + WithContainer(conObj). + WithVolume(*volObj). + Build() + + //Launch the cleanup pod. + hPod, err := p.kubeClient.CoreV1().Pods(p.namespace).Create(helperPod) + if err != nil { + return err + } + + defer func() { + e := p.kubeClient.CoreV1().Pods(p.namespace).Delete(hPod.Name, &metav1.DeleteOptions{}) + if e != nil { + glog.Errorf("unable to delete the helper pod: %v", e) + } + }() + + //Wait for the cleanup pod to complete it job and exit + completed := false + for i := 0; i < CmdTimeoutCounts; i++ { + checkPod, err := p.kubeClient.CoreV1().Pods(p.namespace).Get(hPod.Name, metav1.GetOptions{}) + if err != nil { + return err + } else if checkPod.Status.Phase == corev1.PodSucceeded { + completed = true + break + } + time.Sleep(1 * time.Second) + } + if !completed { + return errors.Errorf("create process timeout after %v seconds", CmdTimeoutCounts) + } + + glog.Infof("Volume %v has been cleaned on %v:%v", pOpts.name, pOpts.nodeName, pOpts.path) + return nil +} diff --git a/cmd/provisioner-localpv/app/helperpod_opts.go b/cmd/provisioner-localpv/app/helperpod_opts.go deleted file mode 100644 index dc1856cc12..0000000000 --- a/cmd/provisioner-localpv/app/helperpod_opts.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright 2019 The OpenEBS Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -*/ - -package app - -// HelperPodOptions contains the options that -// will be extracted from the persistent volume -type HelperPodOptions struct { - cmdsForPath []string - name string - path string - nodeName string -} diff --git a/cmd/provisioner-localpv/app/provisioner.go b/cmd/provisioner-localpv/app/provisioner.go index f7f753ed12..896e7b2161 100644 --- a/cmd/provisioner-localpv/app/provisioner.go +++ b/cmd/provisioner-localpv/app/provisioner.go @@ -40,7 +40,6 @@ import ( pvController "github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller" mconfig "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" - mPV "github.com/openebs/maya/pkg/kubernetes/persistentvolume/v1alpha1" "k8s.io/api/core/v1" //metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -97,7 +96,7 @@ func (p *Provisioner) Provision(opts pvController.VolumeOptions) (*v1.Persistent return nil, fmt.Errorf("Only support ReadWriteOnce access mode") } } - node := opts.SelectedNode + //node := opts.SelectedNode if opts.SelectedNode == nil { return nil, fmt.Errorf("configuration error, no node was specified") } @@ -114,52 +113,14 @@ func (p *Provisioner) Provision(opts pvController.VolumeOptions) (*v1.Persistent //TODO: Determine if hostpath or device based Local PV should be created stgType := pvCASConfig.GetStorageType() - if stgType != "hostpath" { - return nil, fmt.Errorf("PV with StorageType %v is not supported", stgType) + if stgType == "hostpath" { + return p.ProvisionHostPath(opts, pvCASConfig) } - - path, err := pvCASConfig.GetPath() - if err != 
nil { - return nil, err - } - - glog.Infof("Creating volume %v at %v:%v", name, node.Name, path) - - // VolumeMode will always be specified as Filesystem for host path volume, - // and the value passed in from the PVC spec will be ignored. - fs := v1.PersistentVolumeFilesystem - - // It is possible that the HostPath doesn't already exist on the node. - // Set the Local PV to create it. - //hostPathType := v1.HostPathDirectoryOrCreate - - // TODO initialize the Labels and annotations - // Use annotations to specify the context using which the PV was created. - //volAnnotations := make(map[string]string) - //volAnnotations[string(v1alpha1.CASTypeKey)] = casVolume.Spec.CasType - //fstype := casVolume.Spec.FSType - - //labels := make(map[string]string) - //labels[string(v1alpha1.CASTypeKey)] = casVolume.Spec.CasType - //labels[string(v1alpha1.StorageClassKey)] = *className - - //TODO Change the following to a builder pattern - pvObj, err := mPV.NewBuilder(). - WithName(name). - WithReclaimPolicy(opts.PersistentVolumeReclaimPolicy). - WithAccessModes(pvc.Spec.AccessModes). - WithVolumeMode(fs). - WithCapacityQty(pvc.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]). - WithHostDirectory(path). - WithNodeAffinity(node.Name). - Build() - - if err != nil { - return nil, err + if stgType == "device" { + return p.ProvisionBlockDevice(opts, pvCASConfig) } - return pvObj, nil - + return nil, fmt.Errorf("PV with StorageType %v is not supported", stgType) } // Delete is invoked by the PVC controller to perform clean-up @@ -171,30 +132,16 @@ func (p *Provisioner) Delete(pv *v1.PersistentVolume) (err error) { err = errors.Wrapf(err, "failed to delete volume %v", pv.Name) }() - //Determine the path and node of the Local PV. - path, node, err := p.getPathAndNodeForPV(pv) - if err != nil { - return err - } - //Initiate clean up only when reclaim policy is not retain. 
if pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimRetain { - glog.Infof("Deleting volume %v at %v:%v", pv.Name, node, path) - cleanupCmdsForPath := []string{"rm", "-rf"} - podOpts := &HelperPodOptions{ - cmdsForPath: cleanupCmdsForPath, - name: pv.Name, - path: path, - nodeName: node, + //TODO: Determine the type of PV + pvType := GetLocalPVType(pv) + if pvType == "local-device" { + return p.DeleteBlockDevice(pv) } - - //if err := p.createCleanupPod(cleanupCmdsForPath, pv.Name, path, node); err != nil { - if err := p.createCleanupPod(podOpts); err != nil { - glog.Infof("clean up volume %v failed: %v", pv.Name, err) - return err - } - return nil + return p.DeleteHostPath(pv) } + glog.Infof("Retained volume %v", pv.Name) return nil } diff --git a/cmd/provisioner-localpv/app/provisioner_blockdevice.go b/cmd/provisioner-localpv/app/provisioner_blockdevice.go new file mode 100644 index 0000000000..c0a73e34ad --- /dev/null +++ b/cmd/provisioner-localpv/app/provisioner_blockdevice.go @@ -0,0 +1,119 @@ +/* +Copyright 2019 The OpenEBS Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package app + +import ( + "github.com/golang/glog" + "github.com/pkg/errors" + + pvController "github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller" + mconfig "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" + mPV "github.com/openebs/maya/pkg/kubernetes/persistentvolume/v1alpha1" + "k8s.io/api/core/v1" + //metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ProvisionBlockDevice is invoked by the Provisioner to create a Local PV +// with a Block Device +func (p *Provisioner) ProvisionBlockDevice(opts pvController.VolumeOptions, volumeConfig *VolumeConfig) (*v1.PersistentVolume, error) { + pvc := opts.PVC + node := opts.SelectedNode + name := opts.PVName + capacity := opts.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] + stgType := volumeConfig.GetStorageType() + + //Extract the details to create a Block Device Claim + blkDevOpts := &HelperBlockDeviceOptions{ + nodeName: node.Name, + name: name, + capacity: capacity.String(), + } + + path, blkPath, err := p.getBlockDevicePath(blkDevOpts) + if err != nil { + glog.Infof("Initialize volume %v failed: %v", name, err) + return nil, err + } + + glog.Infof("Creating volume %v on %v at %v(%v)", name, node.Name, path, blkPath) + + // TODO + // VolumeMode will always be specified as Filesystem for host path volume, + // and the value passed in from the PVC spec will be ignored. + fs := v1.PersistentVolumeFilesystem + + // It is possible that the HostPath doesn't already exist on the node. + // Set the Local PV to create it. + //hostPathType := v1.HostPathDirectoryOrCreate + + // TODO initialize the Labels and annotations + // Use annotations to specify the context using which the PV was created. 
+	volAnnotations := make(map[string]string)
+	volAnnotations[bdcStorageClassAnnotation] = blkDevOpts.bdcName
+	//fstype := casVolume.Spec.FSType
+
+	labels := make(map[string]string)
+	labels[string(mconfig.CASTypeKey)] = "local-" + stgType
+	//labels[string(v1alpha1.StorageClassKey)] = *className
+
+	//TODO Change the following to a builder pattern
+	pvObj, err := mPV.NewBuilder().
+		WithName(name).
+		WithLabels(labels).
+		WithAnnotations(volAnnotations).
+		WithReclaimPolicy(opts.PersistentVolumeReclaimPolicy).
+		WithAccessModes(pvc.Spec.AccessModes).
+		WithVolumeMode(fs).
+		WithCapacityQty(pvc.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]).
+		WithLocalHostDirectory(path).
+		WithNodeAffinity(node.Name).
+		Build()
+
+	if err != nil {
+		return nil, err
+	}
+
+	return pvObj, nil
+
+}
+
+// DeleteBlockDevice is invoked by the PVC controller to perform clean-up
+// activities before deleting the PV object. If reclaim policy is
+// set to not-retain, then this function will delete the associated BDC
+func (p *Provisioner) DeleteBlockDevice(pv *v1.PersistentVolume) (err error) {
+	defer func() {
+		err = errors.Wrapf(err, "failed to delete volume %v", pv.Name)
+	}()
+
+	blkDevOpts := &HelperBlockDeviceOptions{
+		name: pv.Name,
+	}
+
+	//Determine if a BDC is set on the PV and save it to BlockDeviceOptions
+	blkDevOpts.setBlockDeviceClaimFromPV(pv)
+
+	//Initiate clean up only when reclaim policy is not retain.
+	//TODO: this part of the code could be eliminated by setting up
+	// BDC owner reference to PVC.
+ glog.Infof("Release the Block Device Claim %v for PV %v", blkDevOpts.bdcName, pv.Name) + + if err := p.deleteBlockDeviceClaim(blkDevOpts); err != nil { + glog.Infof("clean up volume %v failed: %v", pv.Name, err) + return err + } + return nil +} diff --git a/cmd/provisioner-localpv/app/provisioner_hostpath.go b/cmd/provisioner-localpv/app/provisioner_hostpath.go new file mode 100644 index 0000000000..f63f0e92c8 --- /dev/null +++ b/cmd/provisioner-localpv/app/provisioner_hostpath.go @@ -0,0 +1,127 @@ +/* +Copyright 2019 The OpenEBS Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package app + +import ( + "github.com/golang/glog" + "github.com/pkg/errors" + + pvController "github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller" + mconfig "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" + mPV "github.com/openebs/maya/pkg/kubernetes/persistentvolume/v1alpha1" + "k8s.io/api/core/v1" + //metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ProvisionHostPath is invoked by the Provisioner which expect HostPath PV +// to be provisioned and a valid PV spec returned. 
+func (p *Provisioner) ProvisionHostPath(opts pvController.VolumeOptions, volumeConfig *VolumeConfig) (*v1.PersistentVolume, error) { + pvc := opts.PVC + node := opts.SelectedNode + name := opts.PVName + stgType := volumeConfig.GetStorageType() + + path, err := volumeConfig.GetPath() + if err != nil { + return nil, err + } + + glog.Infof("Creating volume %v at %v:%v", name, node.Name, path) + + //Before using the path for local PV, make sure it is created. + initCmdsForPath := []string{"mkdir", "-m", "0777", "-p"} + podOpts := &HelperPodOptions{ + cmdsForPath: initCmdsForPath, + name: name, + path: path, + nodeName: node.Name, + } + + iErr := p.createInitPod(podOpts) + if iErr != nil { + glog.Infof("Initialize volume %v failed: %v", name, iErr) + return nil, iErr + } + + // VolumeMode will always be specified as Filesystem for host path volume, + // and the value passed in from the PVC spec will be ignored. + fs := v1.PersistentVolumeFilesystem + + // It is possible that the HostPath doesn't already exist on the node. + // Set the Local PV to create it. + //hostPathType := v1.HostPathDirectoryOrCreate + + // TODO initialize the Labels and annotations + // Use annotations to specify the context using which the PV was created. + //volAnnotations := make(map[string]string) + //volAnnotations[string(v1alpha1.CASTypeKey)] = casVolume.Spec.CasType + //fstype := casVolume.Spec.FSType + + labels := make(map[string]string) + labels[string(mconfig.CASTypeKey)] = "local-" + stgType + //labels[string(v1alpha1.StorageClassKey)] = *className + + //TODO Change the following to a builder pattern + pvObj, err := mPV.NewBuilder(). + WithName(name). + WithLabels(labels). + WithReclaimPolicy(opts.PersistentVolumeReclaimPolicy). + WithAccessModes(pvc.Spec.AccessModes). + WithVolumeMode(fs). + WithCapacityQty(pvc.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]). + WithLocalHostDirectory(path). + WithNodeAffinity(node.Name). 
+ Build() + + if err != nil { + return nil, err + } + + return pvObj, nil + +} + +// DeleteHostPath is invoked by the PVC controller to perform clean-up +// activities before deleteing the PV object. If reclaim policy is +// set to not-retain, then this function will create a helper pod +// to delete the host path from the node. +func (p *Provisioner) DeleteHostPath(pv *v1.PersistentVolume) (err error) { + defer func() { + err = errors.Wrapf(err, "failed to delete volume %v", pv.Name) + }() + + //Determine the path and node of the Local PV. + path, node, err := p.getPathAndNodeForPV(pv) + if err != nil { + return err + } + + //Initiate clean up only when reclaim policy is not retain. + glog.Infof("Deleting volume %v at %v:%v", pv.Name, node, path) + cleanupCmdsForPath := []string{"rm", "-rf"} + podOpts := &HelperPodOptions{ + cmdsForPath: cleanupCmdsForPath, + name: pv.Name, + path: path, + nodeName: node, + } + + if err := p.createCleanupPod(podOpts); err != nil { + return errors.Wrapf(err, "clean up volume %v failed", pv.Name) + } + return nil +} diff --git a/cmd/provisioner-localpv/app/provisioner_test.go b/cmd/provisioner-localpv/app/provisioner_test.go index 1bb6bbd055..cded540039 100644 --- a/cmd/provisioner-localpv/app/provisioner_test.go +++ b/cmd/provisioner-localpv/app/provisioner_test.go @@ -18,14 +18,14 @@ package app import ( //"fmt" - pvController "github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller" + //pvController "github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller" //mconfig "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" //"github.com/pkg/errors" "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + //metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" //"os" - "reflect" - "testing" + //"reflect" + //"testing" ) func fakeDefaultConfigParser(path string, pvc *v1.PersistentVolumeClaim) (*VolumeConfig, error) { @@ -62,6 +62,7 @@ func fakeValidConfigParser(path string, pvc 
*v1.PersistentVolumeClaim) (*VolumeC // return nil, fmt.Errorf("failed to read configuration for pvc %v", path) //} +/* //func (p *Provisioner) Provision(opts pvController.VolumeOptions) (*v1.PersistentVolume, error) { func TestProvision(t *testing.T) { testCases := map[string]struct { @@ -169,15 +170,16 @@ func TestProvision(t *testing.T) { t.Errorf("expected pv, but got nil") return } - if err == nil && pv.Spec.HostPath == nil { + if err == nil && pv.Spec.Local == nil { t.Errorf("expected pv.Spec.HostPath, but got nil %v", pv) return } - actualValue := pv.Spec.PersistentVolumeSource.HostPath.Path + actualValue := pv.Spec.PersistentVolumeSource.Local.Path if !v.expectError && !reflect.DeepEqual(actualValue, v.expectValue) { t.Errorf("expected %s got %s", v.expectValue, actualValue) } }) } } +*/ diff --git a/pkg/algorithm/cstorpoolselect/v1alpha1/select.go b/pkg/algorithm/cstorpoolselect/v1alpha1/select.go index 1c3e01e92d..1de01dadb9 100644 --- a/pkg/algorithm/cstorpoolselect/v1alpha1/select.go +++ b/pkg/algorithm/cstorpoolselect/v1alpha1/select.go @@ -179,7 +179,7 @@ type antiAffinityLabel struct { func defaultCVRList() cvrListFn { return func(namespace string, opts metav1.ListOptions) (*apis.CStorVolumeReplicaList, error) { - return cvr.KubeClient(cvr.WithNamespace(namespace)).List(opts) + return cvr.NewKubeclient(cvr.WithNamespace(namespace)).List(opts) } } diff --git a/pkg/client/k8s/k8s.go b/pkg/client/k8s/k8s.go index 1cf16c099f..398dead514 100644 --- a/pkg/client/k8s/k8s.go +++ b/pkg/client/k8s/k8s.go @@ -17,7 +17,6 @@ limitations under the License. 
package k8s import ( - "bytes" "encoding/json" openebs "github.com/openebs/maya/pkg/client/generated/clientset/versioned" @@ -51,7 +50,6 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/tools/remotecommand" ) // K8sKind represents the Kinds understood by Kubernetes @@ -1215,55 +1213,6 @@ func (k *K8sClient) DeleteOEV1alpha1CVR(name string) error { }) } -// ExecCoreV1Pod run a command remotely in a container of a pod -func (k *K8sClient) ExecCoreV1Pod(name string, - podExecOptions *api_core_v1.PodExecOptions) (result []byte, err error) { - - // create request object for exec with pod exec options and ParameterCodec. - // ParameterCodec used to transform url values into versioned objects - req := k.cs.CoreV1().RESTClient().Post(). - Resource("pods"). - Name(name). - Namespace(k.ns). - SubResource("exec"). - VersionedParams(podExecOptions, scheme.ParameterCodec) - - config, err := getK8sConfig() - if err != nil { - return - } - - // create exec executor which is an interface for transporting shell-style streams. - exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL()) - if err != nil { - return - } - var stdout, stderr bytes.Buffer - // Stream initiates the transport of the standard shell streams. It will transport any - // non-nil stream to a remote system, and return an error if a problem occurs. 
- err = exec.Stream(remotecommand.StreamOptions{ - Stdin: nil, - Stdout: &stdout, - Stderr: &stderr, - Tty: podExecOptions.TTY, - }) - if err != nil { - return - } - - // exec output struct contains stdout and stderr - type execOutput struct { - Stdout string `json:"stdout"` - Stderr string `json:"stderr"` - } - - op := execOutput{ - Stdout: stdout.String(), - Stderr: stderr.String(), - } - return json.Marshal(op) -} - func getK8sConfig() (config *rest.Config, err error) { k8sMaster := env.Get(env.KubeMaster) kubeConfig := env.Get(env.KubeConfig) diff --git a/pkg/cstorvolumereplica/v1alpha1/kubernetes.go b/pkg/cstorvolumereplica/v1alpha1/kubernetes.go index 211d43c453..453f306f22 100644 --- a/pkg/cstorvolumereplica/v1alpha1/kubernetes.go +++ b/pkg/cstorvolumereplica/v1alpha1/kubernetes.go @@ -15,65 +15,112 @@ package v1alpha1 import ( + "errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" clientset "github.com/openebs/maya/pkg/client/generated/clientset/versioned" - kclient "github.com/openebs/maya/pkg/client/k8s/v1alpha1" + + client "github.com/openebs/maya/pkg/kubernetes/client/v1alpha1" ) // getClientsetFn is a typed function that // abstracts fetching of internal clientset type getClientsetFn func() (clientset *clientset.Clientset, err error) +// getClientsetFromPathFn is a typed function that +// abstracts fetching of clientset from kubeConfigPath +type getClientsetForPathFn func(kubeConfigPath string) (clientset *clientset.Clientset, err error) + +// getFn is a typed function that abstracts get of cstorvolume replica instances +type getFn func(cli *clientset.Clientset, name, namespace string, + opts metav1.GetOptions) (*apis.CStorVolumeReplica, error) + // listFn is a typed function that abstracts // listing of cstor volume replica instances type listFn func(cli *clientset.Clientset, namespace string, opts metav1.ListOptions) (*apis.CStorVolumeReplicaList, error) -// kubeclient enables kubernetes 
API operations +// delFn is a typed function that abstracts delete of cstorvolume replica instances +type delFn func(cli *clientset.Clientset, name, namespace string, opts *metav1.DeleteOptions) error + +// Kubeclient enables kubernetes API operations // on cstor volume replica instance -type kubeclient struct { +type Kubeclient struct { // clientset refers to cstor volume replica's // clientset that will be responsible to // make kubernetes API calls clientset *clientset.Clientset + kubeConfigPath string // namespace holds the namespace on which // kubeclient has to operate namespace string // functions useful during mocking - getClientset getClientsetFn - list listFn + getClientset getClientsetFn + getClientsetForPath getClientsetForPathFn + get getFn + list listFn + del delFn } // kubeclientBuildOption defines the abstraction // to build a kubeclient instance -type kubeclientBuildOption func(*kubeclient) +type kubeclientBuildOption func(*Kubeclient) // withDefaults sets the default options // of kubeclient instance -func (k *kubeclient) withDefaults() { +func (k *Kubeclient) withDefaults() { + if k.getClientset == nil { k.getClientset = func() (clients *clientset.Clientset, err error) { - config, err := kclient.Config().Get() + config, err := client.New().GetConfigForPathOrDirect() + if err != nil { + return nil, err + } + return clientset.NewForConfig(config) + } + } + if k.getClientsetForPath == nil { + k.getClientsetForPath = func(kubeConfigPath string) (clients *clientset.Clientset, err error) { + config, err := client.New(client.WithKubeConfigPath(kubeConfigPath)).GetConfigForPathOrDirect() if err != nil { return nil, err } return clientset.NewForConfig(config) } } + + if k.get == nil { + k.get = func(cli *clientset.Clientset, name, namespace string, opts metav1.GetOptions) (*apis.CStorVolumeReplica, error) { + return cli.OpenebsV1alpha1().CStorVolumeReplicas(namespace).Get(name, opts) + } + } if k.list == nil { k.list = func(cli *clientset.Clientset, 
namespace string, opts metav1.ListOptions) (*apis.CStorVolumeReplicaList, error) { return cli.OpenebsV1alpha1().CStorVolumeReplicas(namespace).List(opts) } } + if k.del == nil { + k.del = func(cli *clientset.Clientset, name, namespace string, opts *metav1.DeleteOptions) error { + // The object exists in the key-value store until the garbage collector + // deletes all the dependents whose ownerReference.blockOwnerDeletion=true + // from the key-value store. API sever will put the "foregroundDeletion" + // finalizer on the object, and sets its deletionTimestamp. This policy is + // cascading, i.e., the dependents will be deleted with Foreground. + deletePropagation := metav1.DeletePropagationForeground + opts.PropagationPolicy = &deletePropagation + err := cli.OpenebsV1alpha1().CStorVolumeReplicas(namespace).Delete(name, opts) + return err + } + } } // WithKubeClient sets the kubernetes client against // the kubeclient instance func WithKubeClient(c *clientset.Clientset) kubeclientBuildOption { - return func(k *kubeclient) { + return func(k *Kubeclient) { k.clientset = c } } @@ -81,15 +128,23 @@ func WithKubeClient(c *clientset.Clientset) kubeclientBuildOption { // WithNamespace sets the kubernetes client against // the provided namespace func WithNamespace(namespace string) kubeclientBuildOption { - return func(k *kubeclient) { + return func(k *Kubeclient) { k.namespace = namespace } } -// KubeClient returns a new instance of kubeclient meant for +// WithKubeConfigPath sets the kubernetes client against +// the provided path +func WithKubeConfigPath(path string) kubeclientBuildOption { + return func(k *Kubeclient) { + k.kubeConfigPath = path + } +} + +// NewKubeclient returns a new instance of kubeclient meant for // cstor volume replica operations -func KubeClient(opts ...kubeclientBuildOption) *kubeclient { - k := &kubeclient{} +func NewKubeclient(opts ...kubeclientBuildOption) *Kubeclient { + k := &Kubeclient{} for _, o := range opts { o(k) } @@ -97,13 +152,20 @@ 
func KubeClient(opts ...kubeclientBuildOption) *kubeclient { return k } +func (k *Kubeclient) getClientsetForPathOrDirect() (*clientset.Clientset, error) { + if k.kubeConfigPath != "" { + return k.getClientsetForPath(k.kubeConfigPath) + } + return k.getClientset() +} + // getClientOrCached returns either a new instance // of kubernetes client or its cached copy -func (k *kubeclient) getClientOrCached() (*clientset.Clientset, error) { +func (k *Kubeclient) getClientOrCached() (*clientset.Clientset, error) { if k.clientset != nil { return k.clientset, nil } - c, err := k.getClientset() + c, err := k.getClientsetForPathOrDirect() if err != nil { return nil, err } @@ -113,10 +175,31 @@ func (k *kubeclient) getClientOrCached() (*clientset.Clientset, error) { // List returns a list of cstor volume replica // instances present in kubernetes cluster -func (k *kubeclient) List(opts metav1.ListOptions) (*apis.CStorVolumeReplicaList, error) { +func (k *Kubeclient) List(opts metav1.ListOptions) (*apis.CStorVolumeReplicaList, error) { cli, err := k.getClientOrCached() if err != nil { return nil, err } return k.list(cli, k.namespace, opts) } + +// Get returns cstorvolumereplica object for given name +func (k *Kubeclient) Get(name string, opts metav1.GetOptions) (*apis.CStorVolumeReplica, error) { + if len(name) == 0 { + return nil, errors.New("failed to get cstorvolume: name can't be empty") + } + cli, err := k.getClientOrCached() + if err != nil { + return nil, err + } + return k.get(cli, name, k.namespace, opts) +} + +// Delete delete the cstorvolume replica resource +func (k *Kubeclient) Delete(name string) error { + cli, err := k.getClientOrCached() + if err != nil { + return err + } + return k.del(cli, name, k.namespace, &metav1.DeleteOptions{}) +} diff --git a/pkg/cstorvolumereplica/v1alpha1/kubernetes_test.go b/pkg/cstorvolumereplica/v1alpha1/kubernetes_test.go index f3f94fa718..04be47bbd2 100644 --- a/pkg/cstorvolumereplica/v1alpha1/kubernetes_test.go +++ 
b/pkg/cstorvolumereplica/v1alpha1/kubernetes_test.go @@ -25,11 +25,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// fakeGetClientset gets the cvr clientset func fakeGetClientset() (clientset *clientset.Clientset, err error) { return &client.Clientset{}, nil } -func fakeListfn(cli *clientset.Clientset, namespace string, opts metav1.ListOptions) (*apis.CStorVolumeReplicaList, error) { +func fakeListOk(cli *clientset.Clientset, namespace string, opts metav1.ListOptions) (*apis.CStorVolumeReplicaList, error) { return &apis.CStorVolumeReplicaList{}, nil } @@ -37,11 +38,19 @@ func fakeListErrfn(cli *clientset.Clientset, namespace string, opts metav1.ListO return &apis.CStorVolumeReplicaList{}, errors.New("some error") } -func fakeSetClientset(k *kubeclient) { +func fakeGetOk(cli *clientset.Clientset, name, namespace string, opts metav1.GetOptions) (*apis.CStorVolumeReplica, error) { + return &apis.CStorVolumeReplica{}, nil +} + +func fakeDeleteOk(cli *clientset.Clientset, name, namespace string, opts *metav1.DeleteOptions) error { + return nil +} + +func fakeSetClientset(k *Kubeclient) { k.clientset = &client.Clientset{} } -func fakeSetNilClientset(k *kubeclient) { +func fakeSetNilClientset(k *Kubeclient) { k.clientset = nil } @@ -49,11 +58,19 @@ func fakeGetNilErrClientSet() (clientset *clientset.Clientset, err error) { return nil, nil } +func fakeGetNilErrClientSetForPath(path string) (clientset *clientset.Clientset, err error) { + return nil, nil +} + +func fakeGetErrClientSetForPath(path string) (clientset *clientset.Clientset, err error) { + return nil, errors.New("Some error") +} + func fakeGetErrClientSet() (clientset *clientset.Clientset, err error) { return nil, errors.New("Some error") } -func fakeClientSet(k *kubeclient) {} +func fakeClientSet(k *Kubeclient) {} func TestKubernetesWithDefaults(t *testing.T) { tests := map[string]struct { @@ -67,9 +84,9 @@ func TestKubernetesWithDefaults(t *testing.T) { for name, mock := range tests { 
t.Run(name, func(t *testing.T) { - fc := &kubeclient{} + fc := &Kubeclient{} if !mock.expectListFn { - fc.list = fakeListfn + fc.list = fakeListOk } if !mock.expectGetClientset { fc.getClientset = fakeGetClientset @@ -98,7 +115,7 @@ func TestKubernetesWithKubeClient(t *testing.T) { for name, mock := range tests { t.Run(name, func(t *testing.T) { h := WithKubeClient(mock.Clientset) - fake := &kubeclient{} + fake := &Kubeclient{} h(fake) if mock.expectKubeClientEmpty && fake.clientset != nil { t.Fatalf("test %q failed expected fake.clientset to be empty", name) @@ -125,8 +142,10 @@ func TestKubernetesKubeClient(t *testing.T) { } for name, mock := range tests { + name := name + mock := mock t.Run(name, func(t *testing.T) { - c := KubeClient(mock.opts...) + c := NewKubeclient(mock.opts...) if !mock.expectClientSet && c.clientset != nil { t.Fatalf("test %q failed expected fake.clientset to be empty", name) } @@ -140,13 +159,12 @@ func TestKubernetesKubeClient(t *testing.T) { func TesKubernetestGetClientOrCached(t *testing.T) { tests := map[string]struct { expectErr bool - KubeClient *kubeclient + KubeClient *Kubeclient }{ - // Positive tests - "Positive 1": {false, &kubeclient{nil, "", fakeGetNilErrClientSet, fakeListfn}}, - "Positive 2": {false, &kubeclient{&client.Clientset{}, "", fakeGetNilErrClientSet, fakeListfn}}, + "Positive 1": {false, &Kubeclient{nil, "", "", fakeGetNilErrClientSet, fakeGetNilErrClientSetForPath, fakeGetOk, fakeListOk, fakeDeleteOk}}, + "Positive 2": {false, &Kubeclient{&client.Clientset{}, "", "", fakeGetNilErrClientSet, fakeGetNilErrClientSetForPath, fakeGetOk, fakeListOk, fakeDeleteOk}}, // Negative tests - "Negative 1": {true, &kubeclient{nil, "", fakeGetErrClientSet, fakeListfn}}, + "Negative 1": {true, &Kubeclient{nil, "", "", fakeGetErrClientSet, fakeGetErrClientSetForPath, fakeGetOk, fakeListOk, fakeDeleteOk}}, } for name, mock := range tests { @@ -168,14 +186,16 @@ func TestKubenetesList(t *testing.T) { list listFn expectErr bool }{ - 
"Test 1": {fakeGetErrClientSet, fakeListfn, true}, - "Test 2": {fakeGetClientset, fakeListfn, false}, + "Test 1": {fakeGetErrClientSet, fakeListOk, true}, + "Test 2": {fakeGetClientset, fakeListOk, false}, "Test 3": {fakeGetClientset, fakeListErrfn, true}, } for name, mock := range tests { + name := name + mock := mock t.Run(name, func(t *testing.T) { - k := kubeclient{getClientset: mock.getClientset, list: mock.list} + k := Kubeclient{getClientset: mock.getClientset, list: mock.list} _, err := k.List(metav1.ListOptions{}) if mock.expectErr && err == nil { t.Fatalf("Test %q failed: expected error not to be nil", name) diff --git a/pkg/install/v1alpha1/env.go b/pkg/install/v1alpha1/env.go index e0729a9a26..1738e92239 100644 --- a/pkg/install/v1alpha1/env.go +++ b/pkg/install/v1alpha1/env.go @@ -21,10 +21,11 @@ import ( ) const ( - // DefaultCstorSparsePool is the environment variable that flags if default - // cstor pool should be configured or not + // DefaultCstorSparsePool is the environment variable that + // flags if default cstor pool should be configured or not // - // If value is "true", default cstor pool will be installed/configured else - // for "false" it will not be configured + // If value is "true", default cstor pool will be + // installed/configured else for "false" it will + // not be configured DefaultCstorSparsePool menv.ENVKey = "OPENEBS_IO_INSTALL_DEFAULT_CSTOR_SPARSE_POOL" ) diff --git a/pkg/install/v1alpha1/env_setter.go b/pkg/install/v1alpha1/env_setter.go index 5a1b041681..9df12ffce4 100644 --- a/pkg/install/v1alpha1/env_setter.go +++ b/pkg/install/v1alpha1/env_setter.go @@ -24,7 +24,8 @@ import ( ver "github.com/openebs/maya/pkg/version" ) -// EnvStatus represents the status of operation against an env instance +// EnvStatus represents the status of operation +// against an env instance type EnvStatus string // env set completion statuses @@ -44,11 +45,13 @@ type env struct { Err error } -// envPredicate abstracts evaluation condition of 
the given instance and returns -// the name of evaluation along with result of evaluation +// envPredicate abstracts evaluation condition of +// the given instance and returns the name of +// evaluation along with result of evaluation type envPredicate func(given *env) (name string, success bool) -// isEnvNotPresent returns true if env in not set previously +// isEnvNotPresent returns true if env in not set +// previously func isEnvNotPresent(given *env) (name string, success bool) { name = "isEnvNotPresent" if given == nil { @@ -73,10 +76,11 @@ func isEnvError(given *env) (name string, hasErr bool) { return } -// envMiddleware abstracts updating the given env instance +// envMiddleware abstracts updating given env instance type envMiddleware func(given *env) (updated *env) -// EnvUpdateStatus updates the env instance with provided status info +// EnvUpdateStatus updates the env instance with +// provided status info func EnvUpdateStatus(context, reason string, status EnvStatus) envMiddleware { return func(given *env) (updated *env) { if given == nil { @@ -90,7 +94,8 @@ func EnvUpdateStatus(context, reason string, status EnvStatus) envMiddleware { } } -// EnvUpdateError updates the env instance with provided error +// EnvUpdateError updates the env instance with +// provided error func EnvUpdateError(context string, err error) envMiddleware { return func(given *env) (updated *env) { if given == nil || err == nil { @@ -102,7 +107,8 @@ func EnvUpdateError(context string, err error) envMiddleware { } } -// EnvUpdateSuccess updates the env instance with success status +// EnvUpdateSuccess updates the env instance with +// success status func EnvUpdateSuccess(context string) envMiddleware { return func(given *env) (updated *env) { return EnvUpdateStatus(context, "", EnvSetSuccess)(given) @@ -154,7 +160,17 @@ func (l *envList) Infos() (msgs []string) { if env == nil || env.Err != nil { continue } - msgs = append(msgs, fmt.Sprintf("{env '%s': val '%s': msg: '%s' '%s' 
'%s'}", env.Key, env.Value, env.Context, env.Status, env.Reason)) + msgs = append( + msgs, + fmt.Sprintf( + "{env '%s': val '%s': msg: '%s' '%s' '%s'}", + env.Key, + env.Value, + env.Context, + env.Status, + env.Reason, + ), + ) } return } @@ -230,7 +246,11 @@ func (e *envInstall) List() (l *envList, err error) { l.Items = append(l.Items, &env{ Key: menv.CASTemplateToListVolumeENVK, Value: strings.Join(ver.WithSuffixesIf( - []string{"jiva-volume-list-default-0.6.0", "jiva-volume-list-default", "cstor-volume-list-default"}, + []string{ + "jiva-volume-list-default-0.6.0", + "jiva-volume-list-default", + "cstor-volume-list-default", + }, ver.IsNotVersioned), ","), }) l.Items = append(l.Items, &env{ diff --git a/pkg/install/v1alpha1/installer.go b/pkg/install/v1alpha1/installer.go index 91de243801..a8c937f0d3 100644 --- a/pkg/install/v1alpha1/installer.go +++ b/pkg/install/v1alpha1/installer.go @@ -14,9 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// TODO -// Make use of pkg/msg instead of errorList - package v1alpha1 import ( @@ -41,6 +38,8 @@ type Installer interface { type simpleInstaller struct { artifactTemplater ArtifactMiddleware envLister EnvLister + + // TODO use pkg/errors/v1alpha1 errorList } @@ -52,7 +51,7 @@ func (i *simpleInstaller) prepareResources() k8s.UnstructedList { // set the environments conditionally required for install eslist := elist.SetIf(version.Current(), isEnvNotPresent) - glog.Infof("%+v", eslist.Infos()) + glog.V(2).Infof("%+v", eslist.Infos()) i.addErrors(eslist.Errors()) // list the artifacts w.r.t latest version @@ -115,7 +114,12 @@ func (i *simpleInstaller) Install() []error { cu := k8s.CreateOrUpdate(k8s.GroupVersionResourceFromGVK(unstruct), unstruct.GetNamespace()) u, err := cu.Apply(unstruct) if err == nil { - glog.Infof("'%s' '%s' installed successfully at namespace '%s'", u.GroupVersionKind(), u.GetName(), u.GetNamespace()) + glog.V(2).Infof( + "{%s/%s} installed successfully at namespace {%s}", + u.GroupVersionKind(), + u.GetName(), + u.GetNamespace(), + ) } else { i.addError(err) } diff --git a/pkg/install/v1alpha1/jiva_volume.go b/pkg/install/v1alpha1/jiva_volume.go index 986f880a07..8edd5b4f01 100644 --- a/pkg/install/v1alpha1/jiva_volume.go +++ b/pkg/install/v1alpha1/jiva_volume.go @@ -1351,9 +1351,8 @@ metadata: name: jiva-volume-delete-putreplicascrub-default spec: meta: | - {{- $jivapodsns := .TaskResult.jivapodsinopenebsns.ns | default .Volume.runNamespace -}} apiVersion: batch/v1 - runNamespace: {{ $jivapodsns }} + runNamespace: {{ .Config.OpenEBSNamespace.value }} disable: {{ .Config.RetainReplicaData.enabled }} kind: Job action: put @@ -1374,6 +1373,9 @@ spec: openebs.io/cas-type: jiva spec: backoffLimit: 4 + {{- if kubeVersionGte .CAST.kubeVersion "v1.12.0" }} + ttlSecondsAfterFinished: 0 + {{- end }} template: spec: restartPolicy: Never diff --git a/pkg/install/v1alpha1/localpv_sc.go b/pkg/install/v1alpha1/localpv_sc.go index a15e416f88..1759414e6b 
100644 --- a/pkg/install/v1alpha1/localpv_sc.go +++ b/pkg/install/v1alpha1/localpv_sc.go @@ -38,6 +38,24 @@ provisioner: openebs.io/local volumeBindingMode: WaitForFirstConsumer reclaimPolicy: Delete --- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: openebs-device + annotations: + #Define a new CAS Type called "local" + #which indicates that Data is stored + #directly onto hostpath. The hostpath can be: + #- device (as block or mounted path) + #- hostpath (sub directory on OS or mounted path) + openebs.io/cas-type: local + cas.openebs.io/config: | + - name: StorageType + value: "device" +provisioner: openebs.io/local +volumeBindingMode: WaitForFirstConsumer +reclaimPolicy: Delete +--- ` // LocalPVArtifacts returns the default Local PV storage diff --git a/pkg/install/v1alpha1/marshall.go b/pkg/install/v1alpha1/marshall.go index 2487e81599..4cac0e9ed7 100644 --- a/pkg/install/v1alpha1/marshall.go +++ b/pkg/install/v1alpha1/marshall.go @@ -22,8 +22,9 @@ import ( "github.com/pkg/errors" ) -// ConfigUnmarshaller abstracts un-marshalling of config specifications into an -// instance of install config +// ConfigUnmarshaller abstracts un-marshalling of +// config specifications into an instance of install +// config type ConfigUnmarshaller func(conf string) (config *InstallConfig, err error) // UnmarshallConfig is an implementation of ConfigUnmarshaller diff --git a/pkg/install/v1alpha1/registrar.go b/pkg/install/v1alpha1/registrar.go index 3a3c3a66a0..c1a53f54a9 100644 --- a/pkg/install/v1alpha1/registrar.go +++ b/pkg/install/v1alpha1/registrar.go @@ -20,27 +20,31 @@ import ( "strings" ) -// MultiYamlFetcher abstracts aggregating and returning multiple yaml documents -// as a string +// MultiYamlFetcher abstracts aggregating and +// returning multiple yaml documents as a string type MultiYamlFetcher interface { FetchYamls() string } -// ArtifactListPredicate abstracts evaluating a condition against the provided -// artifact list +// 
ArtifactListPredicate abstracts evaluating a +// condition against the provided artifact list type ArtifactListPredicate func() bool -// ParseArtifactListFromMultipleYamlsIf generates a list of Artifacts from -// yaml documents if predicate evaluation succeeds -func ParseArtifactListFromMultipleYamlsIf(m MultiYamlFetcher, p ArtifactListPredicate) (artifacts []*Artifact) { +// ParseArtifactListFromMultipleYamlsIf generates a +// list of Artifacts from yaml documents if predicate +// evaluation succeeds +func ParseArtifactListFromMultipleYamlsIf( + m MultiYamlFetcher, + p ArtifactListPredicate, +) (artifacts []*Artifact) { if p() { return ParseArtifactListFromMultipleYamls(m) } return } -// ParseArtifactListFromMultipleYamls generates a list of Artifacts from the -// yaml documents. +// ParseArtifactListFromMultipleYamls generates a list of +// Artifacts from the yaml documents. // // NOTE: // Each YAML document is assumed to be separated via "---" @@ -56,16 +60,19 @@ func ParseArtifactListFromMultipleYamls(m MultiYamlFetcher) (artifacts []*Artifa return } -// RegisteredArtifacts returns the list of latest Artifacts that will get -// installed +// RegisteredArtifacts returns the list of latest +// Artifacts that will get installed func RegisteredArtifacts() (list artifactList) { - //Note: CRDs have to be installed first. Keep this at top of the list. + // Note: CRDs need to be installed first + // Keep this at top of the list list.Items = append(list.Items, OpenEBSCRDArtifacts().Items...) list.Items = append(list.Items, JivaVolumeArtifacts().Items...) - //Contains the read/list/delete CAST for supporting older volumes - //The CAST defined here are provided as fallback options to latest CAST + + // Contains read/list/delete CAST for supporting older volumes + // CAST defined here are provided as fallback options to latest CAST list.Items = append(list.Items, JivaVolumeArtifactsFor060().Items...) + list.Items = append(list.Items, JivaPoolArtifacts().Items...) 
list.Items = append(list.Items, CstorPoolArtifacts().Items...) @@ -73,8 +80,9 @@ func RegisteredArtifacts() (list artifactList) { list.Items = append(list.Items, CstorSnapshotArtifacts().Items...) list.Items = append(list.Items, CstorSparsePoolArtifacts().Items...) - //Contains the SC to help with provisioning from clone. - //This is generic for release till K8s supports native way of cloning. + // Contains SC to help with provisioning from clone + // This is generic for release till K8s supports native + // way of cloning list.Items = append(list.Items, SnapshotPromoterSCArtifacts().Items...) // snapshots diff --git a/pkg/kubernetes/container/v1alpha1/container.go b/pkg/kubernetes/container/v1alpha1/container.go index dfd694d357..6c510356b2 100644 --- a/pkg/kubernetes/container/v1alpha1/container.go +++ b/pkg/kubernetes/container/v1alpha1/container.go @@ -17,7 +17,7 @@ limitations under the License. package v1alpha1 import ( - "github.com/pkg/errors" + errors "github.com/openebs/maya/pkg/errors/v1alpha1" corev1 "k8s.io/api/core/v1" ) @@ -89,22 +89,22 @@ func New(opts ...OptionFunc) corev1.Container { return c.asContainer() } -// builder provides utilities required to build a kubernetes container type -type builder struct { +// Builder provides utilities required to build a kubernetes container type +type Builder struct { con *container // container instance checks []Predicate // validations to be done while building the container instance errors []error // errors found while building the container instance } -// Builder returns a new instance of builder -func Builder() *builder { - return &builder{ +// NewBuilder returns a new instance of builder +func NewBuilder() *Builder { + return &Builder{ con: &container{}, } } // validate will run checks against container instance -func (b *builder) validate() error { +func (b *Builder) validate() error { for _, c := range b.checks { if m, ok := c(b.con); !ok { b.errors = append(b.errors, predicateFailedError(m)) @@ -117,7 
+117,7 @@ func (b *builder) validate() error { } // Build returns the final kubernetes container -func (b *builder) Build() (corev1.Container, error) { +func (b *Builder) Build() (corev1.Container, error) { err := b.validate() if err != nil { return corev1.Container{}, err @@ -127,14 +127,14 @@ func (b *builder) Build() (corev1.Container, error) { // AddCheck adds the predicate as a condition to be validated against the // container instance -func (b *builder) AddCheck(p Predicate) *builder { +func (b *Builder) AddCheck(p Predicate) *Builder { b.checks = append(b.checks, p) return b } // AddChecks adds the provided predicates as conditions to be validated against // the container instance -func (b *builder) AddChecks(p []Predicate) *builder { +func (b *Builder) AddChecks(p []Predicate) *Builder { for _, check := range p { b.AddCheck(check) } @@ -142,7 +142,7 @@ func (b *builder) AddChecks(p []Predicate) *builder { } // WithName sets the name of the container -func (b *builder) WithName(name string) *builder { +func (b *Builder) WithName(name string) *Builder { WithName(name)(b.con) return b } @@ -155,7 +155,7 @@ func WithName(name string) OptionFunc { } // WithImage sets the image of the container -func (b *builder) WithImage(img string) *builder { +func (b *Builder) WithImage(img string) *Builder { WithImage(img)(b.con) return b } @@ -168,7 +168,7 @@ func WithImage(img string) OptionFunc { } // WithCommand sets the command of the container -func (b *builder) WithCommand(cmd []string) *builder { +func (b *Builder) WithCommand(cmd []string) *Builder { WithCommand(cmd)(b.con) return b } @@ -181,7 +181,7 @@ func WithCommand(cmd []string) OptionFunc { } // WithArguments sets the command arguments of the container -func (b *builder) WithArguments(args []string) *builder { +func (b *Builder) WithArguments(args []string) *Builder { WithArguments(args)(b.con) return b } @@ -194,7 +194,7 @@ func WithArguments(args []string) OptionFunc { } // WithArguments sets the command 
arguments of the container -func (b *builder) WithVolumeMounts(args []corev1.VolumeMount) *builder { +func (b *Builder) WithVolumeMounts(args []corev1.VolumeMount) *Builder { WithVolumeMounts(args)(b.con) return b } diff --git a/pkg/kubernetes/container/v1alpha1/container_test.go b/pkg/kubernetes/container/v1alpha1/container_test.go index 7f4f499ee8..d357d1c05c 100644 --- a/pkg/kubernetes/container/v1alpha1/container_test.go +++ b/pkg/kubernetes/container/v1alpha1/container_test.go @@ -40,6 +40,8 @@ func TestPredicateFailedError(t *testing.T) { "always false": {"fakeAlwaysFalse", "predicatefailed: fakeAlwaysFalse"}, } for name, mock := range tests { + name := name // pin it + mock := mock // pin it t.Run(name, func(t *testing.T) { e := predicateFailedError(mock.predicateMessage) if e.Error() != mock.expectedErr { @@ -82,7 +84,7 @@ func TestNewWithArguments(t *testing.T) { } func TestBuilderBuild(t *testing.T) { - _, err := Builder().Build() + _, err := NewBuilder().Build() if err != nil { t.Fatalf("test failed: expected no err: actual '%+v'", err) } @@ -98,8 +100,10 @@ func TestBuilderValidation(t *testing.T) { "true & false": {[]Predicate{fakeAlwaysTrue, fakeAlwaysFalse}, true}, } for name, mock := range tests { + name := name // pin it + mock := mock // pin it t.Run(name, func(t *testing.T) { - _, err := Builder().AddChecks(mock.checks).Build() + _, err := NewBuilder().AddChecks(mock.checks).Build() if mock.isError && err == nil { t.Fatalf("test '%s' failed: expected error: actual no error", name) } @@ -120,8 +124,10 @@ func TestBuilderAddChecks(t *testing.T) { "two": {[]Predicate{fakeAlwaysTrue, fakeAlwaysFalse}, 2}, } for name, mock := range tests { + name := name // pin it + mock := mock // pin it t.Run(name, func(t *testing.T) { - b := Builder().AddChecks(mock.checks) + b := NewBuilder().AddChecks(mock.checks) if len(b.checks) != mock.expectedCount { t.Fatalf("test '%s' failed: expected no of checks '%d': actual '%d'", name, mock.expectedCount, len(b.checks)) 
} @@ -139,8 +145,10 @@ func TestBuilderWithName(t *testing.T) { "t3": {"ndm", "ndm"}, } for name, mock := range tests { + name := name // pin it + mock := mock // pin it t.Run(name, func(t *testing.T) { - c, _ := Builder().WithName(mock.name).Build() + c, _ := NewBuilder().WithName(mock.name).Build() if c.Name != mock.expectedName { t.Fatalf("test '%s' failed: expected name '%s': actual '%s'", name, mock.expectedName, c.Name) } @@ -158,8 +166,10 @@ func TestBuilderWithImage(t *testing.T) { "t3": {"openebs.io/ndm:1.0", "openebs.io/ndm:1.0"}, } for name, mock := range tests { + name := name // pin it + mock := mock // pin it t.Run(name, func(t *testing.T) { - c, _ := Builder().WithImage(mock.image).Build() + c, _ := NewBuilder().WithImage(mock.image).Build() if c.Image != mock.expectedImage { t.Fatalf("test '%s' failed: expected image '%s': actual '%s'", name, mock.expectedImage, c.Image) } @@ -175,8 +185,10 @@ func TestBuilderWithCommand(t *testing.T) { "t1": {[]string{"kubectl", "get", "po"}, []string{"kubectl", "get", "po"}}, } for name, mock := range tests { + name := name // pin it + mock := mock // pin it t.Run(name, func(t *testing.T) { - c, _ := Builder().WithCommand(mock.cmd).Build() + c, _ := NewBuilder().WithCommand(mock.cmd).Build() if !reflect.DeepEqual(c.Command, mock.expectedCmd) { t.Fatalf("test '%s' failed: expected command '%q': actual '%q'", name, mock.expectedCmd, c.Command) } @@ -192,8 +204,10 @@ func TestBuilderWithArguments(t *testing.T) { "t1": {[]string{"-jsonpath", "metadata.name"}, []string{"-jsonpath", "metadata.name"}}, } for name, mock := range tests { + name := name // pin it + mock := mock // pin it t.Run(name, func(t *testing.T) { - c, _ := Builder().WithArguments(mock.args).Build() + c, _ := NewBuilder().WithArguments(mock.args).Build() if !reflect.DeepEqual(c.Args, mock.expectedArgs) { t.Fatalf("test '%s' failed: expected arguments '%q': actual '%q'", name, mock.expectedArgs, c.Args) } diff --git 
a/pkg/kubernetes/deployment/appsv1/v1alpha1/deployment.go b/pkg/kubernetes/deployment/appsv1/v1alpha1/deployment.go index a96a2f9eca..71ea642eb9 100644 --- a/pkg/kubernetes/deployment/appsv1/v1alpha1/deployment.go +++ b/pkg/kubernetes/deployment/appsv1/v1alpha1/deployment.go @@ -18,8 +18,12 @@ package v1alpha1 import ( stringer "github.com/openebs/maya/pkg/apis/stringer/v1alpha1" - "github.com/pkg/errors" + errors "github.com/openebs/maya/pkg/errors/v1alpha1" + container "github.com/openebs/maya/pkg/kubernetes/container/v1alpha1" + volume "github.com/openebs/maya/pkg/kubernetes/volume/v1alpha1" appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // Predicate abstracts conditional logic w.r.t the deployment instance @@ -33,10 +37,10 @@ import ( // imperatives i.e. actions that form the business logic type Predicate func(*Deploy) bool -// Deploy is the wrapper over k8s deployment object +// Deploy is the wrapper over k8s deployment Object type Deploy struct { // kubernetes deployment instance - object *appsv1.Deployment + Object *appsv1.Deployment } // Builder enables building an instance of @@ -66,7 +70,7 @@ const ( // String implements the stringer interface func (d *Deploy) String() string { - return stringer.Yaml("deployment", d.object) + return stringer.Yaml("deployment", d.Object) } // GoString implements the goStringer interface @@ -78,20 +82,100 @@ func (d *Deploy) GoString() string { func NewBuilder() *Builder { return &Builder{ deployment: &Deploy{ - object: &appsv1.Deployment{}, + Object: &appsv1.Deployment{}, }, } } +// WithName sets the Name field of deployment with provided value. +func (b *Builder) WithName(name string) *Builder { + if len(name) == 0 { + b.errors = append(b.errors, errors.New("failed to build deployment: missing deployment name")) + return b + } + b.deployment.Object.Name = name + return b +} + +// WithNamespace sets the Namespace field of deployment with provided value. 
+func (b *Builder) WithNamespace(namespace string) *Builder { + if len(namespace) == 0 { + b.errors = append(b.errors, errors.New("failed to build deployment: missing namespace")) + return b + } + b.deployment.Object.Namespace = namespace + return b +} + +// WithContainer sets the container field of the deployment object with the given container object +func (b *Builder) WithContainer(container *corev1.Container) *Builder { + b.deployment.Object.Spec.Template.Spec.Containers = append(b.deployment.Object.Spec.Template.Spec.Containers, *container) + return b +} + +// WithContainerBuilder builds the containerbuilder provided and +// sets the container field of the deployment object with the given container object +func (b *Builder) WithContainerBuilder(containerBuilder *container.Builder) *Builder { + containerObj, err := containerBuilder.Build() + if err != nil { + b.errors = append(b.errors, errors.Wrap(err, "failed to build deployment")) + return b + } + b.deployment.Object.Spec.Template.Spec.Containers = append( + b.deployment.Object.Spec.Template.Spec.Containers, + containerObj, + ) + return b +} + +// WithVolumes sets Volumes field of deployment. +func (b *Builder) WithVolumes(vol []corev1.Volume) *Builder { + b.deployment.Object.Spec.Template.Spec.Volumes = vol + return b +} + +// WithVolumeBuilder sets Volumes field of deployment. 
+func (b *Builder) WithVolumeBuilder(volumeBuilder *volume.Builder) *Builder { + vol, err := volumeBuilder.Build() + if err != nil { + b.errors = append(b.errors, errors.Wrap(err, "failed to build deployment")) + return b + } + b.deployment.Object.Spec.Template.Spec.Volumes = append( + b.deployment.Object.Spec.Template.Spec.Volumes, + *vol, + ) + return b +} + +// WithLabels sets the label field of deployment +func (b *Builder) WithLabels(labels map[string]string) *Builder { + b.deployment.Object.Labels = labels + return b +} + +// WithLabelsAndSelector sets label selector for template and deployment +func (b *Builder) WithLabelsAndSelector(labels map[string]string) *Builder { + if len(labels) == 0 { + b.errors = append(b.errors, errors.New("failed to build deployment: missing labels")) + return b + } + labelselector := metav1.LabelSelector{} + labelselector.MatchLabels = labels + b.deployment.Object.Spec.Selector = &labelselector + b.deployment.Object.Spec.Template.Labels = labels + return b +} + // NewBuilderForAPIObject returns a new instance of builder -// for a given deployment object +// for a given deployment Object func NewBuilderForAPIObject(deployment *appsv1.Deployment) *Builder { b := NewBuilder() if deployment != nil { - b.deployment.object = deployment + b.deployment.Object = deployment } else { b.errors = append(b.errors, - errors.New("nil deployment object given to get builder instance")) + errors.New("nil deployment given to get builder instance")) } return b } @@ -101,8 +185,8 @@ func (b *Builder) Build() (*Deploy, error) { err := b.validate() if err != nil { return nil, errors.Wrapf(err, - "failed to build a deployment instance: %s", - b.deployment.object) + "failed to build a deployment: %s", + b.deployment.Object) } return b.deployment, nil } @@ -191,7 +275,7 @@ func IsProgressDeadlineExceeded() Predicate { // If `Progressing` condition's reason is `ProgressDeadlineExceeded` then // it is not rolled out. 
func (d *Deploy) IsProgressDeadlineExceeded() bool { - for _, cond := range d.object.Status.Conditions { + for _, cond := range d.Object.Status.Conditions { if cond.Type == appsv1.DeploymentProgressing && cond.Reason == "ProgressDeadlineExceeded" { return true @@ -213,8 +297,8 @@ func IsOlderReplicaActive() Predicate { // Status.UpdatedReplicas < *Spec.Replicas then some of the replicas are // updated and some of them are not. func (d *Deploy) IsOlderReplicaActive() bool { - return d.object.Spec.Replicas != nil && - d.object.Status.UpdatedReplicas < *d.object.Spec.Replicas + return d.Object.Spec.Replicas != nil && + d.Object.Status.UpdatedReplicas < *d.Object.Spec.Replicas } // IsTerminationInProgress checks for older replicas are waiting to @@ -234,7 +318,7 @@ func IsTerminationInProgress() Predicate { // replicas are not in running state. It waits for newer replica to // come into running state then terminate. func (d *Deploy) IsTerminationInProgress() bool { - return d.object.Status.Replicas > d.object.Status.UpdatedReplicas + return d.Object.Status.Replicas > d.Object.Status.UpdatedReplicas } // IsUpdateInProgress Checks if all the replicas are updated or not. @@ -250,7 +334,7 @@ func IsUpdateInProgress() Predicate { // If Status.AvailableReplicas < Status.UpdatedReplicas then all the // older replicas are not there but there are less number of availableReplicas func (d *Deploy) IsUpdateInProgress() bool { - return d.object.Status.AvailableReplicas < d.object.Status.UpdatedReplicas + return d.Object.Status.AvailableReplicas < d.Object.Status.UpdatedReplicas } // IsNotSyncSpec compare generation in status and spec and check if @@ -266,5 +350,5 @@ func IsNotSyncSpec() Predicate { // deployment spec is synced or not. If Generation <= Status.ObservedGeneration // then deployment spec is not updated yet. 
func (d *Deploy) IsNotSyncSpec() bool { - return d.object.Generation > d.object.Status.ObservedGeneration + return d.Object.Generation > d.Object.Status.ObservedGeneration } diff --git a/pkg/kubernetes/deployment/appsv1/v1alpha1/kubernetes.go b/pkg/kubernetes/deployment/appsv1/v1alpha1/kubernetes.go index e8a32cf432..80a6dac67e 100644 --- a/pkg/kubernetes/deployment/appsv1/v1alpha1/kubernetes.go +++ b/pkg/kubernetes/deployment/appsv1/v1alpha1/kubernetes.go @@ -17,8 +17,10 @@ package v1alpha1 import ( "encoding/json" + "strings" client "github.com/openebs/maya/pkg/kubernetes/client/v1alpha1" + "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -28,10 +30,22 @@ import ( // that abstracts fetching of internal clientset type getClientsetFn func() (clientset *kubernetes.Clientset, err error) +// getClientsetFromPathFn is a typed function that +// abstracts fetching of clientset from kubeConfigPath +type getClientsetForPathFn func(kubeConfigPath string) (clientset *kubernetes.Clientset, err error) + // getFn is a typed function that abstracts get of deployment instances type getFn func(cli *kubernetes.Clientset, name, namespace string, opts *metav1.GetOptions) (*appsv1.Deployment, error) +// createFn is a typed function that abstracts +// creation of deployment +type createFn func(cli *kubernetes.Clientset, namespace string, deploy *appsv1.Deployment) (*appsv1.Deployment, error) + +// deleteFn is a typed function that abstracts +// deletion of deployments +type deleteFn func(cli *kubernetes.Clientset, namespace string, name string, opts *metav1.DeleteOptions) error + // rolloutStatusFn is a typed function that abstracts // rollout status of deployment instances type rolloutStatusFn func(d *appsv1.Deployment) (*RolloutOutput, error) @@ -40,6 +54,57 @@ type rolloutStatusFn func(d *appsv1.Deployment) (*RolloutOutput, error) // rollout status of deployment instances type rolloutStatusfFn func(d 
*appsv1.Deployment) ([]byte, error) +// defaultGet is default implementation of get function +func defaultGet(cli *kubernetes.Clientset, name, + namespace string, opts *metav1.GetOptions) ( + d *appsv1.Deployment, err error) { + d, err = cli.AppsV1(). + Deployments(namespace). + Get(name, *opts) + return +} + +// defaultCreate is default implementation of create function +func defaultCreate(cli *kubernetes.Clientset, + namespace string, deploy *appsv1.Deployment) ( + d *appsv1.Deployment, err error) { + d, err = cli.AppsV1(). + Deployments(namespace). + Create(deploy) + return +} + +// defaultDel is default implementation of del function +func defaultDel(cli *kubernetes.Clientset, namespace, + name string, opts *metav1.DeleteOptions) (err error) { + err = cli.AppsV1(). + Deployments(namespace). + Delete(name, opts) + return +} + +// defaultRolloutStatus is default implementation of rolloutStatus function +func defaultRolloutStatus(d *appsv1.Deployment) ( + *RolloutOutput, error) { + b, err := NewBuilderForAPIObject(d). + Build() + if err != nil { + return nil, err + } + return b.RolloutStatus() +} + +// deafultRolloutStatusf is default implementation of rolloutStatusf function +func deafultRolloutStatusf(d *appsv1.Deployment) ( + []byte, error) { + b, err := NewBuilderForAPIObject(d). + Build() + if err != nil { + return nil, err + } + return b.RolloutStatusRaw() +} + // Kubeclient enables kubernetes API operations on deployment instance type Kubeclient struct { // clientset refers to kubernetes clientset. 
It is responsible to @@ -47,11 +112,17 @@ type Kubeclient struct { clientset *kubernetes.Clientset namespace string + // kubeconfig path to get kubernetes clientset + kubeConfigPath string + // functions useful during mocking - getClientset getClientsetFn - get getFn - rolloutStatus rolloutStatusFn - rolloutStatusf rolloutStatusfFn + getClientset getClientsetFn + getClientsetForPath getClientsetForPathFn + get getFn + create createFn + del deleteFn + rolloutStatus rolloutStatusFn + rolloutStatusf rolloutStatusfFn } // KubeclientBuildOption defines the abstraction to build a kubeclient instance @@ -59,50 +130,61 @@ type KubeclientBuildOption func(*Kubeclient) // withDefaults sets the default options of kubeclient instance func (k *Kubeclient) withDefaults() { + if k.getClientset == nil { k.getClientset = func() ( clients *kubernetes.Clientset, err error) { - config, err := client.GetConfig(client.New()) - if err != nil { - return nil, err - } - return kubernetes.NewForConfig(config) + return client.New(). + Clientset() + } + } + if k.getClientsetForPath == nil { + k.getClientsetForPath = func(kubeConfigPath string) ( + clients *kubernetes.Clientset, err error) { + return client.New(client.WithKubeConfigPath(kubeConfigPath)). + Clientset() } } if k.get == nil { - k.get = func(cli *kubernetes.Clientset, name, - namespace string, opts *metav1.GetOptions) ( + k.get = defaultGet + } + + if k.create == nil { + k.create = defaultCreate + } + + if k.del == nil { + k.del = defaultDel + } + + if k.create == nil { + k.create = func(cli *kubernetes.Clientset, + namespace string, deploy *appsv1.Deployment) ( d *appsv1.Deployment, err error) { d, err = cli.AppsV1(). Deployments(namespace). - Get(name, *opts) + Create(deploy) return } } - if k.rolloutStatus == nil { - k.rolloutStatus = func(d *appsv1.Deployment) ( - *RolloutOutput, error) { - b, err := NewBuilderForAPIObject(d). 
- Build() - if err != nil { - return nil, err - } - return b.RolloutStatus() + if k.del == nil { + k.del = func(cli *kubernetes.Clientset, namespace, + name string, opts *metav1.DeleteOptions) (err error) { + err = cli.AppsV1(). + Deployments(namespace). + Delete(name, opts) + return } } + if k.rolloutStatus == nil { + k.rolloutStatus = defaultRolloutStatus + } + if k.rolloutStatusf == nil { - k.rolloutStatusf = func(d *appsv1.Deployment) ( - []byte, error) { - b, err := NewBuilderForAPIObject(d). - Build() - if err != nil { - return nil, err - } - return b.RolloutStatusRaw() - } + k.rolloutStatusf = deafultRolloutStatusf } } @@ -114,6 +196,21 @@ func WithClientset(c *kubernetes.Clientset) KubeclientBuildOption { } } +// WithNamespace sets the kubernetes client against +// the provided namespace +func (k *Kubeclient) WithNamespace(namespace string) *Kubeclient { + k.namespace = namespace + return k +} + +// WithKubeConfigPath sets the kubeConfig path +// against client instance +func WithKubeConfigPath(path string) KubeclientBuildOption { + return func(k *Kubeclient) { + k.kubeConfigPath = path + } +} + // WithNamespace set namespace in kubeclient object func WithNamespace(namespace string) KubeclientBuildOption { return func(k *Kubeclient) { @@ -121,9 +218,9 @@ func WithNamespace(namespace string) KubeclientBuildOption { } } -// KubeClient returns a new instance of kubeclient meant for deployment. +// NewKubeClient returns a new instance of kubeclient meant for deployment. 
// caller can configure it with different kubeclientBuildOption -func KubeClient(opts ...KubeclientBuildOption) *Kubeclient { +func NewKubeClient(opts ...KubeclientBuildOption) *Kubeclient { k := &Kubeclient{} for _, o := range opts { o(k) @@ -132,13 +229,20 @@ func KubeClient(opts ...KubeclientBuildOption) *Kubeclient { return k } +func (k *Kubeclient) getClientsetForPathOrDirect() (*kubernetes.Clientset, error) { + if k.kubeConfigPath != "" { + return k.getClientsetForPath(k.kubeConfigPath) + } + return k.getClientset() +} + // getClientOrCached returns either a new instance // of kubernetes client or its cached copy func (k *Kubeclient) getClientOrCached() (*kubernetes.Clientset, error) { if k.clientset != nil { return k.clientset, nil } - c, err := k.getClientset() + c, err := k.getClientsetForPathOrDirect() if err != nil { return nil, err } @@ -168,6 +272,31 @@ func (k *Kubeclient) GetRaw(name string) ([]byte, error) { return json.Marshal(d) } +// Delete deletes a deployment instance from the +// kubernetes cluster +func (k *Kubeclient) Delete(name string, opts *metav1.DeleteOptions) error { + if strings.TrimSpace(name) == "" { + return errors.New("failed to delete deployment: missing deployment name") + } + cli, err := k.getClientOrCached() + if err != nil { + return errors.Wrapf(err, "failed to delete deployment {%s}", name) + } + return k.del(cli, k.namespace, name, opts) +} + +// Create creates a deployment in specified namespace in kubernetes cluster +func (k *Kubeclient) Create(deployment *appsv1.Deployment) (*appsv1.Deployment, error) { + if deployment == nil { + return nil, errors.New("failed to create deployment: nil deployment object") + } + cli, err := k.getClientOrCached() + if err != nil { + return nil, errors.Wrapf(err, "failed to create deployment {%s} in namespace {%s}", deployment.Name, deployment.Namespace) + } + return k.create(cli, k.namespace, deployment) +} + // RolloutStatusf returns deployment's rollout status for given name // in raw 
bytes func (k *Kubeclient) RolloutStatusf(name string) (op []byte, err error) { diff --git a/pkg/kubernetes/deployment/appsv1/v1alpha1/rollout_status.go b/pkg/kubernetes/deployment/appsv1/v1alpha1/rollout_status.go index a0994e9742..fc5f9ca80d 100644 --- a/pkg/kubernetes/deployment/appsv1/v1alpha1/rollout_status.go +++ b/pkg/kubernetes/deployment/appsv1/v1alpha1/rollout_status.go @@ -38,25 +38,25 @@ var rolloutStatuses = map[PredicateName]rolloutStatus{ // PredicateOlderReplicaActive refer to rolloutStatus for // predicate IsOlderReplicaActive. PredicateOlderReplicaActive: func(d *Deploy) string { - if d.object.Spec.Replicas == nil { + if d.Object.Spec.Replicas == nil { return "replica update in-progress: some older replicas were updated" } return fmt.Sprintf( "replica update in-progress: %d of %d new replicas were updated", - d.object.Status.UpdatedReplicas, *d.object.Spec.Replicas) + d.Object.Status.UpdatedReplicas, *d.Object.Spec.Replicas) }, // PredicateTerminationInProgress refer rolloutStatus // for predicate IsTerminationInProgress. PredicateTerminationInProgress: func(d *Deploy) string { return fmt.Sprintf( "replica termination in-progress: %d old replicas are pending termination", - d.object.Status.Replicas-d.object.Status.UpdatedReplicas) + d.Object.Status.Replicas-d.Object.Status.UpdatedReplicas) }, // PredicateUpdateInProgress refer to rolloutStatus for predicate IsUpdateInProgress. PredicateUpdateInProgress: func(d *Deploy) string { return fmt.Sprintf( "replica update in-progress: %d of %d updated replicas are available", - d.object.Status.AvailableReplicas, d.object.Status.UpdatedReplicas) + d.Object.Status.AvailableReplicas, d.Object.Status.UpdatedReplicas) }, // PredicateNotSpecSynced refer to status rolloutStatus for predicate IsNotSyncSpec. 
PredicateNotSpecSynced: func(d *Deploy) string { diff --git a/pkg/kubernetes/namespace/v1alpha1/namespace.go b/pkg/kubernetes/namespace/v1alpha1/namespace.go index 819ec62bb9..9d73ef5738 100644 --- a/pkg/kubernetes/namespace/v1alpha1/namespace.go +++ b/pkg/kubernetes/namespace/v1alpha1/namespace.go @@ -49,6 +49,12 @@ func (b *Builder) WithName(name string) *Builder { return b } +// WithGenerateName appends a random string after the name +func (b *Builder) WithGenerateName(name string) *Builder { + b.ns.object.GenerateName = name + "-" + return b +} + // Build returns the Namespace instance func (b *Builder) Build() (*Namespace, error) { if len(b.errs) > 0 { diff --git a/pkg/kubernetes/persistentvolume/v1alpha1/build.go b/pkg/kubernetes/persistentvolume/v1alpha1/build.go index 91b8dae85a..36a75fd058 100644 --- a/pkg/kubernetes/persistentvolume/v1alpha1/build.go +++ b/pkg/kubernetes/persistentvolume/v1alpha1/build.go @@ -105,21 +105,15 @@ func (b *Builder) WithCapacityQty(resCapacity resource.Quantity) *Builder { return b } -// WithHostDirectory sets the VolumeSource field of PV with provided hostpath -// as type directory. -func (b *Builder) WithHostDirectory(path string) *Builder { +// WithLocalHostDirectory sets the LocalVolumeSource field of PV with provided hostpath +func (b *Builder) WithLocalHostDirectory(path string) *Builder { if len(path) == 0 { b.errs = append(b.errs, errors.New("failed to build PV object: missing PV path")) return b } - // It is possible that the HostPath doesn't already exist on the node. - // Set the Local PV to create it. 
- hostPathType := corev1.HostPathDirectoryOrCreate - volumeSource := corev1.PersistentVolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ + Local: &corev1.LocalVolumeSource{ Path: path, - Type: &hostPathType, }, } diff --git a/pkg/kubernetes/persistentvolume/v1alpha1/build_test.go b/pkg/kubernetes/persistentvolume/v1alpha1/build_test.go index 2982b781d9..60d262fde0 100644 --- a/pkg/kubernetes/persistentvolume/v1alpha1/build_test.go +++ b/pkg/kubernetes/persistentvolume/v1alpha1/build_test.go @@ -201,7 +201,7 @@ func TestBuildWithCapacity(t *testing.T) { } } -func TestBuildWithHostDirectory(t *testing.T) { +func TestBuildWithLocalHostDirectory(t *testing.T) { tests := map[string]struct { path string builder *Builder @@ -225,7 +225,7 @@ func TestBuildWithHostDirectory(t *testing.T) { for name, mock := range tests { name, mock := name, mock t.Run(name, func(t *testing.T) { - b := mock.builder.WithHostDirectory(mock.path) + b := mock.builder.WithLocalHostDirectory(mock.path) if mock.expectErr && len(b.errs) == 0 { t.Fatalf("Test %q failed: expected error not to be nil", name) } @@ -273,8 +273,6 @@ func TestBuildWithNodeAffinity(t *testing.T) { func TestBuildHostPath(t *testing.T) { - hostPathType := corev1.HostPathDirectoryOrCreate - tests := map[string]struct { name string capacity string @@ -295,9 +293,9 @@ func TestBuildHostPath(t *testing.T) { corev1.ResourceStorage: fakeCapacity("10Ti"), }, PersistentVolumeSource: corev1.PersistentVolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/var/openebs/local/PV1", - Type: &hostPathType, + Local: &corev1.LocalVolumeSource{ + Path: "/var/openebs/local/PV1", + FSType: nil, }, }, NodeAffinity: &corev1.VolumeNodeAffinity{ @@ -336,7 +334,7 @@ func TestBuildHostPath(t *testing.T) { pvObj, err := NewBuilder(). WithName(mock.name). WithCapacity(mock.capacity). - WithHostDirectory(mock.path). + WithLocalHostDirectory(mock.path). WithNodeAffinity(mock.nodeName). 
Build() if mock.expectedErr && err == nil { diff --git a/pkg/kubernetes/pod/v1alpha1/build.go b/pkg/kubernetes/pod/v1alpha1/build.go index 3ceee0dfb4..0f47ecdd19 100644 --- a/pkg/kubernetes/pod/v1alpha1/build.go +++ b/pkg/kubernetes/pod/v1alpha1/build.go @@ -18,6 +18,8 @@ package v1alpha1 import ( errors "github.com/openebs/maya/pkg/errors/v1alpha1" + container "github.com/openebs/maya/pkg/kubernetes/container/v1alpha1" + volume "github.com/openebs/maya/pkg/kubernetes/volume/v1alpha1" corev1 "k8s.io/api/core/v1" ) @@ -35,15 +37,67 @@ func NewBuilder() *Builder { // WithName sets the Name field of Pod with provided value. func (b *Builder) WithName(name string) *Builder { if len(name) == 0 { - b.errs = append(b.errs, errors.New("failed to build Pod object: missing Pod name")) + b.errs = append( + b.errs, + errors.New("failed to build Pod object: missing Pod name"), + ) return b } b.pod.object.Name = name return b } +// WithNamespace sets the Namespace field of Pod with provided value. +func (b *Builder) WithNamespace(namespace string) *Builder { + if len(namespace) == 0 { + b.errs = append( + b.errs, + errors.New("failed to build Pod object: missing namespace"), + ) + return b + } + b.pod.object.Namespace = namespace + return b +} + +// WithContainerBuilder adds a container to this pod object. +// +// NOTE: +// container details are present in the provided container +// builder object +func (b *Builder) WithContainerBuilder( + containerBuilder *container.Builder, +) *Builder { + containerObj, err := containerBuilder.Build() + if err != nil { + b.errs = append(b.errs, errors.Wrap(err, "failed to build pod")) + return b + } + b.pod.object.Spec.Containers = append( + b.pod.object.Spec.Containers, + containerObj, + ) + return b +} + +// WithVolumeBuilder sets Volumes field of pod.
+func (b *Builder) WithVolumeBuilder(volumeBuilder *volume.Builder) *Builder { + vol, err := volumeBuilder.Build() + if err != nil { + b.errs = append(b.errs, errors.Wrap(err, "failed to build deployment")) + return b + } + b.pod.object.Spec.Volumes = append( + b.pod.object.Spec.Volumes, + *vol, + ) + return b +} + // WithRestartPolicy sets the RestartPolicy field in Pod with provided arguments -func (b *Builder) WithRestartPolicy(restartPolicy corev1.RestartPolicy) *Builder { +func (b *Builder) WithRestartPolicy( + restartPolicy corev1.RestartPolicy, +) *Builder { b.pod.object.Spec.RestartPolicy = restartPolicy return b } @@ -51,7 +105,10 @@ func (b *Builder) WithRestartPolicy(restartPolicy corev1.RestartPolicy) *Builder // WithNodeName sets the NodeName field of Pod with provided value. func (b *Builder) WithNodeName(nodeName string) *Builder { if len(nodeName) == 0 { - b.errs = append(b.errs, errors.New("failed to build Pod object: missing Pod node name")) + b.errs = append( + b.errs, + errors.New("failed to build Pod object: missing Pod node name"), + ) return b } b.pod.object.Spec.NodeName = nodeName @@ -61,23 +118,39 @@ func (b *Builder) WithNodeName(nodeName string) *Builder { // WithContainers sets the Containers field in Pod with provided arguments func (b *Builder) WithContainers(containers []corev1.Container) *Builder { if len(containers) == 0 { - b.errs = append(b.errs, errors.New("failed to build Pod object: missing containers")) + b.errs = append( + b.errs, + errors.New("failed to build Pod object: missing containers"), + ) return b } b.pod.object.Spec.Containers = containers return b } +// WithContainer sets the Containers field in Pod with provided arguments +func (b *Builder) WithContainer(container corev1.Container) *Builder { + return b.WithContainers([]corev1.Container{container}) +} + // WithVolumes sets the Volumes field in Pod with provided arguments func (b *Builder) WithVolumes(volumes []corev1.Volume) *Builder { if len(volumes) == 0 { - 
b.errs = append(b.errs, errors.New("failed to build Pod object: missing volumes")) + b.errs = append( + b.errs, + errors.New("failed to build Pod object: missing volumes"), + ) return b } b.pod.object.Spec.Volumes = volumes return b } +// WithVolume sets the Volumes field in Pod with provided arguments +func (b *Builder) WithVolume(volume corev1.Volume) *Builder { + return b.WithVolumes([]corev1.Volume{volume}) +} + // Build returns the Pod API instance func (b *Builder) Build() (*corev1.Pod, error) { if len(b.errs) > 0 { diff --git a/pkg/kubernetes/pod/v1alpha1/kubernetes.go b/pkg/kubernetes/pod/v1alpha1/kubernetes.go index c0ed32ccb3..f74ab6d11a 100644 --- a/pkg/kubernetes/pod/v1alpha1/kubernetes.go +++ b/pkg/kubernetes/pod/v1alpha1/kubernetes.go @@ -15,14 +15,17 @@ package v1alpha1 import ( + "bytes" "encoding/json" errors "github.com/openebs/maya/pkg/errors/v1alpha1" + client "github.com/openebs/maya/pkg/kubernetes/client/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - client "github.com/openebs/maya/pkg/kubernetes/client/v1alpha1" clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/remotecommand" ) // getClientsetFn is a typed function that @@ -33,6 +36,18 @@ type getClientsetFn func() (clientset *clientset.Clientset, err error) // abstracts fetching of clientset from kubeConfigPath type getClientsetForPathFn func(kubeConfigPath string) (clientset *clientset.Clientset, err error) +// getKubeConfigFn is a typed function that abstracts fetching +// rest config +type getKubeConfigFn func() (config *rest.Config, err error) + +// getKubeConfigForPathFn is a typed function that +// abstracts fetching of config from kubeConfigPath +type getKubeConfigForPathFn func(kubeConfigPath string) (config *rest.Config, err error) + +// createFn is a typed function that abstracts +// creation of pod +type createFn func(cli *clientset.Clientset, namespace string, 
pod *corev1.Pod) (*corev1.Pod, error) + // listFn is a typed function that abstracts // listing of pods type listFn func(cli *clientset.Clientset, namespace string, opts metav1.ListOptions) (*corev1.PodList, error) @@ -45,6 +60,44 @@ type deleteFn func(cli *clientset.Clientset, namespace, name string, opts *metav // to get pod type getFn func(cli *clientset.Clientset, namespace, name string, opts metav1.GetOptions) (*corev1.Pod, error) +// execFn is a typed function that abstracts +// pod exec +type execFn func(cli *clientset.Clientset, config *rest.Config, name, namespace string, opts *corev1.PodExecOptions) (*ExecOutput, error) + +// defaultExec is the default implementation of execFn +func defaultExec(cli *clientset.Clientset, config *rest.Config, name, namespace string, + opts *corev1.PodExecOptions) (*ExecOutput, error) { + var stdout, stderr bytes.Buffer + req := cli.CoreV1().RESTClient().Post(). + Resource("pods"). + Name(name). + Namespace(namespace). + SubResource("exec"). + VersionedParams(opts, scheme.ParameterCodec) + // create exec executor which is an interface for transporting shell-style streams. + exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL()) + if err != nil { + return nil, errors.Wrapf(err, + "failed to exec into pod {%s}: failed to connect to the provided server", name) + } + // Stream initiates the transport of the standard shell streams. It will transport any + // non-nil stream to a remote system, and return an error if a problem occurs. 
+ err = exec.Stream(remotecommand.StreamOptions{ + Stdin: nil, + Stdout: &stdout, + Stderr: &stderr, + Tty: opts.TTY, + }) + if err != nil { + return nil, errors.Wrapf(err, "failed to exec into pod {%s}: failed to stream", name) + } + execOutput := &ExecOutput{ + Stdout: stdout.String(), + Stderr: stderr.String(), + } + return execOutput, nil +} + // KubeClient enables kubernetes API operations // on pod instance type KubeClient struct { @@ -57,15 +110,28 @@ type KubeClient struct { // KubeClient has to operate namespace string + // kubeConfig represents kubernetes config + kubeConfig *rest.Config + // kubeconfig path to get kubernetes clientset kubeConfigPath string // functions useful during mocking - getClientset getClientsetFn - getClientsetForPath getClientsetForPathFn - list listFn - del deleteFn - get getFn + getKubeConfig getKubeConfigFn + getKubeConfigForPath getKubeConfigForPathFn + getClientset getClientsetFn + getClientsetForPath getClientsetForPathFn + create createFn + list listFn + del deleteFn + get getFn + exec execFn +} + +// ExecOutput struct contains stdout and stderr +type ExecOutput struct { + Stdout string `json:"stdout"` + Stderr string `json:"stderr"` } // KubeClientBuildOption defines the abstraction @@ -75,6 +141,16 @@ type KubeClientBuildOption func(*KubeClient) // withDefaults sets the default options // of KubeClient instance func (k *KubeClient) withDefaults() { + if k.getKubeConfig == nil { + k.getKubeConfig = func() (config *rest.Config, err error) { + return client.New().Config() + } + } + if k.getKubeConfigForPath == nil { + k.getKubeConfigForPath = func(kubeConfigPath string) (config *rest.Config, err error) { + return client.New(client.WithKubeConfigPath(kubeConfigPath)).GetConfigForPathOrDirect() + } + } if k.getClientset == nil { k.getClientset = func() (clients *clientset.Clientset, err error) { return client.New().Clientset() @@ -85,6 +161,11 @@ func (k *KubeClient) withDefaults() { return 
client.New(client.WithKubeConfigPath(kubeConfigPath)).Clientset() } } + if k.create == nil { + k.create = func(cli *clientset.Clientset, namespace string, pod *corev1.Pod) (*corev1.Pod, error) { + return cli.CoreV1().Pods(namespace).Create(pod) + } + } if k.list == nil { k.list = func(cli *clientset.Clientset, namespace string, opts metav1.ListOptions) (*corev1.PodList, error) { return cli.CoreV1().Pods(namespace).List(opts) @@ -100,6 +181,9 @@ func (k *KubeClient) withDefaults() { return cli.CoreV1().Pods(namespace).Get(name, opts) } } + if k.exec == nil { + k.exec = defaultExec + } } // WithClientSet sets the kubernetes client against @@ -136,7 +220,15 @@ func (k *KubeClient) WithNamespace(namespace string) *KubeClient { return k } -func (k *KubeClient) getClientsetForPathOrDirect() (*clientset.Clientset, error) { +// WithKubeConfig sets the kubernetes config against +// the KubeClient instance +func (k *KubeClient) WithKubeConfig(config *rest.Config) *KubeClient { + k.kubeConfig = config + return k +} + +func (k *KubeClient) getClientsetForPathOrDirect() ( + *clientset.Clientset, error) { if k.kubeConfigPath != "" { return k.getClientsetForPath(k.kubeConfigPath) } @@ -158,6 +250,28 @@ func (k *KubeClient) getClientsetOrCached() (*clientset.Clientset, error) { return k.clientset, nil } +func (k *KubeClient) getKubeConfigForPathOrDirect() (*rest.Config, error) { + if k.kubeConfigPath != "" { + return k.getKubeConfigForPath(k.kubeConfigPath) + } + return k.getKubeConfig() +} + +// getKubeConfigOrCached returns either a new instance +// of kubernetes config or its cached copy +func (k *KubeClient) getKubeConfigOrCached() (*rest.Config, error) { + if k.kubeConfig != nil { + return k.kubeConfig, nil + } + + kc, err := k.getKubeConfigForPathOrDirect() + if err != nil { + return nil, errors.Wrapf(err, "failed to get kube config") + } + k.kubeConfig = kc + return k.kubeConfig, nil +} + // List returns a list of pod // instances present in kubernetes cluster func (k 
*KubeClient) List(opts metav1.ListOptions) (*corev1.PodList, error) { @@ -175,29 +289,81 @@ func (k *KubeClient) Delete(name string, opts *metav1.DeleteOptions) error { } cli, err := k.getClientsetOrCached() if err != nil { - return errors.Wrapf(err, "failed to delete pod {%s}: failed to get clientset", name) + return errors.Wrapf( + err, + "failed to delete pod {%s}: failed to get clientset", + name, + ) } return k.del(cli, k.namespace, name, opts) } +// Create creates a pod in specified namespace in kubernetes cluster +func (k *KubeClient) Create(pod *corev1.Pod) (*corev1.Pod, error) { + if pod == nil { + return nil, errors.New("failed to create pod: nil pod object") + } + cli, err := k.getClientsetOrCached() + if err != nil { + return nil, errors.Wrapf( + err, + "failed to create pod {%s} in namespace {%s}", + pod.Name, + pod.Namespace, + ) + } + return k.create(cli, k.namespace, pod) +} + // Get gets a pod object present in kubernetes cluster -func (k *KubeClient) Get(name string, opts metav1.GetOptions) (*corev1.Pod, error) { +func (k *KubeClient) Get(name string, + opts metav1.GetOptions) (*corev1.Pod, error) { if len(name) == 0 { return nil, errors.New("failed to get pod: missing pod name") } cli, err := k.getClientsetOrCached() if err != nil { - return nil, errors.Wrapf(err, "failed to get pod {%s}: failed to get clientset", name) + return nil, errors.Wrapf( + err, + "failed to get pod {%s}: failed to get clientset", + name, + ) } return k.get(cli, k.namespace, name, opts) } // GetRaw gets pod object for a given name and namespace present // in kubernetes cluster and returns result in raw byte. 
-func (k *KubeClient) GetRaw(name string, opts metav1.GetOptions) ([]byte, error) { +func (k *KubeClient) GetRaw(name string, + opts metav1.GetOptions) ([]byte, error) { p, err := k.Get(name, opts) if err != nil { return nil, err } return json.Marshal(p) } + +// Exec runs a command remotely in a container of a pod +func (k *KubeClient) Exec(name string, + opts *corev1.PodExecOptions) (*ExecOutput, error) { + cli, err := k.getClientsetOrCached() + if err != nil { + return nil, err + } + config, err := k.getKubeConfigOrCached() + if err != nil { + return nil, err + } + return k.exec(cli, config, name, k.namespace, opts) +} + +// ExecRaw runs a command remotely in a container of a pod +// and returns raw output +func (k *KubeClient) ExecRaw(name string, + opts *corev1.PodExecOptions) ([]byte, error) { + execOutput, err := k.Exec(name, opts) + if err != nil { + return nil, err + } + return json.Marshal(execOutput) +} diff --git a/pkg/kubernetes/pod/v1alpha1/pod.go b/pkg/kubernetes/pod/v1alpha1/pod.go index b9b99fddcc..1eec3999c0 100644 --- a/pkg/kubernetes/pod/v1alpha1/pod.go +++ b/pkg/kubernetes/pod/v1alpha1/pod.go @@ -37,17 +37,28 @@ type predicateList []Predicate type Predicate func(*Pod) bool // ToAPIList converts PodList to API PodList -func (p *PodList) ToAPIList() *corev1.PodList { +func (pl *PodList) ToAPIList() *corev1.PodList { plist := &corev1.PodList{} - for _, pod := range p.items { + for _, pod := range pl.items { plist.Items = append(plist.Items, *pod.object) } return plist } +type podBuildOption func(*Pod) + +// NewForAPIObject returns a new instance of Pod +func NewForAPIObject(obj *corev1.Pod, opts ...podBuildOption) *Pod { + p := &Pod{object: obj} + for _, o := range opts { + o(p) + } + return p +} + // Len returns the number of items present in the PodList -func (p *PodList) Len() int { - return len(p.items) +func (pl *PodList) Len() int { + return len(pl.items) } // all returns true if all the predicates @@ -142,3 +153,31 @@ func IsNil() Predicate 
{ func (p *Pod) GetAPIObject() *corev1.Pod { return p.object } + +// FromList created a PodList with provided api podlist +func FromList(pods *corev1.PodList) *PodList { + pl := ListBuilderForAPIList(pods). + List() + return pl +} + +// GetScheduledNodes returns the nodes on which pods are scheduled +func (pl *PodList) GetScheduledNodes() map[string]int { + nodeNames := make(map[string]int) + for _, p := range pl.items { + p := p // pin it + nodeNames[p.object.Spec.NodeName]++ + } + return nodeNames +} + +// IsMatchNodeAny checks the PodList is running on the provided nodes +func (pl *PodList) IsMatchNodeAny(nodes map[string]int) bool { + for _, p := range pl.items { + p := p // pin it + if nodes[p.object.Spec.NodeName] == 0 { + return false + } + } + return true +} diff --git a/pkg/kubernetes/podexec/v1alpha1/podexec.go b/pkg/kubernetes/podexec/v1alpha1/podexec.go index f4fb514fb2..88a9aaac50 100644 --- a/pkg/kubernetes/podexec/v1alpha1/podexec.go +++ b/pkg/kubernetes/podexec/v1alpha1/podexec.go @@ -20,11 +20,11 @@ import ( "fmt" "github.com/ghodss/yaml" - "github.com/openebs/maya/pkg/template" api_core_v1 "k8s.io/api/core/v1" ) -type podexec struct { +// PodExec represents the details of pod exec options +type PodExec struct { object *api_core_v1.PodExecOptions ignoreErrors bool errs []error @@ -32,7 +32,7 @@ type podexec struct { // AsAPIPodExec validate and returns PodExecOptions object pointer and error // depending on ignoreErrors opt and errors -func (p *podexec) AsAPIPodExec() (*api_core_v1.PodExecOptions, error) { +func (p *PodExec) AsAPIPodExec() (*api_core_v1.PodExecOptions, error) { err := p.Validate() if err != nil && !p.ignoreErrors { return nil, err @@ -40,28 +40,23 @@ func (p *podexec) AsAPIPodExec() (*api_core_v1.PodExecOptions, error) { return p.object, nil } -// WithTemplate takes Yaml values which is given in runtask and key in which configuration -// is present and unmarshal it with PodExecOptions. 
-func WithTemplate(context, yamlString string, values map[string]interface{}) (p *podexec) { - p = &podexec{} - b, err := template.AsTemplatedBytes(context, yamlString, values) - if err != nil { - p.errs = append(p.errs, err) - return - } +// BuilderForYAMLObject returns a new instance +// of Builder for a given template object +func BuilderForYAMLObject(object []byte) *PodExec { + p := &PodExec{} exec := &api_core_v1.PodExecOptions{} - err = yaml.Unmarshal(b, exec) + err := yaml.Unmarshal(object, exec) if err != nil { p.errs = append(p.errs, err) - return + return p } p.object = exec - return + return p } // Validate validates PodExecOptions it mainly checks for container name is present or not and // commands are present or not. -func (p *podexec) Validate() error { +func (p *PodExec) Validate() error { if len(p.errs) != 0 { return fmt.Errorf("validation failed: %v", p.errs) } @@ -74,17 +69,19 @@ func (p *podexec) Validate() error { return nil } -type buildOption func(*podexec) +// BuildOption represents the various build options +// against PodExec operation +type BuildOption func(*PodExec) // IgnoreErrors is a buildOption that is used ignore errors -func IgnoreErrors() buildOption { - return func(p *podexec) { +func IgnoreErrors() BuildOption { + return func(p *PodExec) { p.ignoreErrors = true } } -// Apply applies all build options in podexec -func (p *podexec) Apply(opts ...buildOption) *podexec { +// Apply applies all build options in PodExec +func (p *PodExec) Apply(opts ...BuildOption) *PodExec { for _, o := range opts { o(p) } diff --git a/pkg/kubernetes/storageclass/v1alpha1/build.go b/pkg/kubernetes/storageclass/v1alpha1/build.go index 80d13fab47..a05c433305 100644 --- a/pkg/kubernetes/storageclass/v1alpha1/build.go +++ b/pkg/kubernetes/storageclass/v1alpha1/build.go @@ -42,6 +42,12 @@ func (b *Builder) WithName(name string) *Builder { return b } +// WithGenerateName appends a random string after the name +func (b *Builder) WithGenerateName(name string) 
*Builder { + b.sc.object.GenerateName = name + "-" + return b +} + // WithAnnotations sets the Annotations field of storageclass with provided value. func (b *Builder) WithAnnotations(annotations map[string]string) *Builder { if len(annotations) == 0 { diff --git a/pkg/kubernetes/volume/v1alpha1/build.go b/pkg/kubernetes/volume/v1alpha1/build.go index f84f48d454..129ed45be2 100644 --- a/pkg/kubernetes/volume/v1alpha1/build.go +++ b/pkg/kubernetes/volume/v1alpha1/build.go @@ -59,6 +59,21 @@ func (b *Builder) WithHostDirectory(path string) *Builder { return b } +// WithPVCSource sets the Volume field of Volume with provided pvc +func (b *Builder) WithPVCSource(pvcName string) *Builder { + if len(pvcName) == 0 { + b.errs = append(b.errs, errors.New("failed to build volume object: missing pvc name")) + return b + } + volumeSource := corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvcName, + }, + } + b.volume.object.VolumeSource = volumeSource + return b +} + // Build returns the Volume API instance func (b *Builder) Build() (*corev1.Volume, error) { if len(b.errs) > 0 { diff --git a/pkg/storagepoolclaim/v1alpha1/storagepoolclaim.go b/pkg/storagepoolclaim/v1alpha1/storagepoolclaim.go index 187697c919..0c352ee1e3 100644 --- a/pkg/storagepoolclaim/v1alpha1/storagepoolclaim.go +++ b/pkg/storagepoolclaim/v1alpha1/storagepoolclaim.go @@ -17,6 +17,9 @@ limitations under the License. package v1alpha1 import ( + "fmt" + "time" + apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" "k8s.io/apimachinery/pkg/types" ) @@ -116,6 +119,12 @@ func (sb *Builder) WithName(name string) *Builder { return sb } +// WithGenerateName appends a random string after the name +func (sb *Builder) WithGenerateName(name string) *Builder { + name = name + "-" + fmt.Sprintf("%d", time.Now().UnixNano()) + return sb.WithName(name) +} + // WithDiskType sets the Type field of spc with provided argument value. 
func (sb *Builder) WithDiskType(diskType string) *Builder { sb.Spc.Object.Spec.Type = diskType diff --git a/pkg/task/task.go b/pkg/task/task.go index 23574101d3..1ac6991054 100644 --- a/pkg/task/task.go +++ b/pkg/task/task.go @@ -1317,7 +1317,7 @@ func (m *executor) extnV1B1DeploymentRollOutStatus() (err error) { // appsV1DeploymentRollOutStatus generates rollout status for a given deployment from deployment object func (m *executor) appsV1DeploymentRollOutStatus() (err error) { - dclient := deploy_appsv1.KubeClient( + dclient := deploy_appsv1.NewKubeClient( deploy_appsv1.WithNamespace(m.getTaskRunNamespace()), deploy_appsv1.WithClientset(m.getK8sClient().GetKCS())) res, err := dclient.RolloutStatusf(m.getTaskObjectName()) @@ -1331,7 +1331,7 @@ func (m *executor) appsV1DeploymentRollOutStatus() (err error) { // getAppsV1Deployment will get the Deployment as specified in the RunTask func (m *executor) getAppsV1Deployment() (err error) { - dclient := deploy_appsv1.KubeClient( + dclient := deploy_appsv1.NewKubeClient( deploy_appsv1.WithNamespace(m.getTaskRunNamespace()), deploy_appsv1.WithClientset(m.getK8sClient().GetKCS())) d, err := dclient.GetRaw(m.getTaskObjectName()) @@ -1556,21 +1556,25 @@ func (m *executor) deleteOEV1alpha1CSV() (err error) { // execCoreV1Pod runs given command remotely in given container of given pod // and post stdout and and stderr in JsonResult. You can get it using - -// {{- jsonpath .JsonResult "{.Stdout}" | trim | saveAs "XXX" .TaskResult | noop -}} -func (m *executor) execCoreV1Pod() (err error) { - podexecopts, err := podexec.WithTemplate("execCoreV1Pod", m.Runtask.Spec.Task, m.Values). 
- AsAPIPodExec() +// {{- jsonpath .JsonResult "{.stdout}" | trim | saveAs "XXX" .TaskResult | noop -}} +func (m *executor) execCoreV1Pod() error { + raw, err := template.AsTemplatedBytes("execCoreV1Pod", m.Runtask.Spec.Task, m.Values) if err != nil { - return errors.Wrap(err, "failed to run pod exec") + return errors.Wrap(err, "failed to run templating on pod exec object") } - - result, err := m.getK8sClient().ExecCoreV1Pod(m.getTaskObjectName(), podexecopts) + podexecopts, err := podexec.BuilderForYAMLObject(raw).AsAPIPodExec() + if err != nil { + return errors.Wrap(err, "failed to build pod exec options") + } + execRaw, err := pod.NewKubeClient(). + WithNamespace(m.getTaskRunNamespace()). + ExecRaw(m.getTaskObjectName(), podexecopts) if err != nil { return errors.Wrap(err, "failed to run pod exec") } - util.SetNestedField(m.Values, result, string(v1alpha1.CurrentJSONResultTLP)) - return + util.SetNestedField(m.Values, execRaw, string(v1alpha1.CurrentJSONResultTLP)) + return nil } // rolloutStatus generates rollout status of a given resource form it's object details diff --git a/pkg/version/kubernetes/version_test.go b/pkg/version/kubernetes/version_test.go index 43ec4e77db..934171f039 100644 --- a/pkg/version/kubernetes/version_test.go +++ b/pkg/version/kubernetes/version_test.go @@ -128,6 +128,7 @@ func TestCompare(t *testing.T) { "valid & gt - 4": {"v0.0.5-beta", "v0.0.5-alpha", 1}, "valid & gt - 5": {"v0.0.5", "v0.0.5-alpha", 1}, "valid & gt - 6": {"v0.0.5-beta.2", "v0.0.5-beta.1", 1}, + "valid & gt - 7": {"v1.12.7-gke.10", "v1.12.0", 1}, // less than "valid & lt - 1": {"v0.0.1", "v0.0.5", -1}, "valid & lt - 2": {"v0.1.1", "v0.2.0", -1}, @@ -354,6 +355,8 @@ func TestTemplateFuncs(t *testing.T) { "version2": "v1.0.1-beta", "version3": "v1.1.1", "version4": "v1.1.1-ga", + "version5": "v1.12.7-gke.10", + "version6": "v1.12.0", }, template: ` one: {{kubeVersionEq .version3 .version4}} @@ -365,6 +368,10 @@ six: {{kubeVersionLte .version3 .version4}} seven: 
{{kubeVersionGt .version1 .version2}} eight: {{kubeVersionLt .version1 .version2}} nine: {{kubeVersionLt .version2 .version4}} +ten: {{kubeVersionGte .version5 .version6}} +{{- if kubeVersionGte .version5 .version6 }} +passed: true +{{- end }} `, expected: ` one: true @@ -376,6 +383,8 @@ six: true seven: false eight: true nine: true +ten: true +passed: true `, }, } diff --git a/tests/admission/admission_suite_test.go b/tests/admission/admission_suite_test.go deleted file mode 100644 index fb0b1f8d4d..0000000000 --- a/tests/admission/admission_suite_test.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright © 2019 The OpenEBS Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package admission_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "testing" -) - -func TestAdmission(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Admission Suite") -} diff --git a/tests/admission/admission_test.go b/tests/admission/admission_test.go index 9431344e84..945f80b31b 100644 --- a/tests/admission/admission_test.go +++ b/tests/admission/admission_test.go @@ -1,309 +1,440 @@ -// Copyright © 2019 The OpenEBS Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ package admission import ( - "fmt" + "strconv" + "strings" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" - k8s "github.com/openebs/maya/pkg/client/k8s/v1alpha1" - cv "github.com/openebs/maya/pkg/cstorvolume/v1alpha1" + + apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" pvc "github.com/openebs/maya/pkg/kubernetes/persistentvolumeclaim/v1alpha1" - "github.com/openebs/maya/tests/artifacts" - framework "github.com/openebs/maya/tests/framework/v1alpha1" - corev1 "k8s.io/api/core/v1" + snap "github.com/openebs/maya/pkg/kubernetes/snapshot/v1alpha1" + sc "github.com/openebs/maya/pkg/kubernetes/storageclass/v1alpha1" + spc "github.com/openebs/maya/pkg/storagepoolclaim/v1alpha1" + "github.com/openebs/maya/tests/cstor" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" -) - -const ( - // namespaceYaml holds the yaml spec - // to create admission namespace - namespaceYaml artifacts.Artifact = ` -apiVersion: v1 -kind: Namespace -metadata: - name: admission -` - // cStorPVC holds the yaml spec - // for source persistentvolumeclaim - cStorPVC artifacts.Artifact = ` -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: cstor-source-volume - namespace: admission - labels: - name: cstor-source-volume -spec: - storageClassName: openebs-cstor-class - accessModes: - - ReadWriteOnce - resources: - requests: - storage: "2G" -` - // singleReplicaSC holds the yaml spec - // for pool with single replica - singleReplicaSC artifacts.Artifact = ` -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: openebs-cstor-class - annotations: - cas.openebs.io/config: | - - name: StoragePoolClaim - value: "cstor-sparse-pool" - - name: ReplicaCount - value: "1" - openebs.io/cas-type: cstor -provisioner: openebs.io/provisioner-iscsi -reclaimPolicy: Delete -` - // clonePVCYaml holds the yaml spec - // for clone persistentvolumeclaim - clonePVCYaml artifacts.Artifact = ` -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: test-snap-claim - namespace: admission - labels: - name: 
test-snap-claim - annotations: - snapshot.alpha.kubernetes.io/snapshot: snapshot-cstor -spec: - storageClassName: openebs-snapshot-promoter - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 2G` - // cstorSnapshotYaml holds the yaml spec - // for volume snapshot - cstorSnapshotYaml artifacts.Artifact = ` -apiVersion: volumesnapshot.external-storage.k8s.io/v1 -kind: VolumeSnapshot -metadata: - name: snapshot-cstor - namespace: admission -spec: - persistentVolumeClaimName: cstor-source-volume -` ) -var _ = Describe("[single-node] [cstor] AdmissionWebhook", func() { - options := framework.FrameworkOptions{ - MinNodeCount: 1, - Artifacts: "../artifacts/openebs-ci.yaml", - } - _ = framework.NewFrameworkDefault("AdmissionWebhook pvc delete", options) - +var _ = Describe("[cstor] TEST ADMISSION SERVER VALIDATION", func() { var ( - NSUnst, SCUnst, PVCUnst *unstructured.Unstructured - pvclaim *corev1.PersistentVolumeClaim + err error + pvcName = "test-cstor-admission-pvc" + snapName = "test-cstor-admission-snapshot" + clonepvcName = "test-cstor-admission-pvc-cloned" ) + BeforeEach(func() { - // Extracting storageclass artifacts unstructured - var err error - SCUnst, err = artifacts.GetArtifactUnstructured(singleReplicaSC) - Expect(err).ShouldNot(HaveOccurred()) - - // Apply single replica storageclass - cu := k8s.CreateOrUpdate( - k8s.GroupVersionResourceFromGVK(SCUnst), - SCUnst.GetNamespace(), - ) - - _, err = cu.Apply(SCUnst) - Expect(err).ShouldNot(HaveOccurred()) - - // Creates admission namespace - NSUnst, err = artifacts.GetArtifactUnstructured( - artifacts.Artifact(namespaceYaml), - ) - Expect(err).ShouldNot(HaveOccurred()) - - cu = k8s.CreateOrUpdate( - k8s.GroupVersionResourceFromGVK(NSUnst), - NSUnst.GetNamespace(), - ) - _, err = cu.Apply(NSUnst) - Expect(err).ShouldNot(HaveOccurred()) - - // Extracting PVC artifacts unstructured - PVCUnst, err = artifacts.GetArtifactUnstructured(cStorPVC) - Expect(err).ShouldNot(HaveOccurred()) - - // Create 
pvc using storageclass 'cstor-sparse-class' - By(fmt.Sprintf("Creating pvc '%s' in '%s' namespace", PVCUnst.GetName(), PVCUnst.GetNamespace())) - cu = k8s.CreateOrUpdate( - k8s.GroupVersionResourceFromGVK(PVCUnst), - PVCUnst.GetNamespace(), - ) - _, err = cu.Apply(PVCUnst) - Expect(err).ShouldNot(HaveOccurred()) - - By("verifying pvc to be created and bound with pv") - Eventually(func() bool { - pvclaim, err = pvc. - NewKubeClient(). - WithNamespace(PVCUnst.GetNamespace()). - Get(PVCUnst.GetName(), metav1.GetOptions{}) - Expect(err).ShouldNot(HaveOccurred()) - return pvc. - NewForAPIObject(pvclaim).IsBound() - }, - framework.DefaultTimeOut, framework.DefaultPollingInterval). - Should(BeTrue()) - - // Check for cstorvolume to get healthy - Eventually(func() bool { - cstorvolume, err := cv. - NewKubeclient(cv.WithNamespace("openebs")). - Get(pvclaim.Spec.VolumeName, metav1.GetOptions{}) - Expect(err).ShouldNot(HaveOccurred()) - return cv. - NewForAPIObject(cstorvolume).IsHealthy() - }, - framework.DefaultTimeOut, framework.DefaultPollingInterval). - Should(BeTrue()) + When("deploying cstor sparse pool", func() { + By("building storagepoolclaim") + spcObj = spc.NewBuilder(). + WithGenerateName(spcName). + WithDiskType(string(apis.TypeSparseCPV)). + WithMaxPool(cstor.PoolCount). + WithOverProvisioning(false). + WithPoolType(string(apis.PoolTypeStripedCPV)). 
+ Build().Object + + By("creating above storagepoolclaim") + spcObj, err = ops.SPCClient.Create(spcObj) + Expect(err).To(BeNil(), "while creating spc", spcName) + + By("verifying healthy cstorpool count") + cspCount := ops.GetHealthyCSPCount(spcObj.Name, cstor.PoolCount) + Expect(cspCount).To(Equal(1), "while checking cstorpool health count") + + By("building a CAS Config with generated SPC name") + CASConfig := strings.Replace(openebsCASConfigValue, "$spcName", spcObj.Name, 1) + CASConfig = strings.Replace(CASConfig, "$count", strconv.Itoa(cstor.ReplicaCount), 1) + annotations[string(apis.CASTypeKey)] = string(apis.CstorVolume) + annotations[string(apis.CASConfigKey)] = CASConfig + + By("building storageclass object") + scObj, err = sc.NewBuilder(). + WithGenerateName(scName). + WithAnnotations(annotations). + WithProvisioner(openebsProvisioner).Build() + Expect(err).ShouldNot(HaveOccurred(), "while building storageclass obj for storageclass {%s}", scName) + + By("creating storageclass") + scObj, err = ops.SCClient.Create(scObj) + Expect(err).To(BeNil(), "while creating storageclass", scName) + + }) }) AfterEach(func() { - By(fmt.Sprintf("deleting PVC '%s' as part of teardown", PVCUnst.GetName())) - // Delete the PVC artifacts - cu := k8s.DeleteResource( - k8s.GroupVersionResourceFromGVK(PVCUnst), - PVCUnst.GetNamespace(), - ) - err := cu.Delete(PVCUnst) - Expect(err).ShouldNot(HaveOccurred()) - - // Verify deletion of pvc instances - Eventually(func() int { - pvcs, err := pvc. - NewKubeClient(). - WithNamespace(PVCUnst.GetNamespace()). - List(metav1.ListOptions{LabelSelector: "name=cstor-source-volume"}) - Expect(err).ShouldNot(HaveOccurred()) - return len(pvcs.Items) - }, - framework.DefaultTimeOut, framework.DefaultPollingInterval). - Should(Equal(0), "pvc count should be 0") - - CstorVolumeLabel := "openebs.io/persistent-volume=" + pvclaim.Spec.VolumeName - - // verify deletion of cstorvolume - Eventually(func() int { - cvs, err := cv. 
- NewKubeclient(cv.WithNamespace("openebs")). - List(metav1.ListOptions{LabelSelector: CstorVolumeLabel}) - Expect(err).ShouldNot(HaveOccurred()) - return len(cvs.Items) - }, - framework.DefaultTimeOut, framework.DefaultPollingInterval). - Should(Equal(0), "cStorvolume count should be 0") - - sc := k8s.DeleteResource( - k8s.GroupVersionResourceFromGVK(SCUnst), - SCUnst.GetNamespace(), - ) - err = sc.Delete(SCUnst) - Expect(err).ShouldNot(HaveOccurred()) - - ns := k8s.DeleteResource( - k8s.GroupVersionResourceFromGVK(NSUnst), - "", - ) - err = ns.Delete(NSUnst) - Expect(err).ShouldNot(HaveOccurred()) + By("deleting resources created for cstor volume snapshot provisioning", func() { + By("deleting storagepoolclaim") + _, err = ops.SPCClient.Delete(spcObj.Name, &metav1.DeleteOptions{}) + Expect(err).To(BeNil(), "while deleting the spc's {%s}", spcName) + + By("deleting storageclass") + err = ops.SCClient.Delete(scObj.Name, &metav1.DeleteOptions{}) + Expect(err).To(BeNil(), "while deleting storageclass", scName) + + }) + }) + + When("cstor pvc with replicacount 1 is created", func() { + It("should create cstor volume target pod", func() { + + By("building a persistentvolumeclaim") + pvcObj, err = pvc.NewBuilder(). + WithName(pvcName). + WithNamespace(nsObj.Name). + WithStorageClass(scObj.Name). + WithAccessModes(accessModes). 
+ WithCapacity(capacity).Build() + Expect(err).ShouldNot( + HaveOccurred(), + "while building pvc {%s} in namespace {%s}", + pvcName, + nsName, + ) + + By("creating cstor persistentvolumeclaim") + pvcObj, err = ops.PVCClient.WithNamespace(nsObj.Name).Create(pvcObj) + Expect(err).To( + BeNil(), + "while creating pvc {%s} in namespace {%s}", + pvcName, + nsName, + ) + By("verifying volume target pod count as 1") + targetVolumeLabel := pvcLabel + pvcObj.Name + controllerPodCount := ops.GetPodRunningCountEventually(openebsNamespace, targetVolumeLabel, 1) + Expect(controllerPodCount).To(Equal(1), "while checking controller pod count") + + By("verifying cstorvolume replica count") + pvcObj, err = ops.PVCClient.WithNamespace(nsObj.Name).Get(pvcName, metav1.GetOptions{}) + Expect(err).To( + BeNil(), + "while getting pvc {%s} in namespace {%s}", + pvcName, + nsName, + ) + cvrLabel := pvLabel + pvcObj.Spec.VolumeName + cvrCount := ops.GetCstorVolumeReplicaCountEventually(openebsNamespace, cvrLabel, cstor.ReplicaCount) + Expect(cvrCount).To(Equal(true), "while checking cstorvolume replica count") + + By("verifying pvc status as bound") + status := ops.IsPVCBoundEventually(pvcName) + Expect(status).To(Equal(true), "while checking status equal to bound") + + By("verifying cstorVolume status as healthy") + CstorVolumeLabel := "openebs.io/persistent-volume=" + pvcObj.Spec.VolumeName + cvCount := ops.GetCstorVolumeCountEventually(openebsNamespace, CstorVolumeLabel, 1) + Expect(cvCount).To(Equal(true), "while checking cstorvolume count") + + By("building a cstor volume snapshot") + snapObj, err = snap.NewBuilder(). + WithName(snapName). + WithNamespace(nsObj.Name). + WithPVC(pvcName). 
+ Build() + Expect(err).To( + BeNil(), + "while building snapshot {%s} in namespace {%s}", + snapName, + nsName, + ) + + By("creating cstor volume snapshot") + _, err = ops.SnapClient.WithNamespace(nsObj.Name).Create(snapObj) + Expect(err).To( + BeNil(), + "while creating snapshot {%s} in namespace {%s}", + snapName, + nsName, + ) + + snaptype := ops.GetSnapshotTypeEventually(snapName) + Expect(snaptype).To(Equal("Ready"), "while checking snapshot type") + + By("builing clone persistentvolumeclaim") + cloneAnnotations := map[string]string{ + "snapshot.alpha.kubernetes.io/snapshot": snapName, + } + + clonepvcObj, err = pvc.NewBuilder(). + WithName(clonepvcName). + WithAnnotations(cloneAnnotations). + WithNamespace(nsObj.Name). + WithStorageClass(clonescName). + WithAccessModes(accessModes). + WithCapacity(capacity). + Build() + Expect(err).ShouldNot( + HaveOccurred(), + "while building clone pvc {%s} in namespace {%s}", + clonepvcName, + nsName, + ) + + By("creating clone persistentvolumeclaim") + _, err = ops.PVCClient.WithNamespace(nsObj.Name).Create(clonepvcObj) + Expect(err).To( + BeNil(), + "while creating clone pvc {%s} in namespace {%s}", + clonepvcName, + nsName, + ) + + By("verifying clone volume target pod count") + + clonetargetLabel := pvcLabel + clonepvcName + clonePodCount := ops.GetPodRunningCountEventually(openebsNamespace, clonetargetLabel, 1) + Expect(clonePodCount).To(Equal(1), "while checking clone pvc pod count") + + By("verifying clone volumeereplica count") + clonepvcObj, err = ops.PVCClient.WithNamespace(nsObj.Name).Get(clonepvcName, metav1.GetOptions{}) + Expect(err).To( + BeNil(), + "while getting pvc {%s} in namespace {%s}", + pvcName, + nsName, + ) + clonecvrLabel := pvLabel + clonepvcObj.Spec.VolumeName + cvrCount = ops.GetCstorVolumeReplicaCountEventually(openebsNamespace, clonecvrLabel, cstor.ReplicaCount) + Expect(cvrCount).To(Equal(true), "while checking cstorvolume replica count") + + By("verifying clone pvc status as bound") + 
status = ops.IsPVCBoundEventually(clonepvcName) + Expect(status).To(Equal(true), "while checking status equal to bound") + + By("deleting source pvc which failed to delete due to clone pvc exists") + err = ops.PVCClient.Delete(pvcName, &metav1.DeleteOptions{}) + Expect(err).ToNot( + BeNil(), + "while deleting pvc {%s} in namespace {%s}", + pvcName, + nsName, + ) + + By("deleting clone persistentvolumeclaim") + err = ops.PVCClient.Delete(clonepvcName, &metav1.DeleteOptions{}) + Expect(err).To( + BeNil(), + "while deleting pvc {%s} in namespace {%s}", + pvcName, + nsName, + ) + + By("verifying clone target pod count as 0") + controllerPodCount = ops.GetPodRunningCountEventually(openebsNamespace, clonetargetLabel, 0) + Expect(controllerPodCount).To(Equal(0), "while checking controller pod count") + + By("verifying deleted clone pvc") + clonepvc := ops.IsPVCDeleted(clonepvcName) + Expect(clonepvc).To(Equal(true), "while trying to get deleted pvc") + + By("verifying if clone cstorvolume is deleted") + CstorVolumeLabel = pvLabel + clonepvcObj.Spec.VolumeName + clonecvCount := ops.GetCstorVolumeCountEventually(openebsNamespace, CstorVolumeLabel, 0) + Expect(clonecvCount).To(Equal(true), "while checking cstorvolume count") + + By("deleting cstor volume snapshot") + err = ops.SnapClient.Delete(snapName, &metav1.DeleteOptions{}) + Expect(err).To( + BeNil(), + "while deleting snapshot {%s} in namespace {%s}", + snapName, + nsName, + ) + + By("verifying deleted snapshot") + snap := ops.IsSnapshotDeleted(snapName) + Expect(snap).To(Equal(true), "while checking for deleted snapshot") + + By("deleting source persistentvolumeclaim") + err = ops.PVCClient.Delete(pvcName, &metav1.DeleteOptions{}) + Expect(err).To( + BeNil(), + "while deleting pvc {%s} in namespace {%s}", + pvcName, + nsName, + ) + + By("verifying source volume target pod count as 0") + + sourcetargetLabel := pvcLabel + pvcName + controllerPodCount = ops.GetPodRunningCountEventually(openebsNamespace, 
sourcetargetLabel, 0) + Expect(controllerPodCount).To(Equal(0), "while checking controller pod count") + + By("verifying deleted source pvc") + pvc := ops.IsPVCDeleted(pvcName) + Expect(pvc).To(Equal(true), "while trying to get deleted pvc") + + By("verifying if source cstorvolume is deleted") + CstorVolumeLabel = pvLabel + pvcObj.Spec.VolumeName + cvCount = ops.GetCstorVolumeCountEventually(openebsNamespace, CstorVolumeLabel, 0) + Expect(cvCount).To(Equal(true), "while checking cstorvolume count") + + }) }) - Context("Test admission server validation for pvc delete", func() { - It("should deny the deletion of source volume", func() { + When("cstor clone pvc with different size created", func() { + It("should failed to create clone cstor volume", func() { + + By("building a source persistentvolumeclaim") + pvcObj, err = pvc.NewBuilder(). + WithName(pvcName). + WithNamespace(nsObj.Name). + WithStorageClass(scObj.Name). + WithAccessModes(accessModes). + WithCapacity(capacity).Build() + Expect(err).ShouldNot( + HaveOccurred(), + "while building pvc {%s} in namespace {%s}", + pvcName, + nsName, + ) + + By("creating cstor persistentvolumeclaim") + _, err = ops.PVCClient.WithNamespace(nsObj.Name).Create(pvcObj) + Expect(err).To( + BeNil(), + "while creating pvc {%s} in namespace {%s}", + pvcName, + nsName, + ) + + By("verifying volume target pod count as 1") + targetVolumeLabel := pvcLabel + pvcObj.Name + controllerPodCount := ops.GetPodRunningCountEventually(openebsNamespace, targetVolumeLabel, 1) + Expect(controllerPodCount).To(Equal(1), "while checking controller pod count") - By("Creating a snapshot for a given volume") - SnapUnst, err := artifacts.GetArtifactUnstructured(cstorSnapshotYaml) - Expect(err).ShouldNot(HaveOccurred()) - // Apply volume snapshot - cu := k8s.CreateOrUpdate( - k8s.GroupVersionResourceFromGVK(SnapUnst), - SnapUnst.GetNamespace(), + By("verifying cstorvolume replica count") + pvcObj, err = ops.PVCClient.WithNamespace(nsObj.Name).Get(pvcName, 
metav1.GetOptions{}) + Expect(err).To( + BeNil(), + "while getting pvc {%s} in namespace {%s}", + pvcName, + nsName, ) - _, err = cu.Apply(SnapUnst) - Expect(err).ShouldNot(HaveOccurred()) + cvrLabel := pvLabel + pvcObj.Spec.VolumeName + cvrCount := ops.GetCstorVolumeReplicaCountEventually(openebsNamespace, cvrLabel, cstor.ReplicaCount) + Expect(cvrCount).To(Equal(true), "while checking cstorvolume replica count") - // Extracting clone PVC artifacts unstructured - By("Creating a clone volume using snapshot") - ClonePVCUnst, err := artifacts.GetArtifactUnstructured(clonePVCYaml) - Expect(err).ShouldNot(HaveOccurred()) + By("verifying pvc status as bound") + status := ops.IsPVCBoundEventually(pvcName) + Expect(status).To(Equal(true), "while checking status equal to bound") - cu = k8s.CreateOrUpdate( - k8s.GroupVersionResourceFromGVK(ClonePVCUnst), - ClonePVCUnst.GetNamespace(), + By("verifying cstorVolume status as healthy") + CstorVolumeLabel := pvLabel + pvcObj.Spec.VolumeName + cvCount := ops.GetCstorVolumeCountEventually(openebsNamespace, CstorVolumeLabel, 1) + Expect(cvCount).To(Equal(true), "while checking cstorvolume count") + + By("building a cstor volume snapshot") + snapObj, err = snap.NewBuilder(). + WithName(snapName). + WithNamespace(nsObj.Name). + WithPVC(pvcName). + Build() + Expect(err).To( + BeNil(), + "while building snapshot {%s} in namespace {%s}", + snapName, + nsName, ) - _, err = cu.Apply(ClonePVCUnst) - Expect(err).ShouldNot(HaveOccurred()) - - By(fmt.Sprintf("verifying clone pvc '%s' to be created and bound with pv", ClonePVCUnst.GetName())) - Eventually(func() bool { - pvclone, err := pvc. - NewKubeClient(). - WithNamespace(ClonePVCUnst.GetNamespace()). - Get(ClonePVCUnst.GetName(), metav1.GetOptions{}) - Expect(err).ShouldNot(HaveOccurred()) - return pvc. - NewForAPIObject(pvclone).IsBound() - }, - framework.DefaultTimeOut, framework.DefaultPollingInterval). 
- Should(BeTrue()) - - By(fmt.Sprintf("Deleting source PVC '%s' should fail with error", PVCUnst.GetName())) - - del := k8s.DeleteResource( - k8s.GroupVersionResourceFromGVK(PVCUnst), - PVCUnst.GetNamespace(), + + By("creating cstor volume snapshot") + _, err = ops.SnapClient.WithNamespace(nsObj.Name).Create(snapObj) + Expect(err).To( + BeNil(), + "while creating snapshot {%s} in namespace {%s}", + snapName, + nsName, ) - err = del.Delete(PVCUnst) - Expect(err).ToNot(BeNil()) - - By(fmt.Sprintf("Deleting clone persistentvolumeclaim '%s'", ClonePVCUnst.GetName())) - err = del.Delete(ClonePVCUnst) - Expect(err).ShouldNot(HaveOccurred()) - - // Verify deletion of pvc instances - Eventually(func() int { - pvcs, err := pvc. - NewKubeClient(). - WithNamespace(ClonePVCUnst.GetNamespace()). - List(metav1.ListOptions{LabelSelector: "name=test-snap-claim"}) - Expect(err).ShouldNot(HaveOccurred()) - return len(pvcs.Items) - }, - framework.DefaultTimeOut, framework.DefaultPollingInterval). - Should(Equal(0), "pvc count should be 0") - - By("Deleting volume snapshot") - snap := k8s.DeleteResource( - k8s.GroupVersionResourceFromGVK(SnapUnst), - SnapUnst.GetNamespace(), + + snaptype := ops.GetSnapshotTypeEventually(snapName) + Expect(snaptype).To(Equal("Ready"), "while checking snapshot type") + + By("builing clone persistentvolumeclaim") + cloneAnnotations := map[string]string{ + "snapshot.alpha.kubernetes.io/snapshot": snapName, + } + + cloneObj, err := pvc.NewBuilder(). + WithName(clonepvcName). + WithAnnotations(cloneAnnotations). + WithNamespace(nsObj.Name). + WithStorageClass(clonescName). + WithAccessModes(accessModes). + WithCapacity("10G"). 
+ Build() + Expect(err).ShouldNot( + HaveOccurred(), + "while building clone pvc {%s} in namespace {%s}", + clonepvcName, + nsName, ) - err = snap.Delete(SnapUnst) - Expect(err).ShouldNot(HaveOccurred()) + + By("creating clone persistentvolumeclaim should failed to provision") + _, err = ops.PVCClient.WithNamespace(nsObj.Name).Create(cloneObj) + Expect(err).ToNot( + BeNil(), + "while creating clone pvc {%s} in namespace {%s}", + clonepvcName, + nsName, + ) + + By("deleting cstor volume snapshot") + err = ops.SnapClient.Delete(snapName, &metav1.DeleteOptions{}) + Expect(err).To( + BeNil(), + "while deleting snapshot {%s} in namespace {%s}", + snapName, + nsName, + ) + + By("verifying deleted snapshot") + snap := ops.IsSnapshotDeleted(snapName) + Expect(snap).To(Equal(true), "while checking for deleted snapshot") + + By("deleting source persistentvolumeclaim") + err = ops.PVCClient.Delete(pvcName, &metav1.DeleteOptions{}) + Expect(err).To( + BeNil(), + "while deleting pvc {%s} in namespace {%s}", + pvcName, + nsName, + ) + + By("verifying source volume target pod count as 0") + + sourcetargetLabel := pvcLabel + pvcName + controllerPodCount = ops.GetPodRunningCountEventually(openebsNamespace, sourcetargetLabel, 0) + Expect(controllerPodCount).To(Equal(0), "while checking controller pod count") + + By("verifying deleted source pvc") + pvc := ops.IsPVCDeleted(pvcName) + Expect(pvc).To(Equal(true), "while trying to get deleted pvc") + + By("verifying if source cstorvolume is deleted") + CstorVolumeLabel = pvLabel + pvcObj.Spec.VolumeName + cvCount = ops.GetCstorVolumeCountEventually(openebsNamespace, CstorVolumeLabel, 0) + Expect(cvCount).To(Equal(true), "while checking cstorvolume count") + }) }) + }) diff --git a/tests/admission/suite_test.go b/tests/admission/suite_test.go new file mode 100644 index 0000000000..9fdf301b15 --- /dev/null +++ b/tests/admission/suite_test.go @@ -0,0 +1,94 @@ +/* +Copyright 2019 The OpenEBS Authors +Licensed under the Apache License, 
Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/openebs/maya/tests" + "github.com/openebs/maya/tests/cstor" + + apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" + ns "github.com/openebs/maya/pkg/kubernetes/namespace/v1alpha1" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + snapshot "github.com/openebs/maya/pkg/apis/openebs.io/snapshot/v1alpha1" + // auth plugins + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" +) + +var ( + openebsNamespace = "openebs" + nsName = "test-cstor-admission" + scName = "test-cstor-admission-sc" + clonescName = "openebs-snapshot-promoter" + openebsCASConfigValue = ` +- name: ReplicaCount + value: $count +- name: StoragePoolClaim + value: $spcName +` + openebsProvisioner = "openebs.io/provisioner-iscsi" + spcName = "test-cstor-snap-sparse-pool" + nsObj *corev1.Namespace + scObj *storagev1.StorageClass + spcObj *apis.StoragePoolClaim + pvcObj, clonepvcObj *corev1.PersistentVolumeClaim + snapObj *snapshot.VolumeSnapshot + pvLabel = "openebs.io/persistent-volume=" + pvcLabel = "openebs.io/persistent-volume-claim=" + accessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce} + capacity = "5G" + annotations = map[string]string{} +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Test admission server validations") +} + +func init() { + cstor.ParseFlags() + +} + +var 
ops *tests.Operations + +var _ = BeforeSuite(func() { + + ops = tests.NewOperations(tests.WithKubeConfigPath(cstor.KubeConfigPath)).VerifyOpenebs(1) + var err error + + By("building a namespace") + nsObj, err = ns.NewBuilder(). + WithGenerateName(nsName). + APIObject() + Expect(err).ShouldNot(HaveOccurred(), "while building namespace {%s}", nsName) + + By("creating a namespace") + nsObj, err = ops.NSClient.Create(nsObj) + Expect(err).To(BeNil(), "while creating namespace {%s}", nsObj.Name) +}) + +var _ = AfterSuite(func() { + + By("deleting namespace") + err := ops.NSClient.Delete(nsObj.Name, &metav1.DeleteOptions{}) + Expect(err).To(BeNil(), "while deleting namespace {%s}", nsObj.Name) + +}) diff --git a/tests/artifacts/hostdevice-pv-pod.yaml b/tests/artifacts/hostdevice-pv-pod.yaml index 80e63c9cdd..c0707d6f03 100644 --- a/tests/artifacts/hostdevice-pv-pod.yaml +++ b/tests/artifacts/hostdevice-pv-pod.yaml @@ -5,7 +5,7 @@ metadata: spec: accessModes: - ReadWriteOnce - storageClassName: hostdevice + storageClassName: openebs-device resources: requests: storage: 2Gi diff --git a/tests/artifacts/openebs-ci.go b/tests/artifacts/openebs-ci.go index a9ded81690..ea0f28ff76 100644 --- a/tests/artifacts/openebs-ci.go +++ b/tests/artifacts/openebs-ci.go @@ -58,12 +58,13 @@ const ( type LabelSelector string const ( - MayaAPIServerLabelSelector LabelSelector = "name=maya-apiserver" - OpenEBSProvisionerLabelSelector LabelSelector = "name=openebs-provisioner" - OpenEBSSnapshotOperatorLabelSelector LabelSelector = "name=openebs-snapshot-operator" - OpenEBSAdmissionServerLabelSelector LabelSelector = "app=admission-webhook" - OpenEBSNDMLabelSelector LabelSelector = "name=openebs-ndm" - OpenEBSCStorPoolLabelSelector LabelSelector = "app=cstor-pool" + MayaAPIServerLabelSelector LabelSelector = "name=maya-apiserver" + OpenEBSProvisionerLabelSelector LabelSelector = "name=openebs-provisioner" + OpenEBSLocalPVProvisionerLabelSelector LabelSelector = "name=openebs-localpv-provisioner" 
+ OpenEBSSnapshotOperatorLabelSelector LabelSelector = "name=openebs-snapshot-operator" + OpenEBSAdmissionServerLabelSelector LabelSelector = "app=admission-webhook" + OpenEBSNDMLabelSelector LabelSelector = "name=openebs-ndm" + OpenEBSCStorPoolLabelSelector LabelSelector = "app=cstor-pool" ) func parseK8sYaml(yamls string) (k8s.UnstructedList, []error) { diff --git a/tests/artifacts/openebs-ci.yaml b/tests/artifacts/openebs-ci.yaml index ee83387607..e1b0f499fb 100644 --- a/tests/artifacts/openebs-ci.yaml +++ b/tests/artifacts/openebs-ci.yaml @@ -36,7 +36,7 @@ rules: resources: ["customresourcedefinitions"] verbs: [ "get", "list", "create", "update", "delete"] - apiGroups: ["*"] - resources: [ "disks"] + resources: [ "disks", "blockdevices", "blockdeviceclaims"] verbs: ["*" ] - apiGroups: ["*"] resources: [ "cstorpoolclusters", "storagepoolclaims", "storagepoolclaims/finalizers", "storagepools"] @@ -47,6 +47,9 @@ rules: - apiGroups: ["*"] resources: [ "cstorpools", "cstorpools/finalizers", "cstorvolumereplicas", "cstorvolumes"] verbs: ["*" ] +- apiGroups: ["*"] + resources: [ "cstorbackups", "cstorrestores", "cstorcompletedbackups"] + verbs: ["*" ] - nonResourceURLs: ["/metrics"] verbs: ["get"] --- @@ -86,7 +89,7 @@ spec: containers: - name: maya-apiserver imagePullPolicy: IfNotPresent - image: openebs/m-apiserver:ci + image: quay.io/openebs/m-apiserver:ci ports: - containerPort: 5656 env: @@ -104,7 +107,7 @@ spec: # configured as a part of openebs installation. # If "true" a default cstor sparse pool will be configured, if "false" it will not be configured. 
- name: OPENEBS_IO_INSTALL_DEFAULT_CSTOR_SPARSE_POOL - value: "true" + value: "false" # OPENEBS_NAMESPACE provides the namespace of this deployment as an # environment variable - name: OPENEBS_NAMESPACE @@ -124,27 +127,27 @@ spec: fieldRef: fieldPath: metadata.name - name: OPENEBS_IO_JIVA_CONTROLLER_IMAGE - value: "openebs/jiva:ci" + value: "quay.io/openebs/jiva:ci" - name: OPENEBS_IO_JIVA_REPLICA_IMAGE - value: "openebs/jiva:ci" + value: "quay.io/openebs/jiva:ci" - name: OPENEBS_IO_JIVA_REPLICA_COUNT value: "3" - name: OPENEBS_IO_CSTOR_TARGET_IMAGE - value: "openebs/cstor-istgt:ci" + value: "quay.io/openebs/cstor-istgt:ci" - name: OPENEBS_IO_CSTOR_POOL_IMAGE - value: "openebs/cstor-pool:ci" + value: "quay.io/openebs/cstor-pool:ci" - name: OPENEBS_IO_CSTOR_POOL_MGMT_IMAGE - value: "openebs/cstor-pool-mgmt:ci" + value: "quay.io/openebs/cstor-pool-mgmt:ci" - name: OPENEBS_IO_CSTOR_VOLUME_MGMT_IMAGE - value: "openebs/cstor-volume-mgmt:ci" + value: "quay.io/openebs/cstor-volume-mgmt:ci" - name: OPENEBS_IO_VOLUME_MONITOR_IMAGE - value: "openebs/m-exporter:ci" + value: "quay.io/openebs/m-exporter:ci" - name: OPENEBS_IO_CSTOR_POOL_EXPORTER_IMAGE - value: "openebs/m-exporter:ci" + value: "quay.io/openebs/m-exporter:ci" # OPENEBS_IO_ENABLE_ANALYTICS if set to true sends anonymous usage # events to Google Analytics - name: OPENEBS_IO_ENABLE_ANALYTICS - value: "true" + value: "false" # OPENEBS_IO_ANALYTICS_PING_INTERVAL can be used to specify the duration (in hours) # for periodic ping events sent to Google Analytics. # Default is 24h. @@ -199,7 +202,7 @@ spec: containers: - name: openebs-provisioner imagePullPolicy: IfNotPresent - image: openebs/openebs-k8s-provisioner:ci + image: quay.io/openebs/openebs-k8s-provisioner:ci env: # OPENEBS_IO_K8S_MASTER enables openebs provisioner to connect to K8s # based on this address. This is ignored if empty. 
@@ -252,7 +255,7 @@ spec: serviceAccountName: openebs-maya-operator containers: - name: snapshot-controller - image: openebs/snapshot-controller:ci + image: quay.io/openebs/snapshot-controller:ci imagePullPolicy: IfNotPresent env: - name: OPENEBS_NAMESPACE @@ -273,7 +276,7 @@ spec: #- name: OPENEBS_MAYA_SERVICE_NAME # value: "maya-apiserver-apiservice" - name: snapshot-provisioner - image: openebs/snapshot-provisioner:ci + image: quay.io/openebs/snapshot-provisioner:ci imagePullPolicy: IfNotPresent env: - name: OPENEBS_NAMESPACE @@ -360,8 +363,8 @@ spec: hostNetwork: true containers: - name: node-disk-manager - image: openebs/node-disk-manager-amd64:v0.3.5 - imagePullPolicy: IfNotPresent + image: quay.io/openebs/node-disk-manager-amd64:ci + imagePullPolicy: Always securityContext: privileged: true volumeMounts: @@ -377,6 +380,12 @@ spec: - name: sparsepath mountPath: /var/openebs/sparse env: + # namespace in which NDM is installed will be passed to NDM Daemonset + # as environment variable + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace # pass hostname as env variable using downward API to the NDM container - name: NODE_NAME valueFrom: @@ -417,6 +426,50 @@ spec: hostPath: path: /var/openebs/sparse --- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: openebs-ndm-operator + namespace: openebs +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + name: openebs-ndm-operator + template: + metadata: + labels: + name: openebs-ndm-operator + openebs.io/component-name: ndm-operator + openebs.io/version: dev + spec: + serviceAccountName: openebs-maya-operator + containers: + - name: node-disk-operator + image: quay.io/openebs/node-disk-operator-amd64:ci + imagePullPolicy: Always + readinessProbe: + exec: + command: + - stat + - /tmp/operator-sdk-ready + initialDelaySeconds: 4 + periodSeconds: 10 + failureThreshold: 1 + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - 
name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: "node-disk-operator" +--- apiVersion: v1 kind: Secret metadata: @@ -450,9 +503,6 @@ kind: Deployment metadata: name: openebs-admission-server namespace: openebs - labels: - app: admission-webhook - openebs.io/component: admission-server spec: replicas: 1 selector: @@ -463,11 +513,12 @@ spec: labels: app: admission-webhook openebs.io/version: dev + openebs.io/component: admission-server spec: serviceAccountName: openebs-maya-operator containers: - name: admission-webhook - image: openebs/admission-server:ci + image: quay.io/openebs/admission-server:ci imagePullPolicy: IfNotPresent args: - -tlsCertFile=/etc/webhook/certs/cert.pem @@ -525,7 +576,7 @@ spec: containers: - name: openebs-provisioner-hostpath imagePullPolicy: Always - image: openebs/provisioner-localpv:ci + image: quay.io/openebs/provisioner-localpv:ci env: # OPENEBS_IO_K8S_MASTER enables openebs provisioner to connect to K8s # based on this address. This is ignored if empty. 
diff --git a/tests/artifacts/sc-hostdevice.yaml b/tests/artifacts/sc-hostdevice.yaml index 180a7f1933..55016f5c51 100644 --- a/tests/artifacts/sc-hostdevice.yaml +++ b/tests/artifacts/sc-hostdevice.yaml @@ -2,7 +2,7 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: - name: hostdevice + name: openebs-hostdevice annotations: #Define a new CAS Type called `local` #which indicates that Data is stored @@ -12,7 +12,7 @@ metadata: openebs.io/cas-type: local cas.openebs.io/config: | - name: StorageType - value: "storage-device" + value: "device" provisioner: openebs.io/local volumeBindingMode: WaitForFirstConsumer reclaimPolicy: Delete diff --git a/tests/cstor/clone/clone_test.go b/tests/cstor/clone/clone_test.go new file mode 100644 index 0000000000..975004da61 --- /dev/null +++ b/tests/cstor/clone/clone_test.go @@ -0,0 +1,285 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clone + +import ( + "strconv" + "strings" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" + pvc "github.com/openebs/maya/pkg/kubernetes/persistentvolumeclaim/v1alpha1" + snap "github.com/openebs/maya/pkg/kubernetes/snapshot/v1alpha1" + sc "github.com/openebs/maya/pkg/kubernetes/storageclass/v1alpha1" + spc "github.com/openebs/maya/pkg/storagepoolclaim/v1alpha1" + "github.com/openebs/maya/tests/cstor" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("[cstor] TEST VOLUME CLONE PROVISIONING", func() { + var ( + err error + pvcName = "test-cstor-clone-pvc" + snapName = "test-cstor-clone-snapshot" + clonepvcName = "test-cstor-clone-pvc-cloned" + ) + + BeforeEach(func() { + When("deploying cstor sparse pool", func() { + By("building spc object") + spcObj = spc.NewBuilder(). + WithGenerateName(spcName). + WithDiskType(string(apis.TypeSparseCPV)). + WithMaxPool(cstor.PoolCount). + WithOverProvisioning(false). + WithPoolType(string(apis.PoolTypeStripedCPV)). + Build().Object + + By("creating storagepoolclaim") + spcObj, err = ops.SPCClient.Create(spcObj) + Expect(err).To(BeNil(), "while creating spc", spcName) + + By("verifying healthy csp count") + cspCount := ops.GetHealthyCSPCount(spcObj.Name, cstor.PoolCount) + Expect(cspCount).To(Equal(1), "while checking cstorpool health count") + + By("building a CAS Config with generated SPC name") + CASConfig := strings.Replace(openebsCASConfigValue, "$spcName", spcObj.Name, 1) + CASConfig = strings.Replace(CASConfig, "$count", strconv.Itoa(cstor.ReplicaCount), 1) + annotations[string(apis.CASTypeKey)] = string(apis.CstorVolume) + annotations[string(apis.CASConfigKey)] = CASConfig + + By("building storageclass object") + scObj, err = sc.NewBuilder(). + WithGenerateName(scName). + WithAnnotations(annotations). 
+ WithProvisioner(openebsProvisioner).Build() + Expect(err).ShouldNot(HaveOccurred(), "while building storageclass obj for storageclass {%s}", scName) + + By("creating storageclass") + scObj, err = ops.SCClient.Create(scObj) + Expect(err).To(BeNil(), "while creating storageclass", scName) + }) + }) + + AfterEach(func() { + By("deleting resources created for cstor volume snapshot provisioning", func() { + By("deleting storagepoolclaim") + _, err = ops.SPCClient.Delete(spcObj.Name, &metav1.DeleteOptions{}) + Expect(err).To(BeNil(), "while deleting the spc's {%s}", spcName) + + By("deleting storageclass") + err = ops.SCClient.Delete(scObj.Name, &metav1.DeleteOptions{}) + Expect(err).To(BeNil(), "while deleting storageclass", scName) + + }) + }) + + When("cstor pvc with replicacount 1 is created", func() { + It("should create cstor volume target pod", func() { + + By("building a persistentvolumeclaim") + pvcObj, err = pvc.NewBuilder(). + WithName(pvcName). + WithNamespace(nsObj.Name). + WithStorageClass(scObj.Name). + WithAccessModes(accessModes). 
+ WithCapacity(capacity).Build() + Expect(err).ShouldNot( + HaveOccurred(), + "while building pvc {%s} in namespace {%s}", + pvcName, + nsObj.Name, + ) + + By("creating cstor persistentvolumeclaim") + pvcObj, err = ops.PVCClient.WithNamespace(nsObj.Name).Create(pvcObj) + Expect(err).To( + BeNil(), + "while creating pvc {%s} in namespace {%s}", + pvcName, + nsObj.Name, + ) + + By("verifying volume target pod count as 1") + sourcetargetLabel := pvcLabel + pvcObj.Name + controllerPodCount := ops.GetPodRunningCountEventually(openebsNamespace, sourcetargetLabel, 1) + Expect(controllerPodCount).To(Equal(1), "while checking controller pod count") + + By("verifying cstorvolume replica count") + pvcObj, err = ops.PVCClient.WithNamespace(nsObj.Name).Get(pvcName, metav1.GetOptions{}) + Expect(err).To( + BeNil(), + "while getting pvc {%s} in namespace {%s}", + pvcName, + nsObj.Name, + ) + cvrLabel := pvLabel + pvcObj.Spec.VolumeName + cvrCount := ops.GetCstorVolumeReplicaCountEventually(openebsNamespace, cvrLabel, cstor.ReplicaCount) + Expect(cvrCount).To(Equal(true), "while checking cstorvolume replica count") + + By("verifying pvc status as bound") + status := ops.IsPVCBoundEventually(pvcName) + Expect(status).To(Equal(true), "while checking status equal to bound") + + By("verifying cstorVolume status as healthy") + CstorVolumeLabel := pvLabel + pvcObj.Spec.VolumeName + cvCount := ops.GetCstorVolumeCountEventually(openebsNamespace, CstorVolumeLabel, 1) + Expect(cvCount).To(Equal(true), "while checking cstorvolume count") + + By("building a cstor volume snapshot") + snapObj, err = snap.NewBuilder(). + WithName(snapName). + WithNamespace(nsObj.Name). + WithPVC(pvcName). 
+ Build() + Expect(err).To( + BeNil(), + "while building snapshot {%s} in namespace {%s}", + snapName, + nsObj.Name, + ) + + By("creating cstor volume snapshot") + _, err = ops.SnapClient.WithNamespace(nsObj.Name).Create(snapObj) + Expect(err).To( + BeNil(), + "while creating snapshot {%s} in namespace {%s}", + snapName, + nsObj.Name, + ) + + snaptype := ops.GetSnapshotTypeEventually(snapName) + Expect(snaptype).To(Equal("Ready"), "while checking snapshot type") + + By("builing clone persistentvolumeclaim") + cloneAnnotations := map[string]string{ + "snapshot.alpha.kubernetes.io/snapshot": snapName, + } + + cloneObj, err := pvc.NewBuilder(). + WithName(clonepvcName). + WithAnnotations(cloneAnnotations). + WithNamespace(nsObj.Name). + WithStorageClass(clonescName). + WithAccessModes(accessModes). + WithCapacity(capacity). + Build() + Expect(err).ShouldNot( + HaveOccurred(), + "while building clone pvc {%s} in namespace {%s}", + clonepvcName, + nsObj.Name, + ) + + By("creating clone persistentvolumeclaim") + _, err = ops.PVCClient.WithNamespace(nsObj.Name).Create(cloneObj) + Expect(err).To( + BeNil(), + "while creating clone pvc {%s} in namespace {%s}", + clonepvcName, + nsObj.Name, + ) + + By("verifying clone volume target pod count") + + clonetargetLabel := pvcLabel + clonepvcName + clonePodCount := ops.GetPodRunningCountEventually(openebsNamespace, clonetargetLabel, 1) + Expect(clonePodCount).To(Equal(1), "while checking clone pvc pod count") + + By("verifying clone volumeereplica count") + clonepvcObj, err := ops.PVCClient.WithNamespace(nsObj.Name).Get(clonepvcName, metav1.GetOptions{}) + Expect(err).To( + BeNil(), + "while getting pvc {%s} in namespace {%s}", + pvcName, + nsObj.Name, + ) + + clonecvrLabel := pvLabel + clonepvcObj.Spec.VolumeName + cvrCount = ops.GetCstorVolumeReplicaCountEventually(openebsNamespace, clonecvrLabel, cstor.ReplicaCount) + Expect(cvrCount).To(Equal(true), "while checking cstorvolume replica count") + + By("verifying clone pvc status 
as bound") + status = ops.IsPVCBoundEventually(clonepvcName) + Expect(status).To(Equal(true), "while checking status equal to bound") + + By("deleting clone persistentvolumeclaim") + err = ops.PVCClient.Delete(clonepvcName, &metav1.DeleteOptions{}) + Expect(err).To( + BeNil(), + "while deleting pvc {%s} in namespace {%s}", + pvcName, + nsObj.Name, + ) + + By("verifying clone target pod count as 0") + controllerPodCount = ops.GetPodRunningCountEventually(openebsNamespace, clonetargetLabel, 0) + Expect(controllerPodCount).To(Equal(0), "while checking controller pod count") + + By("verifying deleted clone pvc") + clonepvc := ops.IsPVCDeleted(clonepvcName) + Expect(clonepvc).To(Equal(true), "while trying to get deleted pvc") + + By("verifying if clone cstorvolume is deleted") + CstorVolumeLabel = pvLabel + clonepvcObj.Spec.VolumeName + clonecvCount := ops.GetCstorVolumeCountEventually(openebsNamespace, CstorVolumeLabel, 0) + Expect(clonecvCount).To(Equal(true), "while checking cstorvolume count") + + By("deleting cstor volume snapshot") + err = ops.SnapClient.Delete(snapName, &metav1.DeleteOptions{}) + Expect(err).To( + BeNil(), + "while deleting snapshot {%s} in namespace {%s}", + snapName, + nsObj.Name, + ) + + By("verifying deleted snapshot") + snap := ops.IsSnapshotDeleted(snapName) + Expect(snap).To(Equal(true), "while checking for deleted snapshot") + + By("deleting source persistentvolumeclaim") + err = ops.PVCClient.Delete(pvcName, &metav1.DeleteOptions{}) + Expect(err).To( + BeNil(), + "while deleting pvc {%s} in namespace {%s}", + pvcName, + nsObj.Name, + ) + + By("verifying source volume target pod count as 0") + + sourcetargetLabel = "openebs.io/persistent-volume-claim=" + pvcName + controllerPodCount = ops.GetPodRunningCountEventually(openebsNamespace, sourcetargetLabel, 0) + Expect(controllerPodCount).To(Equal(0), "while checking controller pod count") + + By("verifying deleted source pvc") + pvc := ops.IsPVCDeleted(pvcName) + Expect(pvc).To(Equal(true), 
"while trying to get deleted pvc") + + By("verifying if source cstorvolume is deleted") + CstorVolumeLabel = pvLabel + pvcObj.Spec.VolumeName + cvCount = ops.GetCstorVolumeCountEventually(openebsNamespace, CstorVolumeLabel, 0) + Expect(cvCount).To(Equal(true), "while checking cstorvolume count") + + }) + }) + +}) diff --git a/tests/cstor/clone/suite_test.go b/tests/cstor/clone/suite_test.go new file mode 100644 index 0000000000..7035848ed5 --- /dev/null +++ b/tests/cstor/clone/suite_test.go @@ -0,0 +1,94 @@ +/* +Copyright 2019 The OpenEBS Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clone + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/openebs/maya/tests" + "github.com/openebs/maya/tests/cstor" + + apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" + ns "github.com/openebs/maya/pkg/kubernetes/namespace/v1alpha1" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + snapshot "github.com/openebs/maya/pkg/apis/openebs.io/snapshot/v1alpha1" + // auth plugins + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" +) + +var ( + openebsNamespace = "openebs" + nsName = "test-cstor-clone" + scName = "test-cstor-clone-sc" + clonescName = "openebs-snapshot-promoter" + openebsCASConfigValue = ` +- name: ReplicaCount + value: $count +- name: StoragePoolClaim + value: $spcName +` + openebsProvisioner = "openebs.io/provisioner-iscsi" + spcName = "test-cstor-clone-sparse-pool" + nsObj *corev1.Namespace + scObj *storagev1.StorageClass + spcObj *apis.StoragePoolClaim + pvcObj *corev1.PersistentVolumeClaim + snapObj *snapshot.VolumeSnapshot + pvLabel = "openebs.io/persistent-volume=" + pvcLabel = "openebs.io/persistent-volume-claim=" + accessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce} + capacity = "5G" + annotations = map[string]string{} +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Test cstor volume clone provisioning") +} + +func init() { + cstor.ParseFlags() + +} + +var ops *tests.Operations + +var _ = BeforeSuite(func() { + + ops = tests.NewOperations(tests.WithKubeConfigPath(cstor.KubeConfigPath)).VerifyOpenebs(1) + var err error + + By("building a namespace") + nsObj, err = ns.NewBuilder(). + WithGenerateName(nsName). 
+		APIObject()
+	Expect(err).ShouldNot(HaveOccurred(), "while building namespace {%s}", nsName)
+
+	By("creating a namespace")
+	nsObj, err = ops.NSClient.Create(nsObj)
+	Expect(err).To(BeNil(), "while creating namespace {%s}", nsObj.Name)
+})
+
+var _ = AfterSuite(func() {
+
+	By("deleting namespace")
+	err := ops.NSClient.Delete(nsObj.Name, &metav1.DeleteOptions{})
+	Expect(err).To(BeNil(), "while deleting namespace {%s}", nsObj.Name)
+
+})
diff --git a/tests/cstor/cstor.go b/tests/cstor/cstor.go
new file mode 100644
index 0000000000..a8d02ad736
--- /dev/null
+++ b/tests/cstor/cstor.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2019 The OpenEBS Authors
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package cstor + +import "flag" + +var ( + // KubeConfigPath is the path to + // the kubeconfig provided at runtime + KubeConfigPath string + // ReplicaCount is the value of + // replica count provided at runtime + ReplicaCount int + // PoolCount is the value of + // max pool count in spc + PoolCount int +) + +// ParseFlags gets the flag values at run time +func ParseFlags() { + flag.StringVar(&KubeConfigPath, "kubeconfig", "", "path to kubeconfig to invoke kubernetes API calls") + flag.IntVar(&ReplicaCount, "cstor-replicas", 1, "value of replica count") + flag.IntVar(&PoolCount, "cstor-maxpools", 1, "value of maxpool count") +} diff --git a/tests/cstor/pool/negative/invalid_config_test.go b/tests/cstor/pool/negative/invalid_config_test.go new file mode 100644 index 0000000000..ae40ecf0b1 --- /dev/null +++ b/tests/cstor/pool/negative/invalid_config_test.go @@ -0,0 +1,114 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package negative + +import ( + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" + spc "github.com/openebs/maya/pkg/storagepoolclaim/v1alpha1" + "github.com/openebs/maya/tests/cstor" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("[cstor] [-ve] TEST INVALID STORAGEPOOLCLAIM", func() { + var ( + err error + ) + + AfterEach(func() { + + By("deleting storagepoolclaim") + _, err = ops.SPCClient.Delete(spcObj.Name, &metav1.DeleteOptions{}) + Expect(err).To(BeNil(), "while deleting the storagepoolclaim {%s}", spcObj.Name) + + time.Sleep(5 * time.Second) + }) + + When("creating storagepoolclaim with invalid disk type", func() { + It("should not create any cstorpool", func() { + + By("building storagepoolclaim with invalid disk type") + spcObj = spc.NewBuilder(). + WithGenerateName(spcName). + WithDiskType("invalid-disk-type"). + WithMaxPool(cstor.PoolCount). + WithOverProvisioning(false). + WithPoolType(string(apis.PoolTypeStripedCPV)). + Build().Object + + By("creating above storagepoolclaim") + spcObj, err = ops.SPCClient.Create(spcObj) + Expect(err).To(BeNil(), "while creating storagepoolclaim {%s}", spcObj.Name) + + By("verifying cstorpool count as 0") + cspCount := ops.GetCSPCount(spcObj.Name, cstor.PoolCount) + Expect(cspCount).To(Equal(0), "while checking cstorpool count") + + }) + }) + + When("creating storagepoolclaim with invalid pool type", func() { + It("should not create any cstorpool", func() { + + By("building a storagepoolclaim with invalid pool type") + spcObj = spc.NewBuilder(). + WithGenerateName(spcName). + WithDiskType(string(apis.TypeSparseCPV)). + WithMaxPool(cstor.PoolCount). + WithOverProvisioning(false). + WithPoolType(string("invalid-pool-type")). 
+ Build().Object + + By("creating above storagepoolclaim") + spcObj, err = ops.SPCClient.Create(spcObj) + Expect(err).To(BeNil(), "while creating storagepoolclaim {%s}", spcObj.Name) + + By("verifying cstorpool count as 0") + cspCount := ops.GetCSPCount(spcObj.Name, cstor.PoolCount) + Expect(cspCount).To(Equal(0), "while checking cstorpool count") + + }) + }) + + When("creating storagepoolclaim with invalid pool count", func() { + It("should not create any cstorpool", func() { + + By("building storagepoolclaim with invalid pool count") + spcObj = spc.NewBuilder(). + WithGenerateName(spcName). + WithDiskType(string(apis.TypeSparseCPV)). + WithMaxPool(-1). + WithOverProvisioning(false). + WithPoolType(string(apis.PoolTypeStripedCPV)). + Build().Object + + By("creating above storagepoolclaim") + spcObj, err = ops.SPCClient.Create(spcObj) + Expect(err).To(BeNil(), "while creating storagepoolclaim {%s}", spcObj.Name) + + By("verifying cstorpool count as 0") + cspCount := ops.GetCSPCount(spcObj.Name, cstor.PoolCount) + Expect(cspCount).To(Equal(0), "while checking cstorpool count") + + }) + }) + +}) diff --git a/tests/cstor/pool/negative/invalid_storageclass_test.go b/tests/cstor/pool/negative/invalid_storageclass_test.go new file mode 100644 index 0000000000..928fa0f0e8 --- /dev/null +++ b/tests/cstor/pool/negative/invalid_storageclass_test.go @@ -0,0 +1,145 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package negative + +import ( + "strconv" + "strings" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" + pvc "github.com/openebs/maya/pkg/kubernetes/persistentvolumeclaim/v1alpha1" + sc "github.com/openebs/maya/pkg/kubernetes/storageclass/v1alpha1" + spc "github.com/openebs/maya/pkg/storagepoolclaim/v1alpha1" + "github.com/openebs/maya/tests/cstor" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("[cstor] [-ve] TEST INVALID STORAGECLASS", func() { + var ( + err error + pvcName = "cstor-volume-claim" + openebsCASConfigValue = ` +- name: ReplicaCount + value: $count +- name: StoragePoolClaim + value: test-cstor-provision-sparse-pool-auto +` + ) + + BeforeEach(func() { + When(" creating a cstor based volume", func() { + + By("building storagepoolclaim") + spcObj = spc.NewBuilder(). + WithGenerateName(spcName). + WithDiskType(string(apis.TypeSparseCPV)). + WithMaxPool(cstor.PoolCount). + WithOverProvisioning(false). + WithPoolType(string(apis.PoolTypeStripedCPV)). 
+ Build().Object + + By("creating above storagepoolclaim") + spcObj, err = ops.SPCClient.Create(spcObj) + Expect(err).To(BeNil(), "while creating storagepoolclaim {%s}", spcObj.Name) + + By("verifying healthy csp count") + cspCount := ops.GetHealthyCSPCountEventually(spcObj.Name, cstor.PoolCount) + Expect(cspCount).To(Equal(true), "while checking cstorpool health status") + + }) + }) + + AfterEach(func() { + By("deleting storagepoolclaim") + _, err = ops.SPCClient.Delete(spcObj.Name, &metav1.DeleteOptions{}) + Expect(err).To(BeNil(), "while deleting the storagepoolclaim {%s}", spcObj.Name) + + time.Sleep(10 * time.Second) + }) + + When("creating storageclass with invalid CASConfig", func() { + It("should not create any pvc pods", func() { + + By("building a CAS Config") + CASConfig := strings.Replace( + openebsCASConfigValue, + "$count", + strconv.Itoa(cstor.ReplicaCount), + 1, + ) + annotations[string(apis.CASTypeKey)] = string(apis.CstorVolume) + // adding invalid character to casconfig + annotations[string(apis.CASConfigKey)] = CASConfig + ":" + + By("building storageclass with invalid CASConfig") + scObj, err = sc.NewBuilder(). + WithGenerateName(scName). + WithAnnotations(annotations). + WithProvisioner(openebsProvisioner).Build() + Expect(err).ShouldNot( + HaveOccurred(), + "while building storageclass obj for storageclass {%s}", + scObj.GenerateName, + ) + + By("creating above storageclass") + scObj, err = ops.SCClient.Create(scObj) + Expect(err).To(BeNil(), "while creating storageclass {%s}", scObj.Name) + + By("building a pvc") + pvcObj, err = pvc.NewBuilder(). + WithName(pvcName). + WithNamespace(namespaceObj.Name). + WithStorageClass(scObj.Name). + WithAccessModes(accessModes). 
+				WithCapacity(capacity).Build()
+			Expect(err).ShouldNot(
+				HaveOccurred(),
+				"while building pvc {%s} in namespace {%s}",
+				pvcName,
+				namespaceObj.Name,
+			)
+
+			By("creating above pvc")
+			pvcObj, err = ops.PVCClient.WithNamespace(namespaceObj.Name).Create(pvcObj)
+			Expect(err).To(
+				BeNil(),
+				"while creating pvc {%s} in namespace {%s}",
+				pvcName,
+				namespace,
+			)
+
+			By("verifying target pod count as 0")
+			controllerPodCount := ops.GetPodRunningCountEventually(openebsNamespace, targetLabel, 1)
+			Expect(controllerPodCount).To(Equal(0), "while checking controller pod count")
+
+			By("deleting above pvc")
+			err = ops.PVCClient.Delete(pvcName, &metav1.DeleteOptions{})
+			Expect(err).To(BeNil(), "while deleting pvc {%s}", pvcName)
+
+			By("deleting storageclass")
+			err = ops.SCClient.Delete(scObj.Name, &metav1.DeleteOptions{})
+			Expect(err).To(BeNil(), "while deleting storageclass {%s}", scObj.Name)
+
+		})
+	})
+
+})
diff --git a/tests/cstor/pool/negative/suite_test.go b/tests/cstor/pool/negative/suite_test.go
new file mode 100644
index 0000000000..aebbd9b414
--- /dev/null
+++ b/tests/cstor/pool/negative/suite_test.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2019 The OpenEBS Authors
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package negative
+
+import (
+	"testing"
+
+	. "github.com/onsi/ginkgo"
+	. 
"github.com/onsi/gomega" + "github.com/openebs/maya/tests" + "github.com/openebs/maya/tests/cstor" + + apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" + ns "github.com/openebs/maya/pkg/kubernetes/namespace/v1alpha1" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + // auth plugins + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" +) + +var ( + openebsNamespace = "openebs" + namespace = "cstor-invalidconfig" + scName = "cstor-volume-test" + openebsProvisioner = "openebs.io/provisioner-iscsi" + spcName = "sparse-pool-claim" + namespaceObj *corev1.Namespace + scObj *storagev1.StorageClass + spcObj *apis.StoragePoolClaim + pvcObj *corev1.PersistentVolumeClaim + targetLabel = "openebs.io/target=cstor-target" + accessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce} + capacity = "5G" + annotations = map[string]string{} +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Test cstor invalid config") +} + +func init() { + cstor.ParseFlags() +} + +var ops *tests.Operations + +var _ = BeforeSuite(func() { + + ops = tests.NewOperations(tests.WithKubeConfigPath(cstor.KubeConfigPath)).VerifyOpenebs(1) + var err error + + By("building a namespace") + namespaceObj, err = ns.NewBuilder(). + WithGenerateName(namespace). 
+ APIObject() + Expect(err).ShouldNot(HaveOccurred(), "while building namespace {%s}", namespaceObj.GenerateName) + + By("creating a namespace") + namespaceObj, err = ops.NSClient.Create(namespaceObj) + Expect(err).To(BeNil(), "while creating namespace {%s}", namespaceObj.Name) + +}) + +var _ = AfterSuite(func() { + + By("deleting namespace") + err := ops.NSClient.Delete(namespaceObj.Name, &metav1.DeleteOptions{}) + Expect(err).To(BeNil(), "while deleting namespace {%s}", namespaceObj.Name) + +}) diff --git a/tests/cstor/snapshot/snapshot_test.go b/tests/cstor/snapshot/snapshot_test.go new file mode 100644 index 0000000000..8945dc5731 --- /dev/null +++ b/tests/cstor/snapshot/snapshot_test.go @@ -0,0 +1,204 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package snapshot + +import ( + "strconv" + "strings" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" + pvc "github.com/openebs/maya/pkg/kubernetes/persistentvolumeclaim/v1alpha1" + snap "github.com/openebs/maya/pkg/kubernetes/snapshot/v1alpha1" + sc "github.com/openebs/maya/pkg/kubernetes/storageclass/v1alpha1" + spc "github.com/openebs/maya/pkg/storagepoolclaim/v1alpha1" + "github.com/openebs/maya/tests/cstor" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("[cstor] TEST SNAPSHOT PROVISIONING", func() { + var ( + err error + pvcName = "test-cstor-snap-pvc" + snapName = "test-cstor-snap-snapshot" + ) + + BeforeEach(func() { + When("deploying cstor sparse pool", func() { + By("building spc object") + spcObj = spc.NewBuilder(). + WithGenerateName(spcName). + WithDiskType(string(apis.TypeSparseCPV)). + WithMaxPool(cstor.PoolCount). + WithOverProvisioning(false). + WithPoolType(string(apis.PoolTypeStripedCPV)). + Build().Object + + By("creating storagepoolclaim") + spcObj, err = ops.SPCClient.Create(spcObj) + Expect(err).To(BeNil(), "while creating spc", spcName) + + By("verifying healthy csp count") + cspCount := ops.GetHealthyCSPCount(spcObj.Name, cstor.PoolCount) + Expect(cspCount).To(Equal(1), "while checking cstorpool health count") + + By("building a CAS Config with generated SPC name") + CASConfig := strings.Replace(openebsCASConfigValue, "$spcName", spcObj.Name, 1) + CASConfig = strings.Replace(CASConfig, "$count", strconv.Itoa(cstor.ReplicaCount), 1) + annotations[string(apis.CASTypeKey)] = string(apis.CstorVolume) + annotations[string(apis.CASConfigKey)] = CASConfig + + By("building storageclass object") + scObj, err = sc.NewBuilder(). + WithGenerateName(scName). + WithAnnotations(annotations). 
+ WithProvisioner(openebsProvisioner).Build() + Expect(err).ShouldNot(HaveOccurred(), "while building storageclass obj for storageclass {%s}", scName) + + By("creating storageclass") + scObj, err = ops.SCClient.Create(scObj) + Expect(err).To(BeNil(), "while creating storageclass", scName) + + }) + }) + + AfterEach(func() { + By("deleting resources created for cstor volume snapshot provisioning", func() { + By("deleting storagepoolclaim") + _, err = ops.SPCClient.Delete(spcObj.Name, &metav1.DeleteOptions{}) + Expect(err).To(BeNil(), "while deleting the spc's {%s}", spcName) + + By("deleting storageclass") + err = ops.SCClient.Delete(scObj.Name, &metav1.DeleteOptions{}) + Expect(err).To(BeNil(), "while deleting storageclass", scName) + + }) + }) + + When("cstor pvc with replicacount 1 is created", func() { + It("should create cstor volume target pod", func() { + + By("building a persistentvolumeclaim") + pvcObj, err = pvc.NewBuilder(). + WithName(pvcName). + WithNamespace(nsObj.Name). + WithStorageClass(scObj.Name). + WithAccessModes(accessModes). 
+ WithCapacity(capacity).Build() + Expect(err).ShouldNot( + HaveOccurred(), + "while building pvc {%s} in namespace {%s}", + pvcName, + nsObj.Name, + ) + + By("creating cstor persistentvolumeclaim") + pvcObj, err = ops.PVCClient.WithNamespace(nsObj.Name).Create(pvcObj) + Expect(err).To( + BeNil(), + "while creating pvc {%s} in namespace {%s}", + pvcName, + nsObj.Name, + ) + + By("verifying volume target pod count as 1") + targetVolumeLabel := pvcLabel + pvcObj.Name + controllerPodCount := ops.GetPodRunningCountEventually(openebsNamespace, targetVolumeLabel, 1) + Expect(controllerPodCount).To(Equal(1), "while checking controller pod count") + + By("verifying cstorvolume replica count") + pvcObj, err = ops.PVCClient.WithNamespace(nsObj.Name).Get(pvcName, metav1.GetOptions{}) + Expect(err).To( + BeNil(), + "while getting pvc {%s} in namespace {%s}", + pvcName, + nsObj.Name, + ) + cvrLabel := pvLabel + pvcObj.Spec.VolumeName + cvrCount := ops.GetCstorVolumeReplicaCountEventually(openebsNamespace, cvrLabel, cstor.ReplicaCount) + Expect(cvrCount).To(Equal(true), "while checking cstorvolume replica count") + + By("verifying pvc status as bound") + status := ops.IsPVCBoundEventually(pvcName) + Expect(status).To(Equal(true), "while checking status equal to bound") + + By("building a cstor volume snapshot") + snapObj, err = snap.NewBuilder(). + WithName(snapName). + WithNamespace(nsObj.Name). + WithPVC(pvcName). 
+ Build() + Expect(err).To( + BeNil(), + "while building snapshot {%s} in namespace {%s}", + snapName, + nsObj.Name, + ) + + By("creating cstor volume snapshot") + _, err = ops.SnapClient.WithNamespace(nsObj.Name).Create(snapObj) + Expect(err).To( + BeNil(), + "while creating snapshot {%s} in namespace {%s}", + snapName, + nsObj.Name, + ) + + By("verifying snapshot status as ready") + snaptype := ops.GetSnapshotTypeEventually(snapName) + Expect(snaptype).To(Equal("Ready"), "while checking snapshot type") + + By("deleting cstor volume snapshot") + err = ops.SnapClient.Delete(snapName, &metav1.DeleteOptions{}) + Expect(err).To( + BeNil(), + "while deleting snapshot {%s} in namespace {%s}", + snapName, + nsObj.Name, + ) + + By("verifying deleted snapshot") + snap := ops.IsSnapshotDeleted(snapName) + Expect(snap).To(Equal(true), "while checking for deleted snapshot") + + By("deleting above pvc") + err = ops.PVCClient.Delete(pvcName, &metav1.DeleteOptions{}) + Expect(err).To( + BeNil(), + "while deleting pvc {%s} in namespace {%s}", + pvcName, + nsObj.Name, + ) + + By("verifying target pod count as 0") + controllerPodCount = ops.GetPodRunningCountEventually(openebsNamespace, targetVolumeLabel, 0) + Expect(controllerPodCount).To(Equal(0), "while checking controller pod count") + + By("verifying deleted pvc") + pvc := ops.IsPVCDeleted(pvcName) + Expect(pvc).To(Equal(true), "while trying to get deleted pvc") + + By("verifying if cstorvolume is deleted") + CstorVolumeLabel := pvLabel + pvcObj.Spec.VolumeName + cvCount := ops.GetCstorVolumeCountEventually(openebsNamespace, CstorVolumeLabel, 0) + Expect(cvCount).To(Equal(true), "while checking cstorvolume count") + }) + }) + +}) diff --git a/tests/cstor/snapshot/suite_test.go b/tests/cstor/snapshot/suite_test.go new file mode 100644 index 0000000000..13ef8a1538 --- /dev/null +++ b/tests/cstor/snapshot/suite_test.go @@ -0,0 +1,93 @@ +/* +Copyright 2019 The OpenEBS Authors +Licensed under the Apache License, Version 2.0 (the 
"License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package snapshot + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/openebs/maya/tests" + "github.com/openebs/maya/tests/cstor" + + apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" + ns "github.com/openebs/maya/pkg/kubernetes/namespace/v1alpha1" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + snapshot "github.com/openebs/maya/pkg/apis/openebs.io/snapshot/v1alpha1" + // auth plugins + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" +) + +var ( + openebsNamespace = "openebs" + nsName = "test-cstor-snap" + scName = "test-cstor-snap-sc" + openebsCASConfigValue = ` +- name: ReplicaCount + value: $count +- name: StoragePoolClaim + value: $spcName +` + openebsProvisioner = "openebs.io/provisioner-iscsi" + spcName = "test-cstor-snap-sparse-pool" + nsObj *corev1.Namespace + scObj *storagev1.StorageClass + spcObj *apis.StoragePoolClaim + pvcObj *corev1.PersistentVolumeClaim + snapObj *snapshot.VolumeSnapshot + pvLabel = "openebs.io/persistent-volume=" + pvcLabel = "openebs.io/persistent-volume-claim=" + accessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce} + capacity = "5G" + annotations = map[string]string{} +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Test cstor volume snapshot provisioning") +} + +func init() { + cstor.ParseFlags() + +} + +var ops *tests.Operations + +var _ = BeforeSuite(func() { + + ops = 
tests.NewOperations(tests.WithKubeConfigPath(cstor.KubeConfigPath)).VerifyOpenebs(1) + var err error + + By("building a namespace") + nsObj, err = ns.NewBuilder(). + WithGenerateName(nsName). + APIObject() + Expect(err).ShouldNot(HaveOccurred(), "while building namespace {%s}", nsName) + + By("creating a namespace") + nsObj, err = ops.NSClient.Create(nsObj) + Expect(err).To(BeNil(), "while creating namespace {%s}", nsObj.Name) +}) + +var _ = AfterSuite(func() { + + By("deleting namespace") + err := ops.NSClient.Delete(nsObj.Name, &metav1.DeleteOptions{}) + Expect(err).To(BeNil(), "while deleting namespace {%s}", nsObj.Name) + +}) diff --git a/tests/cstor/volume/provision_test.go b/tests/cstor/volume/provision_test.go index 2f6bafa82d..8b3596b499 100644 --- a/tests/cstor/volume/provision_test.go +++ b/tests/cstor/volume/provision_test.go @@ -17,6 +17,9 @@ limitations under the License. package volume import ( + "strconv" + "strings" + . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -24,6 +27,7 @@ import ( pvc "github.com/openebs/maya/pkg/kubernetes/persistentvolumeclaim/v1alpha1" sc "github.com/openebs/maya/pkg/kubernetes/storageclass/v1alpha1" spc "github.com/openebs/maya/pkg/storagepoolclaim/v1alpha1" + "github.com/openebs/maya/tests/cstor" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -35,52 +39,54 @@ var _ = Describe("[cstor] TEST VOLUME PROVISIONING", func() { BeforeEach(func() { When(" creating a cstor based volume", func() { - By("building object of storageclass") - scObj, err = sc.NewBuilder(). - WithName(scName). - WithAnnotations(annotations). - WithProvisioner(openebsProvisioner).Build() - Expect(err).ShouldNot(HaveOccurred(), "while building storageclass obj for storageclass {%s}", scName) - - By("creating storageclass") - _, err = ops.SCClient.Create(scObj) - Expect(err).To(BeNil(), "while creating storageclass", scName) - By("building spc object") spcObj = spc.NewBuilder(). - WithName(spcName). + WithGenerateName(spcName). 
WithDiskType(string(apis.TypeSparseCPV)). - WithMaxPool(1). + WithMaxPool(cstor.PoolCount). WithOverProvisioning(false). WithPoolType(string(apis.PoolTypeStripedCPV)). Build().Object By("creating storagepoolclaim") - _, err = ops.SPCClient.Create(spcObj) + spcObj, err = ops.SPCClient.Create(spcObj) Expect(err).To(BeNil(), "while creating spc", spcName) By("verifying healthy csp count") - cspCount := ops.GetHealthyCSPCount(spcName, 1) - Expect(cspCount).To(Equal(1), "while checking cstorpool health count") + cspCount := ops.GetHealthyCSPCountEventually(spcObj.Name, cstor.PoolCount) + Expect(cspCount).To(Equal(true), "while checking cstorpool health status") + + By("building a CAS Config with generated SPC name") + CASConfig := strings.Replace(openebsCASConfigValue, "$spcName", spcObj.Name, 1) + CASConfig = strings.Replace(CASConfig, "$count", strconv.Itoa(cstor.ReplicaCount), 1) + annotations[string(apis.CASTypeKey)] = string(apis.CstorVolume) + annotations[string(apis.CASConfigKey)] = CASConfig + + By("building object of storageclass") + scObj, err = sc.NewBuilder(). + WithGenerateName(scName). + WithAnnotations(annotations). 
+ WithProvisioner(openebsProvisioner).Build() + Expect(err).ShouldNot(HaveOccurred(), "while building storageclass obj for storageclass {%s}", scName) + + By("creating storageclass") + scObj, err = ops.SCClient.Create(scObj) + Expect(err).To(BeNil(), "while creating storageclass", scName) }) }) AfterEach(func() { By("deleting resources created for testing cstor volume provisioning", func() { + + By("deleting storagepoolclaim") + _, err = ops.SPCClient.Delete(spcObj.Name, &metav1.DeleteOptions{}) + Expect(err).To(BeNil(), "while deleting the spc's {%s}", spcName) + By("deleting storageclass") - err = ops.SCClient.Delete(scName, &metav1.DeleteOptions{}) + err = ops.SCClient.Delete(scObj.Name, &metav1.DeleteOptions{}) Expect(err).To(BeNil(), "while deleting storageclass", scName) - By("listing spc") - spcList, err = ops.SPCClient.List(metav1.ListOptions{}) - Expect(err).To(BeNil(), "while listing spc clients", spcList) - - By("deleting spc") - for _, spc := range spcList.Items { - _, err = ops.SPCClient.Delete(spc.Name, &metav1.DeleteOptions{}) - Expect(err).To(BeNil(), "while deleting the spc's", spc) - } }) }) @@ -90,30 +96,44 @@ var _ = Describe("[cstor] TEST VOLUME PROVISIONING", func() { By("building a pvc") pvcObj, err = pvc.NewBuilder(). WithName(pvcName). - WithNamespace(nsName). - WithStorageClass(scName). + WithNamespace(nsObj.Name). + WithStorageClass(scObj.Name). WithAccessModes(accessModes). 
WithCapacity(capacity).Build() Expect(err).ShouldNot( HaveOccurred(), "while building pvc {%s} in namespace {%s}", pvcName, - nsName, + nsObj.Name, ) By("creating above pvc") - _, err = ops.PVCClient.WithNamespace(nsName).Create(pvcObj) + pvcObj, err = ops.PVCClient.WithNamespace(nsObj.Name).Create(pvcObj) Expect(err).To( BeNil(), "while creating pvc {%s} in namespace {%s}", pvcName, - nsName, + nsObj.Name, ) By("verifying target pod count as 1") - controllerPodCount := ops.GetPodRunningCountEventually(openebsNamespace, targetLabel, 1) + targetVolumeLabel := pvcLabel + pvcObj.Name + controllerPodCount := ops.GetPodRunningCountEventually(openebsNamespace, targetVolumeLabel, 1) Expect(controllerPodCount).To(Equal(1), "while checking controller pod count") + pvcObj, err = ops.PVCClient.WithNamespace(nsObj.Name).Get(pvcName, metav1.GetOptions{}) + Expect(err).To( + BeNil(), + "while getting pvc {%s} in namespace {%s}", + pvcName, + nsObj.Name, + ) + + By("verifying cstorvolume replica count") + cvrLabel := pvLabel + pvcObj.Spec.VolumeName + cvrCount := ops.GetCstorVolumeReplicaCountEventually(openebsNamespace, cvrLabel, cstor.ReplicaCount) + Expect(cvrCount).To(Equal(true), "while checking cstorvolume replica count") + By("verifying pvc status as bound") status := ops.IsPVCBound(pvcName) Expect(status).To(Equal(true), "while checking status equal to bound") @@ -124,11 +144,11 @@ var _ = Describe("[cstor] TEST VOLUME PROVISIONING", func() { BeNil(), "while deleting pvc {%s} in namespace {%s}", pvcName, - nsName, + nsObj.Name, ) By("verifying target pod count as 0") - controllerPodCount = ops.GetPodRunningCountEventually(openebsNamespace, targetLabel, 0) + controllerPodCount = ops.GetPodRunningCountEventually(openebsNamespace, targetVolumeLabel, 0) Expect(controllerPodCount).To(Equal(0), "while checking controller pod count") By("verifying deleted pvc") @@ -136,9 +156,9 @@ var _ = Describe("[cstor] TEST VOLUME PROVISIONING", func() { Expect(pvc).To(Equal(true), "while 
trying to get deleted pvc") By("verifying if cstorvolume is deleted") - CstorVolumeLabel := "openebs.io/persistent-volume=" + pvcObj.Spec.VolumeName - cvCount := ops.GetCstorVolumeCountEventually(openebsNamespace, CstorVolumeLabel, 0) - Expect(cvCount).To(Equal(0), "while checking cstorvolume count") + cvLabel := pvLabel + pvcObj.Spec.VolumeName + cvCount := ops.GetCstorVolumeCountEventually(openebsNamespace, cvLabel, 0) + Expect(cvCount).To(Equal(true), "while checking cstorvolume count") }) }) diff --git a/tests/cstor/volume/suite_test.go b/tests/cstor/volume/suite_test.go index f4fbd3cad7..e49e9c6cf3 100644 --- a/tests/cstor/volume/suite_test.go +++ b/tests/cstor/volume/suite_test.go @@ -14,14 +14,12 @@ limitations under the License. package volume import ( - "flag" - "testing" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/openebs/maya/tests" - "github.com/openebs/maya/tests/artifacts" + "github.com/openebs/maya/tests/cstor" apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" ns "github.com/openebs/maya/pkg/kubernetes/namespace/v1alpha1" @@ -36,23 +34,25 @@ import ( var ( kubeConfigPath string openebsNamespace = "openebs" - nsName = "cstor-provision" - scName = "cstor-volume" - openebsCASConfigValue = "- name: ReplicaCount\n value: 1\n- name: StoragePoolClaim\n value: sparse-pool-auto" - openebsProvisioner = "openebs.io/provisioner-iscsi" - spcName = "sparse-pool-auto" - nsObj *corev1.Namespace - scObj *storagev1.StorageClass - spcObj *apis.StoragePoolClaim - pvcObj *corev1.PersistentVolumeClaim - spcList *apis.StoragePoolClaimList - targetLabel = "openebs.io/target=cstor-target" - accessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce} - capacity = "5G" - annotations = map[string]string{ - string(apis.CASTypeKey): string(apis.CstorVolume), - string(apis.CASConfigKey): openebsCASConfigValue, - } + nsName = "test-cstor-volume" + scName = "test-cstor-volume-sc" + openebsCASConfigValue = ` +- name: ReplicaCount + value: 
$count +- name: StoragePoolClaim + value: $spcName +` + openebsProvisioner = "openebs.io/provisioner-iscsi" + spcName = "test-cstor-provision-sparse-pool-auto" + nsObj *corev1.Namespace + scObj *storagev1.StorageClass + spcObj *apis.StoragePoolClaim + pvcObj *corev1.PersistentVolumeClaim + pvLabel = "openebs.io/persistent-volume=" + pvcLabel = "openebs.io/persistent-volume-claim=" + accessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce} + capacity = "5G" + annotations = map[string]string{} ) func TestSource(t *testing.T) { @@ -61,46 +61,31 @@ func TestSource(t *testing.T) { } func init() { - flag.StringVar(&kubeConfigPath, "kubeconfig", "", "path to kubeconfig to invoke kubernetes API calls") + cstor.ParseFlags() } var ops *tests.Operations var _ = BeforeSuite(func() { - ops = tests.NewOperations(tests.WithKubeConfigPath(kubeConfigPath)) + ops = tests.NewOperations(tests.WithKubeConfigPath(cstor.KubeConfigPath)).VerifyOpenebs(1) var err error - By("waiting for maya-apiserver pod to come into running state") - podCount := ops.GetPodRunningCountEventually( - string(artifacts.OpenebsNamespace), - string(artifacts.MayaAPIServerLabelSelector), - 1, - ) - Expect(podCount).To(Equal(1)) - - By("waiting for openebs-provisioner pod to come into running state") - podCount = ops.GetPodRunningCountEventually( - string(artifacts.OpenebsNamespace), - string(artifacts.OpenEBSProvisionerLabelSelector), - 1, - ) - Expect(podCount).To(Equal(1)) By("building a namespace") nsObj, err = ns.NewBuilder(). - WithName(nsName). + WithGenerateName(nsName). 
APIObject() Expect(err).ShouldNot(HaveOccurred(), "while building namespace {%s}", nsName) By("creating a namespace") - _, err = ops.NSClient.Create(nsObj) - Expect(err).To(BeNil(), "while creating storageclass {%s}", nsObj.Name) + nsObj, err = ops.NSClient.Create(nsObj) + Expect(err).To(BeNil(), "while creating namespace {%s}", nsObj.Name) }) var _ = AfterSuite(func() { By("deleting namespace") - err := ops.NSClient.Delete(nsName, &metav1.DeleteOptions{}) + err := ops.NSClient.Delete(nsObj.Name, &metav1.DeleteOptions{}) Expect(err).To(BeNil(), "while deleting namespace {%s}", nsObj.Name) }) diff --git a/tests/jiva/clone/provision_test.go b/tests/jiva/clone/provision_test.go index 03dec42546..d5f22b5d3e 100644 --- a/tests/jiva/clone/provision_test.go +++ b/tests/jiva/clone/provision_test.go @@ -21,93 +21,161 @@ import ( . "github.com/onsi/gomega" "github.com/openebs/maya/tests/jiva" + container "github.com/openebs/maya/pkg/kubernetes/container/v1alpha1" pvc "github.com/openebs/maya/pkg/kubernetes/persistentvolumeclaim/v1alpha1" + pod "github.com/openebs/maya/pkg/kubernetes/pod/v1alpha1" snap "github.com/openebs/maya/pkg/kubernetes/snapshot/v1alpha1" + volume "github.com/openebs/maya/pkg/kubernetes/volume/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var ( - replicaLabel = "openebs.io/replica=jiva-replica" - ctrlLabel = "openebs.io/controller=jiva-controller" - accessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce} - capacity = "5G" - pvcObj *corev1.PersistentVolumeClaim - cloneObj *corev1.PersistentVolumeClaim - cloneLable = "openebs.io/persistent-volume-claim=jiva-clone" + accessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce} + capacity = "5G" + pvcObj *corev1.PersistentVolumeClaim + cloneObj *corev1.PersistentVolumeClaim + cloneLable = "openebs.io/persistent-volume-claim=jiva-clone" + podObj, clonePodObj *corev1.Pod ) var _ = Describe("[jiva] TEST JIVA CLONE CREATION", func() { var ( - 
pvcName = "jiva-pvc" - snapName = "jiva-snapshot" - cloneName = "jiva-clone" + pvcName = "jiva-pvc" + snapName = "jiva-snapshot" + cloneName = "jiva-clone" + appName = "busybox-jiva" + cloneAppName = "busybox-jiva-clone" ) - When("jiva pvc with replicacount n is created", func() { + When("pvc with replicacount n is created", func() { It("should create 1 controller pod and n replica pod", func() { By("building a pvc") pvcObj, err = pvc.NewBuilder(). WithName(pvcName). - WithNamespace(nsName). - WithStorageClass(scName). + WithNamespace(namespaceObj.Name). + WithStorageClass(scObj.Name). WithAccessModes(accessModes). WithCapacity(capacity).Build() Expect(err).ShouldNot( HaveOccurred(), "while building pvc {%s} in namespace {%s}", pvcName, - nsName, + namespaceObj.Name, ) By("creating above pvc") - _, err = ops.PVCClient.WithNamespace(nsName).Create(pvcObj) + _, err = ops.PVCClient.WithNamespace(namespaceObj.Name). + Create(pvcObj) Expect(err).To( BeNil(), "while creating pvc {%s} in namespace {%s}", pvcName, - nsName, + namespaceObj.Name, ) By("verifying controller pod count") - controllerPodCount := ops.GetPodRunningCountEventually(nsName, ctrlLabel, 1) - Expect(controllerPodCount).To(Equal(1), "while checking controller pod count") + controllerPodCount := ops.GetPodRunningCountEventually( + namespaceObj.Name, + jiva.CtrlLabel, + 1, + ) + Expect(controllerPodCount).To( + Equal(1), + "while checking controller pod count", + ) By("verifying replica pod count") - replicaPodCount := ops.GetPodRunningCountEventually(nsName, replicaLabel, jiva.ReplicaCount) - Expect(replicaPodCount).To(Equal(jiva.ReplicaCount), "while checking replica pod count") + replicaPodCount := ops.GetPodRunningCountEventually( + namespaceObj.Name, + jiva.ReplicaLabel, + jiva.ReplicaCount, + ) + Expect(replicaPodCount).To( + Equal(jiva.ReplicaCount), + "while checking replica pod count", + ) By("verifying status as bound") - status := ops.IsPVCBound(pvcName) + status := 
ops.IsPVCBoundEventually(pvcName) Expect(status).To(Equal(true), "while checking status equal to bound") }) }) + When("creating application pod with above pvc as volume", func() { + It("should create a running pod", func() { + podObj, err = pod.NewBuilder(). + WithName(appName). + WithNamespace(namespaceObj.Name). + WithContainerBuilder( + container.NewBuilder(). + WithName("busybox"). + WithImage("busybox"). + WithCommand( + []string{ + "sh", + "-c", + "date > /mnt/store1/date.txt; sync; sleep 5; sync; tail -f /dev/null;", + }, + ). + WithVolumeMounts( + []corev1.VolumeMount{ + corev1.VolumeMount{ + Name: "demo-vol1", + MountPath: "/mnt/store1", + }, + }, + ), + ). + WithVolumeBuilder( + volume.NewBuilder(). + WithName("demo-vol1"). + WithPVCSource(pvcName), + ). + Build() + Expect(err).ShouldNot(HaveOccurred(), "while building pod {%s}", appName) + + By("creating pod with above pvc as volume") + podObj, err = ops.PodClient.WithNamespace(namespaceObj.Name).Create(podObj) + Expect(err).ShouldNot( + HaveOccurred(), + "while creating pod {%s} in namespace {%s}", + appName, + namespaceObj.Name, + ) + + By("verifying pod is running") + status := ops.IsPodRunningEventually(namespaceObj.Name, appName) + Expect(status).To(Equal(true), "while checking status of pod {%s}", appName) + + }) + }) + When("jiva snapshot is created", func() { It("should create a snapshot with type ready", func() { By("building a snapshot") snapObj, err = snap.NewBuilder(). WithName(snapName). - WithNamespace(nsName). + WithNamespace(namespaceObj.Name). WithPVC(pvcName). 
Build() Expect(err).To( BeNil(), "while building snapshot {%s} in namespace {%s}", snapName, - nsName, + namespaceObj.Name, ) By("creating above snapshot") - _, err = ops.SnapClient.WithNamespace(nsName).Create(snapObj) + _, err = ops.SnapClient.WithNamespace(namespaceObj.Name).Create(snapObj) Expect(err).To( BeNil(), "while creating snapshot{%s} in namespace {%s}", snapName, - nsName, + namespaceObj.Name, ) By("verifying type as ready") @@ -128,8 +196,8 @@ var _ = Describe("[jiva] TEST JIVA CLONE CREATION", func() { cloneObj, err = pvc.NewBuilder(). WithName(cloneName). WithAnnotations(cloneAnnotations). - WithNamespace(nsName). - WithStorageClass(scName). + WithNamespace(namespaceObj.Name). + WithStorageClass(openebsCloneStorageclass). WithAccessModes(accessModes). WithCapacity(capacity). Build() @@ -137,21 +205,144 @@ var _ = Describe("[jiva] TEST JIVA CLONE CREATION", func() { HaveOccurred(), "while building clone pvc {%s} in namespace {%s}", cloneName, - nsName, + namespaceObj.Name, ) By("creating above clone pvc") - _, err = ops.PVCClient.WithNamespace(nsName).Create(cloneObj) + _, err = ops.PVCClient.WithNamespace(namespaceObj.Name).Create(cloneObj) Expect(err).To( BeNil(), "while creating clone pvc {%s} in namespace {%s}", cloneName, - nsName, + namespaceObj.Name, ) By("verifying clone pod count") - clonePodCount := ops.GetPodRunningCountEventually(nsName, cloneLable, jiva.ReplicaCount+1) - Expect(clonePodCount).To(Equal(jiva.ReplicaCount+1), "while checking clone pvc pod count") + clonePodCount := ops.GetPodRunningCountEventually( + namespaceObj.Name, + cloneLable, + jiva.ReplicaCount+1, + ) + Expect(clonePodCount).To( + Equal(jiva.ReplicaCount+1), + "while checking clone pvc pod count", + ) + + By("verifying status as bound") + status := ops.IsPVCBound(cloneName) + Expect(status).To(Equal(true), "while checking status equal to bound") + + }) + }) + + When("creating application pod with above clone pvc as volume", func() { + It("should create a running 
pod", func() { + clonePodObj, err = pod.NewBuilder(). + WithName(cloneAppName). + WithNamespace(namespaceObj.Name). + WithContainerBuilder( + container.NewBuilder(). + WithName("busybox"). + WithImage("busybox"). + WithCommand( + []string{ + "sh", + "-c", + "tail -f /dev/null", + }, + ). + WithVolumeMounts( + []corev1.VolumeMount{ + corev1.VolumeMount{ + Name: "demo-vol1", + MountPath: "/mnt/store1", + }, + }, + ), + ). + WithVolumeBuilder( + volume.NewBuilder(). + WithName("demo-vol1"). + WithPVCSource(cloneName), + ). + Build() + Expect(err).ShouldNot( + HaveOccurred(), + "while building pod {%s}", + cloneAppName, + ) + + By("creating pod with above pvc as volume") + clonePodObj, err = ops.PodClient.WithNamespace(namespaceObj.Name). + Create(clonePodObj) + Expect(err).ShouldNot( + HaveOccurred(), + "while creating pod {%s} in namespace {%s}", + cloneAppName, + namespaceObj.Name, + ) + + By("verifying pod is running") + status := ops.IsPodRunningEventually(namespaceObj.Name, cloneAppName) + Expect(status).To( + Equal(true), + "while checking status of pod {%s}", + cloneAppName, + ) + + }) + }) + + When("verifying data consistency in pvc and clone pvc", func() { + It("should have consistent data between the two pvcs", func() { + By("fetching data from original pvc") + podOutput, err := ops.PodClient.WithNamespace(namespaceObj.Name). + Exec( + podObj.Name, + &corev1.PodExecOptions{ + Command: []string{ + "sh", + "-c", + "md5sum mnt/store1/date.txt", + }, + Container: "busybox", + Stdin: false, + Stdout: true, + Stderr: true, + }, + ) + Expect(err).ShouldNot(HaveOccurred(), "while exec in application pod") + + By("fetching data from clone pvc") + clonePodOutput, err := ops.PodClient.WithNamespace(namespaceObj.Name). 
+ Exec( + clonePodObj.Name, + &corev1.PodExecOptions{ + Command: []string{ + "sh", + "-c", + "md5sum mnt/store1/date.txt", + }, + Container: "busybox", + Stdin: false, + Stdout: true, + Stderr: true, + }, + ) + Expect(err).ShouldNot(HaveOccurred(), "while exec in clone application pod") + + By("veryfing data consistency") + Expect(podOutput).To(Equal(clonePodOutput), "while checking data consistency") + + By("deleting application pod") + err = ops.PodClient.WithNamespace(namespaceObj.Name). + Delete(podObj.Name, &metav1.DeleteOptions{}) + Expect(err).ShouldNot(HaveOccurred(), "while deleting application pod") + + By("deleting clone application pod") + err = ops.PodClient.WithNamespace(namespaceObj.Name). + Delete(clonePodObj.Name, &metav1.DeleteOptions{}) + Expect(err).ShouldNot(HaveOccurred(), "while deleting clone application pod") }) }) @@ -160,16 +351,21 @@ var _ = Describe("[jiva] TEST JIVA CLONE CREATION", func() { It("should remove clone pvc pods", func() { By("deleting above clone pvc") - err := ops.PVCClient.Delete(cloneName, &metav1.DeleteOptions{}) + err := ops.PVCClient.WithNamespace(namespaceObj.Name). 
+ Delete(cloneName, &metav1.DeleteOptions{}) Expect(err).To( BeNil(), "while deleting clone pvc {%s} in namespace {%s}", cloneName, - nsName, + namespaceObj.Name, ) By("verifying clone pvc pods as 0") - clonePodCount := ops.GetPodRunningCountEventually(nsName, cloneLable, 0) + clonePodCount := ops.GetPodRunningCountEventually( + namespaceObj.Name, + cloneLable, + 0, + ) Expect(clonePodCount).To(Equal(0), "while checking clone pvc pod count") By("verifying deleted clone pvc") @@ -188,7 +384,7 @@ var _ = Describe("[jiva] TEST JIVA CLONE CREATION", func() { BeNil(), "while deleting snapshot {%s} in namespace {%s}", snapName, - nsName, + namespaceObj.Name, ) By("verifying deleted snapshot") @@ -202,20 +398,29 @@ var _ = Describe("[jiva] TEST JIVA CLONE CREATION", func() { It("should not have any jiva controller and replica pods", func() { By("deleting above pvc") - err := ops.PVCClient.Delete(pvcName, &metav1.DeleteOptions{}) + err := ops.PVCClient.WithNamespace(namespaceObj.Name). + Delete(pvcName, &metav1.DeleteOptions{}) Expect(err).To( BeNil(), "while deleting pvc {%s} in namespace {%s}", pvcName, - nsName, + namespaceObj.Name, ) By("verifying controller pod count as 0") - controllerPodCount := ops.GetPodRunningCountEventually(nsName, ctrlLabel, 0) + controllerPodCount := ops.GetPodRunningCountEventually( + namespaceObj.Name, + jiva.CtrlLabel, + 0, + ) Expect(controllerPodCount).To(Equal(0), "while checking controller pod count") By("verifying replica pod count as 0") - replicaPodCount := ops.GetPodRunningCountEventually(nsName, replicaLabel, 0) + replicaPodCount := ops.GetPodRunningCountEventually( + namespaceObj.Name, + jiva.ReplicaLabel, + 0, + ) Expect(replicaPodCount).To(Equal(0), "while checking replica pod count") By("verifying deleted pvc") diff --git a/tests/jiva/clone/suite_test.go b/tests/jiva/clone/suite_test.go index 6e31adb3d1..4e911ea062 100644 --- a/tests/jiva/clone/suite_test.go +++ b/tests/jiva/clone/suite_test.go @@ -39,15 +39,16 @@ import ( ) 
var ( - nsObj *corev1.Namespace - snapObj *snapshot.VolumeSnapshot - scObj *storagev1.StorageClass - nsName = "jiva-clone-ns" - scName = "jiva-clone-sc" - openebsProvisioner = "openebs.io/provisioner-iscsi" - openebsCASConfigValue = "- name: ReplicaCount\n Value: " - annotations = map[string]string{} - err error + namespaceObj *corev1.Namespace + snapObj *snapshot.VolumeSnapshot + scObj *storagev1.StorageClass + namespace = "jiva-clone-ns" + scName = "jiva-clone-sc" + openebsProvisioner = "openebs.io/provisioner-iscsi" + openebsCASConfigValue = "- name: ReplicaCount\n Value: " + openebsCloneStorageclass = "openebs-snapshot-promoter" + annotations = map[string]string{} + err error ) func TestSource(t *testing.T) { @@ -93,35 +94,35 @@ var _ = BeforeSuite(func() { Expect(podCount).To(Equal(1)) By("building a namespace") - nsObj, err = ns.NewBuilder(). - WithName(nsName). + namespaceObj, err = ns.NewBuilder(). + WithGenerateName(namespace). APIObject() - Expect(err).ShouldNot(HaveOccurred(), "while building namespace {%s}", nsName) + Expect(err).ShouldNot(HaveOccurred(), "while building namespace {%s}", namespace) By("building a storageclass") scObj, err = sc.NewBuilder(). - WithName(scName). + WithGenerateName(scName). WithAnnotations(annotations). 
WithProvisioner(openebsProvisioner).Build() Expect(err).ShouldNot(HaveOccurred(), "while building storageclass {%s}", scName) By("creating a namespace") - _, err = ops.NSClient.Create(nsObj) - Expect(err).To(BeNil(), "while creating namespace {%s}", nsObj.Name) + namespaceObj, err = ops.NSClient.Create(namespaceObj) + Expect(err).To(BeNil(), "while creating namespace {%s}", namespaceObj.GenerateName) By("creating a storageclass") - _, err = ops.SCClient.Create(scObj) - Expect(err).To(BeNil(), "while creating storageclass {%s}", scObj.Name) + scObj, err = ops.SCClient.Create(scObj) + Expect(err).To(BeNil(), "while creating storageclass {%s}", scObj.GenerateName) }) var _ = AfterSuite(func() { By("deleting storageclass") - err := ops.SCClient.Delete(scName, &metav1.DeleteOptions{}) + err = ops.SCClient.Delete(scObj.Name, &metav1.DeleteOptions{}) Expect(err).To(BeNil(), "while deleting storageclass {%s}", scObj.Name) By("deleting namespace") - err = ops.NSClient.Delete(nsName, &metav1.DeleteOptions{}) - Expect(err).To(BeNil(), "while deleting namespace {%s}", nsName) + err = ops.NSClient.Delete(namespaceObj.Name, &metav1.DeleteOptions{}) + Expect(err).To(BeNil(), "while deleting namespace {%s}", namespaceObj.Name) }) diff --git a/tests/jiva/jiva.go b/tests/jiva/jiva.go index 4ea5e79c5a..13fac0d189 100644 --- a/tests/jiva/jiva.go +++ b/tests/jiva/jiva.go @@ -22,6 +22,10 @@ var ( // ReplicaCount is the value of // replica count provided at runtime ReplicaCount int + // ReplicaLabel is the label for replica pods + ReplicaLabel = "openebs.io/replica=jiva-replica" + // CtrlLabel is the label for controller pod + CtrlLabel = "openebs.io/controller=jiva-controller" ) // ParseFlags gets the flag values at run time diff --git a/tests/jiva/node-stickiness/install_resource.go b/tests/jiva/node-stickiness/install_resource.go deleted file mode 100644 index 143a9151b4..0000000000 --- a/tests/jiva/node-stickiness/install_resource.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright © 
2018-2019 The OpenEBS Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nodestickiness - -import ( - . "github.com/onsi/gomega" - sc "github.com/openebs/maya/pkg/kubernetes/storageclass/v1alpha1" - unstruct "github.com/openebs/maya/pkg/unstruct/v1alpha2" - "github.com/openebs/maya/tests/artifacts" - installer "github.com/openebs/maya/tests/artifacts/installer/v1alpha1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// NodeStickyInstaller holds the required objects for installation of test related resource -type NodeStickyInstaller struct { - *installer.DefaultInstaller -} - -const ( - // defaultTimeOut is the default time in seconds - // for Eventually block - defaultTimeOut int = 500 - // defaultPollingInterval is the default polling - // time in seconds for the Eventually block - defaultPollingInterval int = 10 -) - -// NewNodeStickyInstallerForArtifacts defines new instance of NodeStickyInstaller -func NewNodeStickyInstallerForArtifacts(artifact artifacts.Artifact, opts ...unstruct.KubeclientBuildOption) *NodeStickyInstaller { - n := NodeStickyInstaller{} - // Extracting artifact unstructured - artifactUnstruct, err := artifacts.GetArtifactUnstructured(artifact) - Expect(err).ShouldNot(HaveOccurred()) - n.DefaultInstaller, err = installer. - BuilderForObject(artifactUnstruct). - WithKubeClient(opts...). 
- Build() - Expect(err).ShouldNot(HaveOccurred()) - return &n -} - -// GetInstallerInstance builds the NodeStickyInstaller instance with installer object -func (n *NodeStickyInstaller) GetInstallerInstance() *installer.DefaultInstaller { - return n.DefaultInstaller -} - -// IsSCDeployed checks whether sc is present in the cluster or not -func IsSCDeployed(name string) bool { - Eventually(func() bool { - storageClass, err := sc.NewKubeClient().Get(name, metav1.GetOptions{}) - Expect(err).ShouldNot(HaveOccurred()) - if storageClass != nil { - return true - } - return false - }, - defaultTimeOut, defaultPollingInterval). - Should(BeTrue(), "StorageClass should present") - return true -} diff --git a/tests/jiva/node-stickiness/jiva_pvc_resource.yaml b/tests/jiva/node-stickiness/jiva_pvc_resource.yaml deleted file mode 100644 index 8b35c63a3d..0000000000 --- a/tests/jiva/node-stickiness/jiva_pvc_resource.yaml +++ /dev/null @@ -1,12 +0,0 @@ -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: jiva-vol1-1r-claim - namespace: jiva-test -spec: - storageClassName: jiva-single-replica - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 5G diff --git a/tests/jiva/node-stickiness/jiva_sc_resource.yaml b/tests/jiva/node-stickiness/jiva_sc_resource.yaml deleted file mode 100644 index f01fa044a8..0000000000 --- a/tests/jiva/node-stickiness/jiva_sc_resource.yaml +++ /dev/null @@ -1,12 +0,0 @@ -# Creates jiva-single-replica storage class -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: jiva-single-replica - annotations: - openebs.io/cas-type: jiva - cas.openebs.io/config: | - - name: ReplicaCount - value: "1" -provisioner: openebs.io/provisioner-iscsi ---- diff --git a/tests/jiva/node-stickiness/node_stickiness_test.go b/tests/jiva/node-stickiness/node_stickiness_test.go new file mode 100644 index 0000000000..3d1ebd3ee3 --- /dev/null +++ b/tests/jiva/node-stickiness/node_stickiness_test.go @@ -0,0 +1,141 @@ +/* +Copyright 2019 The 
OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodestickiness + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + pvc "github.com/openebs/maya/pkg/kubernetes/persistentvolumeclaim/v1alpha1" + pod "github.com/openebs/maya/pkg/kubernetes/pod/v1alpha1" + "github.com/openebs/maya/tests/jiva" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + times = 5 + accessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce} + capacity = "5G" + pvcObj *corev1.PersistentVolumeClaim + err error +) + +var _ = Describe("[jiva] TEST NODE STICKINESS", func() { + var ( + pvcName = "jiva-volume-claim" + ) + + BeforeEach(func() { + + By("building a pvc") + pvcObj, err = pvc.NewBuilder(). + WithName(pvcName). + WithNamespace(namespaceObj.Name). + WithStorageClass(scObj.Name). + WithAccessModes(accessModes). 
+ WithCapacity(capacity).Build() + Expect(err).ShouldNot( + HaveOccurred(), + "while building pvc {%s} in namespace {%s}", + pvcName, + namespaceObj.Name, + ) + + By("creating above pvc") + _, err = ops.PVCClient.WithNamespace(namespaceObj.Name).Create(pvcObj) + Expect(err).To( + BeNil(), + "while creating pvc {%s} in namespace {%s}", + pvcName, + namespaceObj.Name, + ) + + By("verifying controller pod count") + controllerPodCount := ops.GetPodRunningCountEventually(namespaceObj.Name, jiva.CtrlLabel, 1) + Expect(controllerPodCount).To(Equal(1), "while checking controller pod count") + + By("verifying replica pod count ") + replicaPodCount := ops.GetPodRunningCountEventually(namespaceObj.Name, jiva.ReplicaLabel, jiva.ReplicaCount) + Expect(replicaPodCount).To(Equal(jiva.ReplicaCount), "while checking replica pod count") + + By("verifying status as bound") + status := ops.IsPVCBoundEventually(pvcName) + Expect(status).To(Equal(true), "while checking status equal to bound") + + }) + + AfterEach(func() { + + By("deleting above pvc") + err := ops.PVCClient.Delete(pvcName, &metav1.DeleteOptions{}) + Expect(err).To( + BeNil(), + "while deleting pvc {%s} in namespace {%s}", + pvcName, + namespaceObj.Name, + ) + + By("verifying controller pod count as 0") + controllerPodCount := ops.GetPodRunningCountEventually(namespaceObj.Name, jiva.CtrlLabel, 0) + Expect(controllerPodCount).To(Equal(0), "while checking controller pod count") + + By("verifying replica pod count as 0") + replicaPodCount := ops.GetPodRunningCountEventually(namespaceObj.Name, jiva.ReplicaLabel, 0) + Expect(replicaPodCount).To(Equal(0), "while checking replica pod count") + + By("verifying deleted pvc") + pvc := ops.IsPVCDeleted(pvcName) + Expect(pvc).To(Equal(true), "while trying to get deleted pvc") + + }) + + When("replica pod of pvc is deleted", func() { + It("should stick to same node after reconciliation", func() { + podList, err := ops.PodClient. + WithNamespace(namespaceObj.Name). 
+ List(metav1.ListOptions{LabelSelector: jiva.ReplicaLabel}) + Expect(err).ShouldNot(HaveOccurred(), "while fetching replica pods") + nodeNames := pod.FromList(podList).GetScheduledNodes() + + for i := 0; i < times; i++ { + + By("deleting a replica pod") + err = ops.PodClient.Delete(podList.Items[0].Name, &metav1.DeleteOptions{}) + Expect(err).ShouldNot(HaveOccurred(), "while deleting replica pod") + + By("verifying deleted pod is terminated") + status := ops.IsPodDeletedEventually(namespaceObj.Name, podList.Items[0].Name) + Expect(status).To(Equal(true), "while checking for deleted pod") + + By("verifying running replica pod count ") + replicaPodCount := ops.GetPodRunningCountEventually(namespaceObj.Name, jiva.ReplicaLabel, jiva.ReplicaCount) + Expect(replicaPodCount).To(Equal(jiva.ReplicaCount), "while checking replica pod count") + + By("verifying node stickiness") + podList, err = ops.PodClient. + WithNamespace(namespaceObj.Name). + List(metav1.ListOptions{LabelSelector: jiva.ReplicaLabel}) + Expect(err).ShouldNot(HaveOccurred(), "while fetching replica pods") + + validate := pod.FromList(podList).IsMatchNodeAny(nodeNames) + + Expect(validate).To(Equal(true), "while checking node stickiness") + } + }) + }) + +}) diff --git a/tests/jiva/node-stickiness/replicadeletion/jiva_replica_deletion_suit_test.go b/tests/jiva/node-stickiness/replicadeletion/jiva_replica_deletion_suit_test.go deleted file mode 100644 index d28bd747a2..0000000000 --- a/tests/jiva/node-stickiness/replicadeletion/jiva_replica_deletion_suit_test.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright © 2018-2019 The OpenEBS Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package replicadeletion - -import ( - "flag" - "strconv" - - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - node "github.com/openebs/maya/pkg/kubernetes/node/v1alpha1" - pod "github.com/openebs/maya/pkg/kubernetes/pod/v1alpha1" - unstruct "github.com/openebs/maya/pkg/unstruct/v1alpha2" - "github.com/openebs/maya/tests/artifacts" - installer "github.com/openebs/maya/tests/artifacts/installer/v1alpha1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - // auth plugins - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" -) - -const ( - // defaultTimeOut is the default time in seconds - // for Eventually block - defaultTimeOut int = 500 - // defaultPollingInterval is the default polling - // time in seconds for the Eventually block - defaultPollingInterval int = 10 - // minNodeCount is the minimum number of nodes - // need to run this test - minNodeCount int = 3 - // parentDir is the OpenEBS artifacts source directory - parentDir artifacts.ArtifactSource = "../../" -) - -var ( - // defaultInstallerList holds the list of DefaultInstaller instances - defaultInstallerList []*installer.DefaultInstaller -) - -func TestSource(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Node-Stickiness via pod deleteion") -} - -var kubeConfigPath string - -func init() { - flag.StringVar(&kubeConfigPath, "kubeConfigPath", "", "Based on arguments test will be triggered on corresponding cluster") -} - -// TODO: Refactor below code based on the framework changes -// getPodList returns the list of running pod 
object -func getPodList(podKubeClient *pod.KubeClient, namespace, lselector string, podCount int) (pods *corev1.PodList) { - // Verify phase of the pod - var err error - - if podKubeClient == nil { - podKubeClient = pod.NewKubeClient(pod.WithKubeConfigPath(kubeConfigPath)).WithNamespace(namespace) - } - - Eventually(func() int { - pods, err = podKubeClient. - List(metav1.ListOptions{LabelSelector: lselector}) - Expect(err).ShouldNot(HaveOccurred()) - return pod.ListBuilderForAPIList(pods). - WithFilter(pod.IsRunning()). - List(). - Len() - }, - defaultTimeOut, defaultPollingInterval). - Should(Equal(podCount), "Pod count should be "+string(podCount)) - return -} - -var _ = BeforeSuite(func() { - // Fetching the kube config path - //configPath, err := kubernetes.GetConfigPath() - //Expect(err).ShouldNot(HaveOccurred()) - - //// Setting the path in environemnt variable - //err = os.Setenv(string(v1alpha1.KubeConfigEnvironmentKey), configPath) - //Expect(err).ShouldNot(HaveOccurred()) - - // Check the running node count - nodesClient := node. - NewKubeClient(node.WithKubeConfigPath(kubeConfigPath)) - nodes, err := nodesClient.List(metav1.ListOptions{}) - Expect(err).ShouldNot(HaveOccurred()) - nodeCnt := node. - NewListBuilder(). - WithAPIList(nodes). - WithFilter(node.IsReady()). - List(). - Len() - Expect(nodeCnt).Should(Equal(minNodeCount), "Running node count should be "+strconv.Itoa(int(minNodeCount))) - - // Fetch openebs component artifacts - openebsartifacts, errs := artifacts.GetArtifactsListUnstructuredFromFile(parentDir + artifacts.OpenEBSArtifacts) - Expect(errs).Should(HaveLen(0)) - - By("Installing OpenEBS components") - // Installing the artifacts to kubernetes cluster - for _, artifact := range openebsartifacts { - defaultInstaller, err := installer. - BuilderForObject(artifact). - WithKubeClient(unstruct.WithKubeConfigPath(kubeConfigPath)). 
- Build() - Expect(err).ShouldNot(HaveOccurred()) - // installerClient := defaultInstaller.NewKubeClient() - //Expect(err).ShouldNot(HaveOccurred()) - err = defaultInstaller.Install() - Expect(err).ShouldNot(HaveOccurred()) - defaultInstallerList = append(defaultInstallerList, defaultInstaller) - } - - podKubeClient := pod.NewKubeClient( - pod.WithKubeConfigPath(kubeConfigPath)). - WithNamespace(string(artifacts.OpenebsNamespace)) - // Check for maya-apiserver pod to get created and running - _ = getPodList(podKubeClient, string(artifacts.OpenebsNamespace), string(artifacts.MayaAPIServerLabelSelector), 1) - - // Check for provisioner pod to get created and running - _ = getPodList(podKubeClient, string(artifacts.OpenebsNamespace), string(artifacts.OpenEBSProvisionerLabelSelector), 1) - - // Check for snapshot operator to get created and running - _ = getPodList(podKubeClient, string(artifacts.OpenebsNamespace), string(artifacts.OpenEBSSnapshotOperatorLabelSelector), 1) - - // Check for admission server to get created and running - _ = getPodList(podKubeClient, string(artifacts.OpenebsNamespace), string(artifacts.OpenEBSAdmissionServerLabelSelector), 1) - - // Check for NDM pods to get created and running - _ = getPodList(podKubeClient, string(artifacts.OpenebsNamespace), string(artifacts.OpenEBSNDMLabelSelector), minNodeCount) - - // Check for cstor storage pool pods to get created and running - _ = getPodList(podKubeClient, string(artifacts.OpenebsNamespace), string(artifacts.OpenEBSCStorPoolLabelSelector), minNodeCount) - - By("OpenEBS components are in running state") -}) - -var _ = AfterSuite(func() { - By("Uinstalling OpenEBS Components and test namespace") - for _, componentInstaller := range defaultInstallerList { - err := componentInstaller.UnInstall() - Expect(err).ShouldNot(HaveOccurred()) - } -}) diff --git a/tests/jiva/node-stickiness/replicadeletion/jiva_replica_deletion_test.go b/tests/jiva/node-stickiness/replicadeletion/jiva_replica_deletion_test.go 
deleted file mode 100644 index 4a187be702..0000000000 --- a/tests/jiva/node-stickiness/replicadeletion/jiva_replica_deletion_test.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright © 2018-2019 The OpenEBS Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package replicadeletion - -import ( - "fmt" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - pvc "github.com/openebs/maya/pkg/kubernetes/persistentvolumeclaim/v1alpha1" - pod "github.com/openebs/maya/pkg/kubernetes/pod/v1alpha1" - sc "github.com/openebs/maya/pkg/kubernetes/storageclass/v1alpha1" - unstruct "github.com/openebs/maya/pkg/unstruct/v1alpha2" - "github.com/openebs/maya/tests/artifacts" - nodestickiness "github.com/openebs/maya/tests/jiva/node-stickiness" - corev1 "k8s.io/api/core/v1" - k8serror "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - // auth plugins - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" -) - -const ( - testTimes = 20 - // jiva-rep-delete-ns namespace to deploy jiva ctrl & replicas - nameSpaceYaml artifacts.Artifact = ` -apiVersion: v1 -kind: Namespace -metadata: - name: jiva-rep-delete-ns -` -) - -var _ = Describe("[jiva] [node-stickiness] jiva replica pod node-stickiness test", func() { - var ( - // replicaLabel consist of defaultReplicaLabel and coressponding - // pvcLabel - replicaLabel string - // ctrlLabel consist of defaultReplicaLabel and coressponding - // pvcLabel - ctrlLabel string - //podListObj holds the 
PodList instance - podListObj *corev1.PodList - podKubeClient *pod.KubeClient - namespaceInstallerObj *nodestickiness.NodeStickyInstaller - // defaultReplicaLabel represents the jiva replica - defaultReplicaLabel = "openebs.io/replica=jiva-replica" - // defaultCtrlLabel represents the jiva controller - defaultCtrlLabel = "openebs.io/controller=jiva-controller" - // defaultPVCLabel represents the default OpenEBS PVC label key - defaultPVCLabel = "openebs.io/persistent-volume-claim=" - storageEngine = "jiva" - replicaCount = "1" - openebsCASConfigValue = "- name: ReplicaCount\n Value: " + replicaCount - scName = "jiva-single-replica" - pvcName = "jiva-vol1-1r-claim" - testNamespace = "jiva-rep-delete-ns" - accessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce} - capacity = "5G" - //TODO: following variables should be moved in framework or openebs-artifacts - openebsCASType = "cas.openebs.io/cas-type" - openebsCASConfig = "cas.openebs.io/config" - openebsProvisioner = "openebs.io/provisioner-iscsi" - ) - BeforeEach(func() { - var err error - // Creates test namespace - By("Deploying the test namespace") - namespaceInstallerObj = nodestickiness. - NewNodeStickyInstallerForArtifacts( - artifacts.Artifact(nameSpaceYaml), - unstruct.WithKubeConfigPath(kubeConfigPath)) - err = namespaceInstallerObj.GetInstallerInstance().Install() - Expect(err).ShouldNot(HaveOccurred()) - namespaceUnstruct := namespaceInstallerObj.GetUnstructuredObject() - - By(fmt.Sprintf("creating a storage class named %s", scName)) - annotations := map[string]string{ - openebsCASType: storageEngine, - openebsCASConfig: openebsCASConfigValue, - } - scObj, err := sc.NewBuilder(). - WithName(scName). - WithAnnotations(annotations). 
- WithProvisioner(openebsProvisioner).Build() - Expect(err).ShouldNot(HaveOccurred()) - - By(fmt.Sprintf("deploying the storage class %s", scName)) - _, err = sc.NewKubeClient(sc.WithKubeConfigPath(kubeConfigPath)).Create(scObj) - Expect(err).ShouldNot(HaveOccurred()) - - By(fmt.Sprintf("creating a PVC named %s", pvcName)) - pvcObj, err := pvc.NewBuilder(). - WithName(pvcName). - WithNamespace(testNamespace). - WithStorageClass(scName). - WithAccessModes(accessModes). - WithCapacity(capacity).Build() - Expect(err).ShouldNot(HaveOccurred()) - - By(fmt.Sprintf("deploying the PVC named: %s in namespace: %s", pvcName, namespaceUnstruct.GetName())) - _, err = pvc. - NewKubeClient( - pvc.WithKubeConfigPath(kubeConfigPath)). - WithNamespace(testNamespace). - Create(pvcObj) - Expect(err).ShouldNot(HaveOccurred()) - - podKubeClient = pod. - NewKubeClient( - pod.WithKubeConfigPath(kubeConfigPath)). - WithNamespace(string(testNamespace)) - - // pvcLabel represents the coressponding pvc - pvcLabel := defaultPVCLabel + pvcName - replicaLabel = defaultReplicaLabel + "," + pvcLabel - ctrlLabel = defaultCtrlLabel + "," + pvcLabel - // Verify creation of jiva ctrl pod - _ = getPodList(podKubeClient, string(testNamespace), ctrlLabel, 1) - - // Verify creation of jiva replica pod - podListObj = getPodList(podKubeClient, string(testNamespace), replicaLabel, 1) - }) - - AfterEach(func() { - By("Uninstall test artifacts") - err := pvc. - NewKubeClient( - pvc.WithKubeConfigPath(kubeConfigPath)). - WithNamespace(testNamespace). - Delete(pvcName, &metav1.DeleteOptions{}) - Expect(err).ShouldNot(HaveOccurred()) - err = sc. - NewKubeClient( - sc.WithKubeConfigPath(kubeConfigPath)). 
- Delete(scName, &metav1.DeleteOptions{}) - Expect(err).ShouldNot(HaveOccurred()) - err = namespaceInstallerObj.GetInstallerInstance().UnInstall() - Expect(err).ShouldNot(HaveOccurred()) - }) - - Context("node stickiness with jiva replica pod deletion", func() { - // var nodeName, podName string - - It("should verify jiva replica pod sticks to one node", func() { - - for i := 0; i < testTimes; i++ { - By("fetching node name and podName of jiva replica pod") - //nodeName holds name of the node where the replica pod deployed - nodeName := podListObj.Items[0].Spec.NodeName - podName := podListObj.Items[0].ObjectMeta.Name - - By(fmt.Sprintf("deleting the running jiva replica pod: '%s'", podName)) - err := podKubeClient.Delete(podName, &metav1.DeleteOptions{}) - Expect(err).ShouldNot(HaveOccurred()) - - // Makesure that pod is deleted successfully - Eventually(func() bool { - _, err := podKubeClient.Get(podName, metav1.GetOptions{}) - if k8serror.IsNotFound(err) { - return true - } - return false - }, - defaultTimeOut, defaultPollingInterval). - Should(BeTrue(), "Pod not found") - - By("waiting till jiva replica pod starts running") - podListObj = getPodList(podKubeClient, string(testNamespace), replicaLabel, 1) - - By("verifying jiva replica pod node matches with its old instance node") - Expect(podListObj.Items[0].Spec.NodeName).Should(Equal(nodeName)) - } - }) - }) -}) diff --git a/tests/jiva/node-stickiness/suite_test.go b/tests/jiva/node-stickiness/suite_test.go new file mode 100644 index 0000000000..bd14fc3576 --- /dev/null +++ b/tests/jiva/node-stickiness/suite_test.go @@ -0,0 +1,115 @@ +/* +Copyright 2019 The OpenEBS Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodestickiness + +import ( + "strconv" + + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/openebs/maya/tests" + "github.com/openebs/maya/tests/artifacts" + "github.com/openebs/maya/tests/jiva" + + apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" + ns "github.com/openebs/maya/pkg/kubernetes/namespace/v1alpha1" + sc "github.com/openebs/maya/pkg/kubernetes/storageclass/v1alpha1" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + // auth plugins + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" +) + +var ( + namespace = "jiva-volume-ns" + scName = "jiva-volume-sc" + openebsCASConfigValue = "- name: ReplicaCount\n Value: " + openebsProvisioner = "openebs.io/provisioner-iscsi" + namespaceObj *corev1.Namespace + scObj *storagev1.StorageClass + annotations = map[string]string{} +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Test jiva volume node stickiness") +} + +func init() { + jiva.ParseFlags() +} + +var ops *tests.Operations + +var _ = BeforeSuite(func() { + + ops = tests.NewOperations(tests.WithKubeConfigPath(jiva.KubeConfigPath)) + + annotations[string(apis.CASTypeKey)] = string(apis.JivaVolume) + annotations[string(apis.CASConfigKey)] = openebsCASConfigValue + strconv.Itoa(jiva.ReplicaCount) + + By("waiting for maya-apiserver pod to come into running state") + podCount := ops.GetPodRunningCountEventually( + string(artifacts.OpenebsNamespace), + string(artifacts.MayaAPIServerLabelSelector), + 1, + ) + 
Expect(podCount).To(Equal(1)) + + By("waiting for openebs-provisioner pod to come into running state") + podCount = ops.GetPodRunningCountEventually( + string(artifacts.OpenebsNamespace), + string(artifacts.OpenEBSProvisionerLabelSelector), + 1, + ) + Expect(podCount).To(Equal(1)) + + By("building a namespace") + namespaceObj, err = ns.NewBuilder(). + WithGenerateName(namespace). + APIObject() + Expect(err).ShouldNot(HaveOccurred(), "while building namespace {%s}", namespace) + + By("building a storageclass") + scObj, err = sc.NewBuilder(). + WithGenerateName(scName). + WithAnnotations(annotations). + WithProvisioner(openebsProvisioner).Build() + Expect(err).ShouldNot(HaveOccurred(), "while building storageclass {%s}", scName) + + By("creating above namespace") + namespaceObj, err = ops.NSClient.Create(namespaceObj) + Expect(err).To(BeNil(), "while creating namespace {%s}", namespaceObj.GenerateName) + + By("creating above storageclass") + scObj, err = ops.SCClient.Create(scObj) + Expect(err).To(BeNil(), "while creating storageclass {%s}", scObj.GenerateName) + +}) + +var _ = AfterSuite(func() { + + By("deleting storageclass") + err = ops.SCClient.Delete(scObj.Name, &metav1.DeleteOptions{}) + Expect(err).To(BeNil(), "while deleting storageclass {%s}", scObj.Name) + + By("deleting namespace") + err = ops.NSClient.Delete(namespaceObj.Name, &metav1.DeleteOptions{}) + Expect(err).To(BeNil(), "while deleting namespace {%s}", namespaceObj.Name) + +}) diff --git a/tests/jiva/snapshot/provision_test.go b/tests/jiva/snapshot/provision_test.go index d331637c56..7675baa576 100644 --- a/tests/jiva/snapshot/provision_test.go +++ b/tests/jiva/snapshot/provision_test.go @@ -47,32 +47,32 @@ var _ = Describe("[jiva] TEST JIVA SNAPSHOT CREATION", func() { By("building a pvc") pvcObj, err = pvc.NewBuilder(). WithName(pvcName). - WithNamespace(nsName). - WithStorageClass(scName). + WithNamespace(namespaceObj.Name). + WithStorageClass(scObj.Name). WithAccessModes(accessModes). 
WithCapacity(capacity).Build() Expect(err).ShouldNot( HaveOccurred(), "while building pvc {%s} in namespace {%s}", pvcName, - nsName, + namespaceObj.Name, ) By("creating above pvc") - _, err = ops.PVCClient.WithNamespace(nsName).Create(pvcObj) + _, err = ops.PVCClient.WithNamespace(namespaceObj.Name).Create(pvcObj) Expect(err).To( BeNil(), "while creating pvc {%s} in namespace {%s}", pvcName, - nsName, + namespaceObj.Name, ) By("verifying controller pod count") - controllerPodCount := ops.GetPodRunningCountEventually(nsName, ctrlLabel, 1) + controllerPodCount := ops.GetPodRunningCountEventually(namespaceObj.Name, ctrlLabel, 1) Expect(controllerPodCount).To(Equal(1), "while checking controller pod count") By("verifying replica pod count") - replicaPodCount := ops.GetPodRunningCountEventually(nsName, replicaLabel, jiva.ReplicaCount) + replicaPodCount := ops.GetPodRunningCountEventually(namespaceObj.Name, replicaLabel, jiva.ReplicaCount) Expect(replicaPodCount).To(Equal(jiva.ReplicaCount), "while checking replica pod count") By("verifying status as bound") @@ -88,23 +88,23 @@ var _ = Describe("[jiva] TEST JIVA SNAPSHOT CREATION", func() { By("building a snapshot") snapObj, err = snap.NewBuilder(). WithName(snapName). - WithNamespace(nsName). + WithNamespace(namespaceObj.Name). WithPVC(pvcName). 
Build() Expect(err).To( BeNil(), "while building snapshot {%s} in namespace {%s}", snapName, - nsName, + namespaceObj.Name, ) By("creating above snapshot") - _, err = ops.SnapClient.WithNamespace(nsName).Create(snapObj) + _, err = ops.SnapClient.WithNamespace(namespaceObj.Name).Create(snapObj) Expect(err).To( BeNil(), "while creating snapshot {%s} in namespace {%s}", snapName, - nsName, + namespaceObj.Name, ) By("verifying type as ready") @@ -123,7 +123,7 @@ var _ = Describe("[jiva] TEST JIVA SNAPSHOT CREATION", func() { BeNil(), "while deleting snapshot {%s} in namespace {%s}", snapName, - nsName, + namespaceObj.Name, ) By("verifying deleted snapshot") @@ -142,15 +142,15 @@ var _ = Describe("[jiva] TEST JIVA SNAPSHOT CREATION", func() { BeNil(), "while deleting pvc {%s} in namespace {%s}", pvcName, - nsName, + namespaceObj.Name, ) By("verifying controller pod count as 0") - controllerPodCount := ops.GetPodRunningCountEventually(nsName, ctrlLabel, 0) + controllerPodCount := ops.GetPodRunningCountEventually(namespaceObj.Name, ctrlLabel, 0) Expect(controllerPodCount).To(Equal(0), "while checking controller pod count") By("verifying replica pod count as 0") - replicaPodCount := ops.GetPodRunningCountEventually(nsName, replicaLabel, 0) + replicaPodCount := ops.GetPodRunningCountEventually(namespaceObj.Name, replicaLabel, 0) Expect(replicaPodCount).To(Equal(0), "while checking replica pod count") By("verifying deleted pvc") diff --git a/tests/jiva/snapshot/suite_test.go b/tests/jiva/snapshot/suite_test.go index b7add0f1ed..f71ca8cb2b 100644 --- a/tests/jiva/snapshot/suite_test.go +++ b/tests/jiva/snapshot/suite_test.go @@ -40,10 +40,10 @@ import ( ) var ( - nsObj *corev1.Namespace + namespaceObj *corev1.Namespace snapObj *snapshot.VolumeSnapshot scObj *storagev1.StorageClass - nsName = "jiva-snapshot-ns" + namespace = "jiva-snapshot-ns" scName = "jiva-snapshot-sc" openebsProvisioner = "openebs.io/provisioner-iscsi" openebsCASConfigValue = "- name: ReplicaCount\n Value: 
" @@ -94,35 +94,35 @@ var _ = BeforeSuite(func() { Expect(podCount).To(Equal(1)) By("building a namespace") - nsObj, err = ns.NewBuilder(). - WithName(nsName). + namespaceObj, err = ns.NewBuilder(). + WithGenerateName(namespace). APIObject() - Expect(err).ShouldNot(HaveOccurred(), "while building namespace {%s}", nsName) + Expect(err).ShouldNot(HaveOccurred(), "while building namespace {%s}", namespace) By("building a storageclass") scObj, err = sc.NewBuilder(). - WithName(scName). + WithGenerateName(scName). WithAnnotations(annotations). WithProvisioner(openebsProvisioner).Build() Expect(err).ShouldNot(HaveOccurred(), "while building storageclass {%s}", scName) By("creating a namespace") - _, err = ops.NSClient.Create(nsObj) - Expect(err).To(BeNil(), "while creating namespace {%s}", nsObj.Name) + namespaceObj, err = ops.NSClient.Create(namespaceObj) + Expect(err).To(BeNil(), "while creating namespace {%s}", namespaceObj.GenerateName) By("creating a storageclass") - _, err = ops.SCClient.Create(scObj) - Expect(err).To(BeNil(), "while creating storageclass {%s}", scObj.Name) + scObj, err = ops.SCClient.Create(scObj) + Expect(err).To(BeNil(), "while creating storageclass {%s}", scObj.GenerateName) }) var _ = AfterSuite(func() { By("deleting storageclass") - err := ops.SCClient.Delete(scName, &metav1.DeleteOptions{}) + err = ops.SCClient.Delete(scObj.Name, &metav1.DeleteOptions{}) Expect(err).To(BeNil(), "while deleting storageclass {%s}", scObj.Name) By("deleting namespace") - err = ops.NSClient.Delete(nsName, &metav1.DeleteOptions{}) - Expect(err).To(BeNil(), "while deleting namespace {%s}", nsName) + err = ops.NSClient.Delete(namespaceObj.Name, &metav1.DeleteOptions{}) + Expect(err).To(BeNil(), "while deleting namespace {%s}", namespace) }) diff --git a/tests/jiva/volume/provision_test.go b/tests/jiva/volume/provision_test.go index 601b75b6f2..83b98ff5b3 100644 --- a/tests/jiva/volume/provision_test.go +++ b/tests/jiva/volume/provision_test.go @@ -45,32 +45,32 @@ 
var _ = Describe("[jiva] TEST VOLUME PROVISIONING", func() { By("building a pvc") pvcObj, err = pvc.NewBuilder(). WithName(pvcName). - WithNamespace(nsName). - WithStorageClass(scName). + WithNamespace(namespaceObj.Name). + WithStorageClass(scObj.Name). WithAccessModes(accessModes). WithCapacity(capacity).Build() Expect(err).ShouldNot( HaveOccurred(), "while building pvc {%s} in namespace {%s}", pvcName, - nsName, + namespaceObj.Name, ) By("creating above pvc") - _, err = ops.PVCClient.WithNamespace(nsName).Create(pvcObj) + _, err = ops.PVCClient.WithNamespace(namespaceObj.Name).Create(pvcObj) Expect(err).To( BeNil(), "while creating pvc {%s} in namespace {%s}", pvcName, - nsName, + namespaceObj.Name, ) By("verifying controller pod count") - controllerPodCount := ops.GetPodRunningCountEventually(nsName, ctrlLabel, 1) + controllerPodCount := ops.GetPodRunningCountEventually(namespaceObj.Name, ctrlLabel, 1) Expect(controllerPodCount).To(Equal(1), "while checking controller pod count") By("verifying replica pod count ") - replicaPodCount := ops.GetPodRunningCountEventually(nsName, replicaLabel, jiva.ReplicaCount) + replicaPodCount := ops.GetPodRunningCountEventually(namespaceObj.Name, replicaLabel, jiva.ReplicaCount) Expect(replicaPodCount).To(Equal(jiva.ReplicaCount), "while checking replica pod count") By("verifying status as bound") @@ -89,15 +89,15 @@ var _ = Describe("[jiva] TEST VOLUME PROVISIONING", func() { BeNil(), "while deleting pvc {%s} in namespace {%s}", pvcName, - nsName, + namespaceObj.Name, ) By("verifying controller pod count as 0") - controllerPodCount := ops.GetPodRunningCountEventually(nsName, ctrlLabel, 0) + controllerPodCount := ops.GetPodRunningCountEventually(namespaceObj.Name, ctrlLabel, 0) Expect(controllerPodCount).To(Equal(0), "while checking controller pod count") By("verifying replica pod count as 0") - replicaPodCount := ops.GetPodRunningCountEventually(nsName, replicaLabel, 0) + replicaPodCount := 
ops.GetPodRunningCountEventually(namespaceObj.Name, replicaLabel, 0) Expect(replicaPodCount).To(Equal(0), "while checking replica pod count") By("verifying deleted pvc") diff --git a/tests/jiva/volume/suite_test.go b/tests/jiva/volume/suite_test.go index 5db1bd9082..259996201d 100644 --- a/tests/jiva/volume/suite_test.go +++ b/tests/jiva/volume/suite_test.go @@ -36,11 +36,11 @@ import ( ) var ( - nsName = "jiva-volume-ns" + namespace = "jiva-volume-ns" scName = "jiva-volume-sc" openebsCASConfigValue = "- name: ReplicaCount\n Value: " openebsProvisioner = "openebs.io/provisioner-iscsi" - nsObj *corev1.Namespace + namespaceObj *corev1.Namespace scObj *storagev1.StorageClass annotations = map[string]string{} ) @@ -80,36 +80,36 @@ var _ = BeforeSuite(func() { Expect(podCount).To(Equal(1)) By("building a namespace") - nsObj, err = ns.NewBuilder(). - WithName(nsName). + namespaceObj, err = ns.NewBuilder(). + WithGenerateName(namespace). APIObject() - Expect(err).ShouldNot(HaveOccurred(), "while building namespace {%s}", nsName) + Expect(err).ShouldNot(HaveOccurred(), "while building namespace {%s}", namespace) By("building a storageclass") scObj, err = sc.NewBuilder(). - WithName(scName). + WithGenerateName(scName). WithAnnotations(annotations). 
WithProvisioner(openebsProvisioner).Build() Expect(err).ShouldNot(HaveOccurred(), "while building storageclass {%s}", scName) By("creating a namespace") - _, err = ops.NSClient.Create(nsObj) - Expect(err).To(BeNil(), "while creating storageclass {%s}", nsObj.Name) + namespaceObj, err = ops.NSClient.Create(namespaceObj) + Expect(err).To(BeNil(), "while creating namespace {%s}", namespaceObj.GenerateName) By("creating a storageclass") - _, err = ops.SCClient.Create(scObj) - Expect(err).To(BeNil(), "while creating storageclass {%s}", scObj.Name) + scObj, err = ops.SCClient.Create(scObj) + Expect(err).To(BeNil(), "while creating storageclass {%s}", scObj.GenerateName) }) var _ = AfterSuite(func() { By("deleting storageclass") - err := ops.SCClient.Delete(scName, &metav1.DeleteOptions{}) + err = ops.SCClient.Delete(scObj.Name, &metav1.DeleteOptions{}) Expect(err).To(BeNil(), "while deleting storageclass {%s}", scObj.Name) By("deleting namespace") - err = ops.NSClient.Delete(nsName, &metav1.DeleteOptions{}) - Expect(err).To(BeNil(), "while deleting namespace {%s}", nsObj.Name) + err = ops.NSClient.Delete(namespaceObj.Name, &metav1.DeleteOptions{}) + Expect(err).To(BeNil(), "while deleting namespace {%s}", namespaceObj.Name) }) diff --git a/tests/kubernetes/deployment/app_test.go b/tests/kubernetes/deployment/app_test.go new file mode 100644 index 0000000000..68948aed4a --- /dev/null +++ b/tests/kubernetes/deployment/app_test.go @@ -0,0 +1,98 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package app + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + con "github.com/openebs/maya/pkg/kubernetes/container/v1alpha1" + deploy "github.com/openebs/maya/pkg/kubernetes/deployment/appsv1/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + deployName = "busybox-deploy" + label = "demo=deployment" + deployObj *deploy.Deploy + err error + command []string + labelselector = map[string]string{ + "demo": "deployment", + } +) + +var _ = Describe("TEST DEPLOYMENT CREATION ", func() { + + When("deployment with busybox image is created", func() { + It("should create a deployment and a running pod", func() { + + command = append(command, "sleep", "3600") + + By("building a deployment") + deployObj, err = deploy.NewBuilder(). + WithName(deployName). + WithNamespace(namespaceObj.Name). + WithLabelsAndSelector(labelselector). + WithContainerBuilder( + con.NewBuilder(). + WithName("busybox"). + WithImage("busybox"). + WithCommand(command), + ). 
+ Build() + Expect(err).ShouldNot( + HaveOccurred(), + "while building delpoyment {%s} in namespace {%s}", + deployName, + namespaceObj.Name, + ) + + By("creating above deployment") + _, err = ops.DeployClient.WithNamespace(namespaceObj.Name).Create(deployObj.Object) + Expect(err).To( + BeNil(), + "while creating deployment {%s} in namespace {%s}", + deployName, + namespaceObj.Name, + ) + + By("verifying pod count as 1") + podCount := ops.GetPodRunningCountEventually(namespaceObj.Name, label, 1) + Expect(podCount).To(Equal(1), "while verifying pod count") + }) + }) + + When("deployment is deleted", func() { + It("should not have any deployment or running pod", func() { + + By("deleting above deployment") + err := ops.DeployClient.WithNamespace(namespaceObj.Name).Delete(deployName, &metav1.DeleteOptions{}) + Expect(err).To( + BeNil(), + "while deleting deployment {%s} in namespace {%s}", + deployName, + namespaceObj.Name, + ) + + By("verifying pod count as 0") + podCount := ops.GetPodRunningCountEventually(namespaceObj.Name, label, 0) + Expect(podCount).To(Equal(0), "while verifying pod count") + + }) + }) + +}) diff --git a/tests/kubernetes/deployment/suite_test.go b/tests/kubernetes/deployment/suite_test.go new file mode 100644 index 0000000000..936fa93d86 --- /dev/null +++ b/tests/kubernetes/deployment/suite_test.go @@ -0,0 +1,72 @@ +/* +Copyright 2019 The OpenEBS Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package app + +import ( + "flag" + + "testing" + + . 
"github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/openebs/maya/tests" + + ns "github.com/openebs/maya/pkg/kubernetes/namespace/v1alpha1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + // auth plugins + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" +) + +var ( + kubeConfigPath string + namespace = "application-ns" + namespaceObj *corev1.Namespace +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Test application deployment") +} + +func init() { + flag.StringVar(&kubeConfigPath, "kubeconfig", "", "path to kubeconfig to invoke kubernetes API calls") +} + +var ops *tests.Operations + +var _ = BeforeSuite(func() { + + ops = tests.NewOperations(tests.WithKubeConfigPath(kubeConfigPath)) + + By("building a namespace") + namespaceObj, err = ns.NewBuilder(). + WithGenerateName(namespace). + APIObject() + Expect(err).ShouldNot(HaveOccurred(), "while building namespace {%s}", namespaceObj.GenerateName) + + By("creating a namespace") + namespaceObj, err = ops.NSClient.Create(namespaceObj) + Expect(err).To(BeNil(), "while creating namespace {%s}", namespaceObj.GenerateName) + +}) + +var _ = AfterSuite(func() { + + By("deleting namespace") + err = ops.NSClient.Delete(namespaceObj.Name, &metav1.DeleteOptions{}) + Expect(err).To(BeNil(), "while deleting namespace {%s}", namespaceObj.Name) + +}) diff --git a/tests/localpv/hostpath_test.go b/tests/localpv/hostpath_test.go new file mode 100644 index 0000000000..cc6298655b --- /dev/null +++ b/tests/localpv/hostpath_test.go @@ -0,0 +1,162 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package localpv + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + container "github.com/openebs/maya/pkg/kubernetes/container/v1alpha1" + deploy "github.com/openebs/maya/pkg/kubernetes/deployment/appsv1/v1alpha1" + pvc "github.com/openebs/maya/pkg/kubernetes/persistentvolumeclaim/v1alpha1" + volume "github.com/openebs/maya/pkg/kubernetes/volume/v1alpha1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + deployName = "busybox-deploy" + label = "demo=deployment" + deployObj *deploy.Deploy + labelselector = map[string]string{ + "demo": "deployment", + } +) + +var _ = Describe("TEST LOCAL PV", func() { + + When("pvc with storageclass openebs-hostpath is created", func() { + It("should create a pvc ", func() { + var ( + scName = "openebs-hostpath" + ) + + By("building a pvc") + pvcObj, err = pvc.NewBuilder(). + WithName(pvcName). + WithNamespace(namespaceObj.Name). + WithStorageClass(scName). + WithAccessModes(accessModes). + WithCapacity(capacity).Build() + Expect(err).ShouldNot( + HaveOccurred(), + "while building pvc {%s} in namespace {%s}", + pvcName, + namespaceObj.Name, + ) + + By("creating above pvc") + _, err = ops.PVCClient.WithNamespace(namespaceObj.Name).Create(pvcObj) + Expect(err).To( + BeNil(), + "while creating pvc {%s} in namespace {%s}", + pvcName, + namespaceObj.Name, + ) + }) + }) + + When("deployment with busybox image is created", func() { + It("should create a deployment and a running pod", func() { + + By("building a deployment") + deployObj, err = deploy.NewBuilder(). 
+ WithName(deployName). + WithNamespace(namespaceObj.Name). + WithLabelsAndSelector(labelselector). + WithContainerBuilder( + container.NewBuilder(). + WithName("busybox"). + WithImage("busybox"). + WithCommand( + []string{ + "sleep", + "3600", + }, + ). + WithVolumeMounts( + []corev1.VolumeMount{ + corev1.VolumeMount{ + Name: "demo-vol1", + MountPath: "/mnt/store1", + }, + }, + ), + ). + WithVolumeBuilder( + volume.NewBuilder(). + WithName("demo-vol1"). + WithPVCSource(pvcName), + ). + Build() + Expect(err).ShouldNot( + HaveOccurred(), + "while building delpoyment {%s} in namespace {%s}", + deployName, + namespaceObj.Name, + ) + + By("creating above deployment") + _, err = ops.DeployClient.WithNamespace(namespaceObj.Name).Create(deployObj.Object) + Expect(err).To( + BeNil(), + "while creating deployment {%s} in namespace {%s}", + deployName, + namespaceObj.Name, + ) + + By("verifying pod count as 1") + podCount := ops.GetPodRunningCountEventually(namespaceObj.Name, label, 1) + Expect(podCount).To(Equal(1), "while verifying pod count") + + }) + }) + + When("deployment is deleted", func() { + It("should not have any deployment or running pod", func() { + + By("deleting above deployment") + err = ops.DeployClient.WithNamespace(namespaceObj.Name).Delete(deployName, &metav1.DeleteOptions{}) + Expect(err).To( + BeNil(), + "while deleting deployment {%s} in namespace {%s}", + deployName, + namespaceObj.Name, + ) + + By("verifying pod count as 0") + podCount := ops.GetPodRunningCountEventually(namespaceObj.Name, label, 0) + Expect(podCount).To(Equal(0), "while verifying pod count") + + }) + }) + + When("pvc with storageclass openebs-hostpath is deleted ", func() { + It("should delete the pvc", func() { + + By("deleting above pvc") + err = ops.PVCClient.Delete(pvcName, &metav1.DeleteOptions{}) + Expect(err).To( + BeNil(), + "while deleting pvc {%s} in namespace {%s}", + pvcName, + namespaceObj.Name, + ) + + }) + }) + +}) diff --git a/tests/localpv/suite_test.go 
b/tests/localpv/suite_test.go new file mode 100644 index 0000000000..3cdb12098f --- /dev/null +++ b/tests/localpv/suite_test.go @@ -0,0 +1,94 @@ +/* +Copyright 2019 The OpenEBS Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package localpv + +import ( + "flag" + + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/openebs/maya/tests" + "github.com/openebs/maya/tests/artifacts" + + ns "github.com/openebs/maya/pkg/kubernetes/namespace/v1alpha1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + // auth plugins + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" +) + +var ( + kubeConfigPath string + namespace = "localpv-ns" + namespaceObj *corev1.Namespace + pvcObj *corev1.PersistentVolumeClaim + pvcName = "pvc-hp" + accessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce} + capacity = "2Gi" + err error +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Test application deployment") +} + +func init() { + flag.StringVar(&kubeConfigPath, "kubeconfig", "", "path to kubeconfig to invoke kubernetes API calls") +} + +var ops *tests.Operations + +var _ = BeforeSuite(func() { + + ops = tests.NewOperations(tests.WithKubeConfigPath(kubeConfigPath)) + + By("waiting for maya-apiserver pod to come into running state") + podCount := ops.GetPodRunningCountEventually( + string(artifacts.OpenebsNamespace), + string(artifacts.MayaAPIServerLabelSelector), + 1, + ) + Expect(podCount).To(Equal(1)) 
+ + By("waiting for openebs-localpv-provisioner pod to come into running state") + podCount = ops.GetPodRunningCountEventually( + string(artifacts.OpenebsNamespace), + string(artifacts.OpenEBSLocalPVProvisionerLabelSelector), + 1, + ) + Expect(podCount).To(Equal(1)) + + By("building a namespace") + namespaceObj, err = ns.NewBuilder(). + WithGenerateName(namespace). + APIObject() + Expect(err).ShouldNot(HaveOccurred(), "while building namespace {%s}", namespaceObj.GenerateName) + + By("creating above namespace") + namespaceObj, err = ops.NSClient.Create(namespaceObj) + Expect(err).To(BeNil(), "while creating namespace {%s}", namespaceObj.GenerateName) + +}) + +var _ = AfterSuite(func() { + + By("deleting namespace") + err = ops.NSClient.Delete(namespaceObj.Name, &metav1.DeleteOptions{}) + Expect(err).To(BeNil(), "while deleting namespace {%s}", namespaceObj.Name) + +}) diff --git a/tests/operations.go b/tests/operations.go index 6549be0ef8..b3697cd772 100644 --- a/tests/operations.go +++ b/tests/operations.go @@ -26,8 +26,10 @@ import ( apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" csp "github.com/openebs/maya/pkg/cstorpool/v1alpha3" cv "github.com/openebs/maya/pkg/cstorvolume/v1alpha1" + cvr "github.com/openebs/maya/pkg/cstorvolumereplica/v1alpha1" errors "github.com/openebs/maya/pkg/errors/v1alpha1" kubeclient "github.com/openebs/maya/pkg/kubernetes/client/v1alpha1" + deploy "github.com/openebs/maya/pkg/kubernetes/deployment/appsv1/v1alpha1" ns "github.com/openebs/maya/pkg/kubernetes/namespace/v1alpha1" pvc "github.com/openebs/maya/pkg/kubernetes/persistentvolumeclaim/v1alpha1" pod "github.com/openebs/maya/pkg/kubernetes/pod/v1alpha1" @@ -38,6 +40,7 @@ import ( templatefuncs "github.com/openebs/maya/pkg/templatefuncs/v1alpha1" unstruct "github.com/openebs/maya/pkg/unstruct/v1alpha2" result "github.com/openebs/maya/pkg/upgrade/result/v1alpha1" + "github.com/openebs/maya/tests/artifacts" corev1 "k8s.io/api/core/v1" k8serrors 
"k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -46,7 +49,7 @@ import ( ) const ( - maxRetry = 50 + maxRetry = 30 ) // Options holds the args used for exec'ing into the pod @@ -69,8 +72,10 @@ type Operations struct { SPCClient *spc.Kubeclient SVCClient *svc.Kubeclient CVClient *cv.Kubeclient + CVRClient *cvr.Kubeclient URClient *result.Kubeclient UnstructClient *unstruct.Kubeclient + DeployClient *deploy.Kubeclient kubeConfigPath string } @@ -158,12 +163,47 @@ func (ops *Operations) withDefaults() { if ops.CVClient == nil { ops.CVClient = cv.NewKubeclient(cv.WithKubeConfigPath(ops.kubeConfigPath)) } + if ops.CVRClient == nil { + ops.CVRClient = cvr.NewKubeclient(cvr.WithKubeConfigPath(ops.kubeConfigPath)) + } if ops.URClient == nil { ops.URClient = result.NewKubeClient(result.WithKubeConfigPath(ops.kubeConfigPath)) } if ops.UnstructClient == nil { ops.UnstructClient = unstruct.NewKubeClient(unstruct.WithKubeConfigPath(ops.kubeConfigPath)) } + if ops.DeployClient == nil { + ops.DeployClient = deploy.NewKubeClient(deploy.WithKubeConfigPath(ops.kubeConfigPath)) + } +} + +// VerifyOpenebs verify running state of required openebs control plane components +func (ops *Operations) VerifyOpenebs(expectedPodCount int) *Operations { + By("waiting for maya-apiserver pod to come into running state") + podCount := ops.GetPodRunningCountEventually( + string(artifacts.OpenebsNamespace), + string(artifacts.MayaAPIServerLabelSelector), + expectedPodCount, + ) + Expect(podCount).To(Equal(expectedPodCount)) + + By("waiting for openebs-provisioner pod to come into running state") + podCount = ops.GetPodRunningCountEventually( + string(artifacts.OpenebsNamespace), + string(artifacts.OpenEBSProvisionerLabelSelector), + expectedPodCount, + ) + Expect(podCount).To(Equal(expectedPodCount)) + + By("Verifying 'admission-server' pod status as running") + _ = ops.GetPodRunningCountEventually(string(artifacts.OpenebsNamespace), + 
string(artifacts.OpenEBSAdmissionServerLabelSelector), + expectedPodCount, + ) + + Expect(podCount).To(Equal(expectedPodCount)) + + return ops } // GetPodRunningCountEventually gives the number of pods running eventually @@ -179,9 +219,9 @@ func (ops *Operations) GetPodRunningCountEventually(namespace, lselector string, return podCount } -// GetCstorVolumeCountEventually gives the count of cstorvolume based on +// GetCstorVolumeCount gives the count of cstorvolume based on // selecter -func (ops *Operations) GetCstorVolumeCountEventually(namespace, lselector string, expectedCVCount int) int { +func (ops *Operations) GetCstorVolumeCount(namespace, lselector string, expectedCVCount int) int { var cvCount int for i := 0; i < maxRetry; i++ { cvCount = ops.GetCVCount(namespace, lselector) @@ -193,6 +233,26 @@ func (ops *Operations) GetCstorVolumeCountEventually(namespace, lselector string return cvCount } +// GetCstorVolumeCountEventually gives the count of cstorvolume based on +// selector eventually +func (ops *Operations) GetCstorVolumeCountEventually(namespace, lselector string, expectedCVCount int) bool { + return Eventually(func() int { + cvCount := ops.GetCVCount(namespace, lselector) + return cvCount + }, + 60, 10).Should(Equal(expectedCVCount)) +} + +// GetCstorVolumeReplicaCountEventually gives the count of cstorvolumereplica based on +// selector eventually +func (ops *Operations) GetCstorVolumeReplicaCountEventually(namespace, lselector string, expectedCVRCount int) bool { + return Eventually(func() int { + cvCount := ops.GetCstorVolumeReplicaCount(namespace, lselector) + return cvCount + }, + 60, 10).Should(Equal(expectedCVRCount)) +} + // GetPodRunningCount gives number of pods running currently func (ops *Operations) GetPodRunningCount(namespace, lselector string) int { pods, err := ops.PodClient. 
@@ -219,6 +279,19 @@ func (ops *Operations) GetCVCount(namespace, lselector string) int { Len() } +// GetCstorVolumeReplicaCount gives cstorvolumereplica healthy count currently based on selector +func (ops *Operations) GetCstorVolumeReplicaCount(namespace, lselector string) int { + cvrs, err := ops.CVRClient. + List(metav1.ListOptions{LabelSelector: lselector}) + Expect(err).ShouldNot(HaveOccurred()) + return cvr. + ListBuilder(). + WithAPIList(cvrs). + WithFilter(cvr.IsHealthy()). + List(). + Len() +} + // IsPVCBound checks if the pvc is bound or not func (ops *Operations) IsPVCBound(pvcName string) bool { volume, err := ops.PVCClient. @@ -227,6 +300,32 @@ func (ops *Operations) IsPVCBound(pvcName string) bool { return pvc.NewForAPIObject(volume).IsBound() } +// IsPVCBoundEventually checks if the pvc is bound or not eventually +func (ops *Operations) IsPVCBoundEventually(pvcName string) bool { + return Eventually(func() bool { + volume, err := ops.PVCClient. + Get(pvcName, metav1.GetOptions{}) + Expect(err).ShouldNot(HaveOccurred()) + return pvc.NewForAPIObject(volume).IsBound() + }, + 60, 10). + Should(BeTrue()) +} + +// IsPodRunningEventually checks if the pod is in running state eventually +func (ops *Operations) IsPodRunningEventually(namespace, podName string) bool { + return Eventually(func() bool { + p, err := ops.PodClient. + WithNamespace(namespace). + Get(podName, metav1.GetOptions{}) + Expect(err).ShouldNot(HaveOccurred()) + return pod.NewForAPIObject(p). + IsRunning() + }, + 150, 10). + Should(BeTrue()) +} + // GetSnapshotTypeEventually returns type of snapshot eventually func (ops *Operations) GetSnapshotTypeEventually(snapName string) string { var snaptype string @@ -257,7 +356,7 @@ func (ops *Operations) IsSnapshotDeleted(snapName string) bool { _, err := ops.SnapClient. 
Get(snapName, metav1.GetOptions{}) if err != nil { - return true + return isNotFound(err) } time.Sleep(5 * time.Second) } @@ -276,6 +375,18 @@ func (ops *Operations) IsPVCDeleted(pvcName string) bool { return false } +// IsPodDeletedEventually checks if the pod is deleted or not eventually +func (ops *Operations) IsPodDeletedEventually(namespace, podName string) bool { + return Eventually(func() bool { + _, err := ops.PodClient. + WithNamespace(namespace). + Get(podName, metav1.GetOptions{}) + return isNotFound(err) + }, + 60, 10). + Should(BeTrue()) +} + // GetPVNameFromPVCName gives the pv name for the given pvc func (ops *Operations) GetPVNameFromPVCName(pvcName string) string { p, err := ops.PVCClient. @@ -315,6 +426,24 @@ func (ops *Operations) DeleteCSP(spcName string, deleteCount int) { } } +// GetCSPCount gets csp count based on spcName +func (ops *Operations) GetCSPCount(spcName string, expectedCSPCount int) int { + var cspCount int + for i := 0; i < maxRetry; i++ { + cspAPIList, err := ops.CSPClient.List(metav1.ListOptions{}) + Expect(err).To(BeNil()) + cspCount = csp. + ListBuilderForAPIObject(cspAPIList). + List(). + Len() + if cspCount == expectedCSPCount { + return cspCount + } + time.Sleep(5 * time.Second) + } + return cspCount +} + // GetHealthyCSPCount gets healthy csp based on spcName func (ops *Operations) GetHealthyCSPCount(spcName string, expectedCSPCount int) int { var cspCount int @@ -334,6 +463,22 @@ func (ops *Operations) GetHealthyCSPCount(spcName string, expectedCSPCount int) return cspCount } +// GetHealthyCSPCountEventually gets healthy csp based on spcName +func (ops *Operations) GetHealthyCSPCountEventually(spcName string, expectedCSPCount int) bool { + return Eventually(func() int { + cspAPIList, err := ops.CSPClient.List(metav1.ListOptions{}) + Expect(err).To(BeNil()) + count := csp. + ListBuilderForAPIObject(cspAPIList). + List(). + Filter(csp.HasLabel(string(apis.StoragePoolClaimCPK), spcName), csp.IsStatus("Healthy")). 
+ Len() + return count + }, + 60, 10). + Should(Equal(expectedCSPCount)) +} + // ExecPod executes arbitrary command inside the pod func (ops *Operations) ExecPod(opts *Options) ([]byte, error) { var ( diff --git a/tests/sts/sts_test.go b/tests/sts/sts_test.go index 5e8d34eb31..61b61532d7 100644 --- a/tests/sts/sts_test.go +++ b/tests/sts/sts_test.go @@ -145,7 +145,7 @@ var _ = Describe("StatefulSet", func() { // Check for CVR to get healthy Eventually(func() int { cvrs, err := cvr. - KubeClient(cvr.WithNamespace("")). + NewKubeclient(cvr.WithNamespace("")). List(metav1.ListOptions{LabelSelector: replicaAntiAffinityLabel}) Expect(err).ShouldNot(HaveOccurred()) return cvr. @@ -270,7 +270,7 @@ var _ = Describe("StatefulSet", func() { Expect(pvcList.Len()).Should(Equal(3), "pvc count should be "+string(3)) cvrs, err := cvr. - KubeClient(cvr.WithNamespace("")). + NewKubeclient(cvr.WithNamespace("")). List(metav1.ListOptions{LabelSelector: replicaAntiAffinityLabel}) Expect(cvrs.Items).Should(HaveLen(3), "cvr count should be "+string(3)) diff --git a/tests/upgrade/0.8.2-0.9.0/jiva/suite_test.go b/tests/upgrade/0.8.2-0.9.0/jiva/suite_test.go index 7974f81a04..a52e7db108 100644 --- a/tests/upgrade/0.8.2-0.9.0/jiva/suite_test.go +++ b/tests/upgrade/0.8.2-0.9.0/jiva/suite_test.go @@ -15,7 +15,6 @@ package jiva import ( "flag" - "os" "strconv" "testing" @@ -23,7 +22,6 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" - "github.com/openebs/maya/pkg/client/k8s/v1alpha1" pvc "github.com/openebs/maya/pkg/kubernetes/persistentvolumeclaim/v1alpha1" sc "github.com/openebs/maya/pkg/kubernetes/storageclass/v1alpha1" unstruct "github.com/openebs/maya/pkg/unstruct/v1alpha2" @@ -76,10 +74,6 @@ var _ = BeforeSuite(func() { ops = tests.NewOperations(tests.WithKubeConfigPath(kubeConfigPath)) openebsCASConfigValue = openebsCASConfigValue + strconv.Itoa(replicaCount) - // Setting the path in environemnt variable - err := os.Setenv(string(v1alpha1.KubeConfigEnvironmentKey), kubeConfigPath) - Expect(err).ShouldNot(HaveOccurred()) - By("applying openebs 0.8.2") applyFromURL(openebsURL) @@ -97,7 +91,7 @@ var _ = BeforeSuite(func() { } By("building a storageclass") - scObj, err = sc.NewBuilder(). + scObj, err := sc.NewBuilder(). WithName(scName). WithAnnotations(annotations). WithProvisioner(openebsProvisioner).Build() @@ -131,8 +125,8 @@ var _ = BeforeSuite(func() { ) By("verifying controller pod count ") - controllerPodCount := ops.GetPodRunningCountEventually(nsName, ctrlLabel, replicaCount) - Expect(controllerPodCount).To(Equal(replicaCount), "while checking controller pod count") + controllerPodCount := ops.GetPodRunningCountEventually(nsName, ctrlLabel, 1) + Expect(controllerPodCount).To(Equal(1), "while checking controller pod count") By("verifying replica pod count ") replicaPodCount := ops.GetPodRunningCountEventually(nsName, replicaLabel, replicaCount) @@ -165,7 +159,7 @@ var _ = AfterSuite(func() { By("deleting storageclass") err = ops.SCClient.Delete(scName, &metav1.DeleteOptions{}) - Expect(err).To(BeNil(), "while deleting storageclass {%s}", scObj.Name) + Expect(err).To(BeNil(), "while deleting storageclass {%s}", scName) By("cleanup") deleteFromURL(jobURL) @@ -173,13 +167,14 @@ var _ = AfterSuite(func() { deleteFromURL(crURL) deleteFromURL(rbacURL) deleteFromURL(openebsURL) - By("waiting for 
maya-apiserver pod to come into running state") + By("waiting for maya-apiserver pod to terminate") podCount := ops.GetPodRunningCountEventually( string(artifacts.OpenebsNamespace), string(artifacts.MayaAPIServerLabelSelector), 0, ) Expect(podCount).To(Equal(0)) + // deleting all completed pods podList, err := ops.PodClient. WithNamespace("default"). List(metav1.ListOptions{}) diff --git a/tests/upgrade/0.8.2-0.9.0/jiva/volume_test.go b/tests/upgrade/0.8.2-0.9.0/jiva/volume_test.go index ed2e829dd1..895edcdada 100644 --- a/tests/upgrade/0.8.2-0.9.0/jiva/volume_test.go +++ b/tests/upgrade/0.8.2-0.9.0/jiva/volume_test.go @@ -17,8 +17,14 @@ limitations under the License. package jiva import ( + "encoding/json" + "strconv" + . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + jiva "github.com/openebs/maya/pkg/client/jiva" + "github.com/openebs/maya/tests" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var ( @@ -26,6 +32,7 @@ var ( urLable = "upgradejob.openebs.io/name=jiva-volume-upgrade,upgradeitem.openebs.io/name=" upgradedLabel = "openebs.io/persistent-volume-claim=jiva-volume-claim,openebs.io/version=0.9.0" data map[string]string + nodes []string ) var _ = Describe("[jiva] TEST VOLUME UPGRADE", func() { @@ -33,6 +40,13 @@ var _ = Describe("[jiva] TEST VOLUME UPGRADE", func() { When("jiva pvc is upgraded", func() { It("should create new controller and replica pods of version 0.9.0", func() { + // fetching replica pods before upgrade + podList, err := ops.PodClient.List(metav1.ListOptions{LabelSelector: replicaLabel}) + Expect(err).ShouldNot(HaveOccurred(), "while listing replica pod") + for _, pod := range podList.Items { + nodes = append(nodes, pod.Spec.NodeName) + } + // fetching name of pv to update the configmap with the resource to // be upgraded pvName := ops.GetPVNameFromPVCName(pvcName) @@ -61,8 +75,8 @@ var _ = Describe("[jiva] TEST VOLUME UPGRADE", func() { Expect(status).To(Equal(true), "while checking upgraderesult") By("verifying controller pod 
count") - controllerPodCount := ops.GetPodRunningCountEventually("default", ctrlLabel, replicaCount) - Expect(controllerPodCount).To(Equal(replicaCount), "while checking controller pod count") + controllerPodCount := ops.GetPodRunningCountEventually("default", ctrlLabel, 1) + Expect(controllerPodCount).To(Equal(1), "while checking controller pod count") By("verifying replica pod count") replicaPodCount := ops.GetPodRunningCountEventually("default", replicaLabel, replicaCount) @@ -72,6 +86,50 @@ var _ = Describe("[jiva] TEST VOLUME UPGRADE", func() { podCount := ops.GetPodRunningCountEventually("default", upgradedLabel, replicaCount+1) Expect(podCount).To(Equal(replicaCount+1), "while checking pod version") + By("verifying node stickiness after upgrade") + // fetching replica pods after upgrade + podList, err = ops.PodClient.List(metav1.ListOptions{LabelSelector: replicaLabel}) + Expect(err).ShouldNot(HaveOccurred(), "while listing replica pod") + for _, pod := range podList.Items { + Expect(nodes).To( + ContainElement(pod.Spec.NodeName), + "while verifying node stickness of replicas", + ) + } + + By("verifying registered replica count and replication factor") + podList, err = ops.PodClient.List(metav1.ListOptions{LabelSelector: ctrlLabel}) + Expect(err).ShouldNot(HaveOccurred(), "while listing controller pod") + + replicationFactor := "" + for _, env := range podList.Items[0].Spec.Containers[0].Env { + if env.Name == "REPLICATION_FACTOR" { + replicationFactor = env.Value + } + } + Expect(replicationFactor).ToNot(Equal(""), "while fetching replication factor") + + curl := "curl http://localhost:9501/v1/volumes" + cmd := []string{"/bin/bash", "-c", curl} + opts := tests.NewOptions(). + WithPodName(podList.Items[0].Name). + WithNamespace(podList.Items[0].Namespace). + WithContainer(podList.Items[0].Spec.Containers[0].Name). + WithCommand(cmd...) 
+ + out, err := ops.ExecPod(opts) + Expect(err).To(BeNil(), "while executing command in container ", cmd) + + volumes := jiva.VolumeCollection{} + err = json.Unmarshal(out, &volumes) + Expect(err).To(BeNil(), "while unmarshalling volumes", string(out)) + + registeredReplicaCount := strconv.Itoa(volumes.Data[0].ReplicaCount) + Expect(registeredReplicaCount).To( + Equal(replicationFactor), + "while verifying registered replica count as replication factor", + ) + }) })