diff --git a/e2e-tests/apps/busybox/deployers/busybox_statefulset.yml b/e2e-tests/apps/busybox/deployers/busybox_statefulset.yml index 4ac10209..6ccac103 100644 --- a/e2e-tests/apps/busybox/deployers/busybox_statefulset.yml +++ b/e2e-tests/apps/busybox/deployers/busybox_statefulset.yml @@ -33,4 +33,4 @@ spec: storageClassName: testclass resources: requests: - storage: teststorage + storage: teststorage \ No newline at end of file diff --git a/e2e-tests/apps/busybox/deployers/run_litmus_test.yml b/e2e-tests/apps/busybox/deployers/run_e2e_test.yml similarity index 76% rename from e2e-tests/apps/busybox/deployers/run_litmus_test.yml rename to e2e-tests/apps/busybox/deployers/run_e2e_test.yml index a2654ca0..904c2fd5 100644 --- a/e2e-tests/apps/busybox/deployers/run_litmus_test.yml +++ b/e2e-tests/apps/busybox/deployers/run_e2e_test.yml @@ -2,16 +2,16 @@ apiVersion: batch/v1 kind: Job metadata: - generateName: litmus-busybox-deploy- - namespace: litmus + generateName: busybox-deploy- + namespace: e2e spec: template: metadata: - name: litmus + name: busybox-deploy labels: - app: busybox-litmus + app: busybox spec: - serviceAccountName: litmus + serviceAccountName: e2e restartPolicy: Never containers: - name: ansibletest @@ -23,33 +23,33 @@ spec: # Name of the storage class to use for volume provisioning - name: STORAGE_CLASS - value: '' + value: 'openebs-lvmsc' # This is the namespace where busybox application will be deployed - name: APP_NAMESPACE - value: '' + value: 'busybox' # Application label for busybox deployment/statefulset in `key=value` format - name: APP_LABEL - value: '' + value: 'app=busybox' # Application PVC name - name: APP_PVC - value: '' + value: 'busybox-pvc' # Persistent volume storage capacity (for e.g, 5Gi) - name: PV_CAPACITY - value: '' + value: '5Gi' # Use: `statefuleset` to deploy busybox application as statefulset # Use: `deployment` to deploy busybox application as deployment - name: DEPLOY_TYPE - value: '' + value: 'deployment' # Use: `provision` to deploy the application # Use: `deprovision` to deprovision the application - name: ACTION - value: '' + value: 'provision' command: ["/bin/bash"] - args: ["-c", "ansible-playbook ./e2e-tests/apps/busybox/deployers/test.yml -i /etc/ansible/hosts -v; exit 0"] + args: ["-c", "ansible-playbook ./e2e-tests/apps/busybox/deployers/test.yml -i /etc/ansible/hosts -v; exit 0"] \ No newline at end of file diff --git a/e2e-tests/apps/busybox/deployers/test.yml b/e2e-tests/apps/busybox/deployers/test.yml index ba1c2a0d..898582cf 100644 --- a/e2e-tests/apps/busybox/deployers/test.yml +++ b/e2e-tests/apps/busybox/deployers/test.yml @@ -12,8 +12,8 @@ ## Generating the testname for deployment - include_tasks: /e2e-tests/hack/create_testname.yml - ## RECORD START-OF-TEST IN LITMUS RESULT CR - - include_tasks: /e2e-tests/hack/update_litmus_result_resource.yml + ## RECORD START-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml vars: status: 'SOT' @@ -72,7 +72,7 @@ flag: "Fail" always: - ## RECORD END-OF-TEST IN LITMUS RESULT CR - - include_tasks: /e2e-tests/hack/update_litmus_result_resource.yml + ## RECORD END-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml vars: - status: 'EOT' + status: 'EOT' \ No newline at end of file diff --git a/e2e-tests/apps/busybox/deployers/test_vars.yml b/e2e-tests/apps/busybox/deployers/test_vars.yml index 13959006..7264b977 100644 --- a/e2e-tests/apps/busybox/deployers/test_vars.yml +++ 
b/e2e-tests/apps/busybox/deployers/test_vars.yml @@ -16,6 +16,4 @@ app_pvc: "{{ lookup('env','APP_PVC') }}" deploy_type: "{{ lookup('env','DEPLOY_TYPE') }}" -action: "{{ lookup('env','ACTION') }}" - - +action: "{{ lookup('env','ACTION') }}" \ No newline at end of file diff --git a/e2e-tests/apps/busybox/liveness/busybox_liveness.yml b/e2e-tests/apps/busybox/liveness/busybox_liveness.yml index 53b08e5d..84781b85 100644 --- a/e2e-tests/apps/busybox/liveness/busybox_liveness.yml +++ b/e2e-tests/apps/busybox/liveness/busybox_liveness.yml @@ -1,5 +1,4 @@ --- -# Source: openebs/templates/clusterrole.yaml apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole metadata: @@ -78,4 +77,4 @@ spec: value: pod-name command: ["/bin/bash"] - args: ["-c", "./liveness.sh; exit 0"] + args: ["-c", "./liveness.sh; exit 0"] \ No newline at end of file diff --git a/e2e-tests/apps/busybox/liveness/run_litmus_test.yml b/e2e-tests/apps/busybox/liveness/run_e2e_test.yml similarity index 82% rename from e2e-tests/apps/busybox/liveness/run_litmus_test.yml rename to e2e-tests/apps/busybox/liveness/run_e2e_test.yml index 70d4c5c8..995ff346 100644 --- a/e2e-tests/apps/busybox/liveness/run_litmus_test.yml +++ b/e2e-tests/apps/busybox/liveness/run_e2e_test.yml @@ -2,14 +2,14 @@ apiVersion: batch/v1 kind: Job metadata: - generateName: litmus-busybox-liveness- - namespace: litmus + generateName: busybox-liveness- + namespace: e2e spec: activeDeadlineSeconds: 5400 template: metadata: - name: litmus-busybox-liveness - namespace: litmus + name: busybox-liveness + namespace: e2e labels: liveness: busybox-liveness @@ -17,7 +17,7 @@ spec: infra-aid: liveness spec: - serviceAccountName: litmus + serviceAccountName: e2e restartPolicy: Never containers: @@ -42,16 +42,16 @@ spec: # This is the namespace where busybox application is running - name: APP_NAMESPACE - value: '' + value: 'busybox' # Application label for busybox in `key=value` format - name: APP_LABEL - value: '' + value: 'app=busybox' # Use: `provision` to apply the liveness-probe checks for busybox application # Use: `deprovision` to deprovision the liveness-probe - name: ACTION - value: '' + value: 'provision' command: ["/bin/bash"] - args: ["-c", "ansible-playbook ./e2e-tests/apps/busybox/liveness/test.yml -i /etc/ansible/hosts -v; exit 0"] + args: ["-c", "ansible-playbook ./e2e-tests/apps/busybox/liveness/test.yml -i /etc/ansible/hosts -v; exit 0"] \ No newline at end of file diff --git a/e2e-tests/apps/busybox/liveness/test.yml b/e2e-tests/apps/busybox/liveness/test.yml index 339089b1..b0362203 100644 --- a/e2e-tests/apps/busybox/liveness/test.yml +++ b/e2e-tests/apps/busybox/liveness/test.yml @@ -20,8 +20,8 @@ when: lookup('env','RUN_ID') - ## RECORD START-OF-TEST IN LITMUS RESULT CR - - include_tasks: /e2e-tests/hack/update_litmus_result_resource.yml + ## RECORD START-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml vars: status: 'SOT' @@ -89,8 +89,8 @@ always: - ## RECORD END-OF-TEST IN LITMUS RESULT CR - - include_tasks: /e2e-tests/hack/update_litmus_result_resource.yml + ## RECORD END-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml vars: status: 'EOT' \ No newline at end of file diff --git a/e2e-tests/apps/percona/deployers/percona.yml b/e2e-tests/apps/percona/deployers/percona.yml index d75f5c11..51e19cd2 100644 --- a/e2e-tests/apps/percona/deployers/percona.yml +++ b/e2e-tests/apps/percona/deployers/percona.yml @@ -69,4 +69,4 @@ spec: - port: 3306 targetPort: 3306 
selector: - lkey: lvalue + lkey: lvalue \ No newline at end of file diff --git a/e2e-tests/apps/percona/deployers/run_litmus_test.yml b/e2e-tests/apps/percona/deployers/run_e2e_test.yml similarity index 76% rename from e2e-tests/apps/percona/deployers/run_litmus_test.yml rename to e2e-tests/apps/percona/deployers/run_e2e_test.yml index b8139bd1..16a0482a 100644 --- a/e2e-tests/apps/percona/deployers/run_litmus_test.yml +++ b/e2e-tests/apps/percona/deployers/run_e2e_test.yml @@ -2,17 +2,17 @@ apiVersion: batch/v1 kind: Job metadata: - generateName: litmus-percona-deploy- - namespace: litmus + generateName: percona-deploy- + namespace: e2e spec: template: metadata: - name: litmus + name: percona-deploy labels: app: percona-deployment spec: - serviceAccountName: litmus + serviceAccountName: e2e restartPolicy: Never containers: - name: ansibletest @@ -25,29 +25,28 @@ spec: # Name of the storage class to use for volume provisioning - name: STORAGE_CLASS - value: '' + value: 'openebs-lvmsc' # This is the namespace where percona application will be deployed - name: APP_NAMESPACE - value: '' + value: 'percona' # Application label for percona deployment in `key=value` format - name: APP_LABEL - value: '' + value: 'app=percona' # Application PVC name - name: APP_PVC - value: '' + value: 'percona-pvc' # Persistent volume storage capacity (for e.g, 5Gi) - name: PV_CAPACITY - value: '' + value: '5Gi' # Use: `provision` to deploy the application # Use: `deprovision` to deprovision the application - name: ACTION - value: '' + value: 'provision' command: ["/bin/bash"] - args: ["-c", "ansible-playbook ./e2e-tests/apps/percona/deployers/test.yml -i /etc/ansible/hosts -v; exit 0"] - \ No newline at end of file + args: ["-c", "ansible-playbook ./e2e-tests/apps/percona/deployers/test.yml -i /etc/ansible/hosts -v; exit 0"] \ No newline at end of file diff --git a/e2e-tests/apps/percona/deployers/test.yml b/e2e-tests/apps/percona/deployers/test.yml index e309a013..c6dd54c8 100644 --- a/e2e-tests/apps/percona/deployers/test.yml +++ b/e2e-tests/apps/percona/deployers/test.yml @@ -12,8 +12,8 @@ ## Generating the testname for deployment - include_tasks: /e2e-tests/hack/create_testname.yml - ## RECORD START-OF-TEST IN LITMUS RESULT CR - - include_tasks: "/e2e-tests/hack/update_litmus_result_resource.yml" + ## RECORD START-OF-TEST IN e2e RESULT CR + - include_tasks: "/e2e-tests/hack/update_e2e_result_resource.yml" vars: status: 'SOT' @@ -51,7 +51,7 @@ flag: "Fail" always: - ## RECORD END-OF-TEST IN LITMUS RESULT CR - - include_tasks: /e2e-tests/hack/update_litmus_result_resource.yml + ## RECORD END-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml vars: status: 'EOT' diff --git a/e2e-tests/apps/percona/deployers/test_vars.yml b/e2e-tests/apps/percona/deployers/test_vars.yml index 89bb78f1..874b695a 100644 --- a/e2e-tests/apps/percona/deployers/test_vars.yml +++ b/e2e-tests/apps/percona/deployers/test_vars.yml @@ -12,4 +12,4 @@ app_label: "{{ lookup('env','APP_LABEL') }}" app_pvc: "{{ lookup('env','APP_PVC') }}" -action: "{{ lookup('env','ACTION') }}" +action: "{{ lookup('env','ACTION') }}" \ No newline at end of file diff --git a/e2e-tests/apps/percona/workload/run_litmus_test.yml b/e2e-tests/apps/percona/workload/run_e2e_test.yml similarity index 85% rename from e2e-tests/apps/percona/workload/run_litmus_test.yml rename to e2e-tests/apps/percona/workload/run_e2e_test.yml index bfbb6549..9441188b 100644 --- a/e2e-tests/apps/percona/workload/run_litmus_test.yml +++ 
b/e2e-tests/apps/percona/workload/run_e2e_test.yml @@ -2,17 +2,17 @@ apiVersion: batch/v1 kind: Job metadata: - generateName: litmus-percona-loadgen- - namespace: litmus + generateName: percona-loadgen- + namespace: e2e spec: template: metadata: name: percona-loadgen - namespace: litmus + namespace: e2e labels: loadgen: percona-loadjob spec: - serviceAccountName: litmus + serviceAccountName: e2e restartPolicy: Never containers: - name: ansibletest @@ -24,10 +24,10 @@ spec: # This is the namespace where percona application is running - name: APP_NAMESPACE - value: '' + value: 'percona' - name: APP_LABEL - value: '' + value: 'app=percona' - name: LOADGEN_LABEL value: loadgen=percona-loadgen @@ -57,4 +57,4 @@ spec: value: "10" command: ["/bin/bash"] - args: ["-c", "ansible-playbook ./e2e-tests/apps/percona/workload/test.yml -i /etc/ansible/hosts -v; exit 0"] + args: ["-c", "ansible-playbook ./e2e-tests/apps/percona/workload/test.yml -i /etc/ansible/hosts -v; exit 0"] \ No newline at end of file diff --git a/e2e-tests/apps/percona/workload/test.yml b/e2e-tests/apps/percona/workload/test.yml index e56baafe..55741d70 100644 --- a/e2e-tests/apps/percona/workload/test.yml +++ b/e2e-tests/apps/percona/workload/test.yml @@ -20,8 +20,8 @@ when: lookup('env','RUN_ID') - # RECORD START-OF-TEST IN LITMUS RESULT CR - - include_tasks: /e2e-tests/hack/update_litmus_result_resource.yml + # RECORD START-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml vars: status: 'SOT' @@ -83,7 +83,7 @@ flag: "Fail" always: - ## RECORD END-OF-TEST IN LITMUS RESULT CR - - include_tasks: /e2e-tests/hack/update_litmus_result_resource.yml + ## RECORD END-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml vars: status: 'EOT' diff --git a/e2e-tests/apps/percona/workload/test_vars.yml b/e2e-tests/apps/percona/workload/test_vars.yml index 76e2862e..03fe5a56 100644 --- a/e2e-tests/apps/percona/workload/test_vars.yml +++ b/e2e-tests/apps/percona/workload/test_vars.yml @@ -24,4 +24,4 @@ test_warmup_period: "{{ lookup('env','TPCC_WARMUP_PERIOD') }}" test_interval: "{{ lookup('env','LOAD_INTERVAL') }}" -tpcc_conf: tpcc.conf +tpcc_conf: tpcc.conf \ No newline at end of file diff --git a/e2e-tests/apps/percona/workload/tpcc.conf b/e2e-tests/apps/percona/workload/tpcc.conf index b5a0cfe5..98fd3d0c 100644 --- a/e2e-tests/apps/percona/workload/tpcc.conf +++ b/e2e-tests/apps/percona/workload/tpcc.conf @@ -6,4 +6,4 @@ "warmup_period": "test_warmup_period", "run_duration": "test_duration", "interval": "test_interval" -} +} \ No newline at end of file diff --git a/e2e-tests/apps/percona/workload/tpcc_bench.yml b/e2e-tests/apps/percona/workload/tpcc_bench.yml index 21a04504..0539a508 100644 --- a/e2e-tests/apps/percona/workload/tpcc_bench.yml +++ b/e2e-tests/apps/percona/workload/tpcc_bench.yml @@ -24,4 +24,4 @@ spec: volumes: - name: tpcc-configmap configMap: - name: tpcc-config + name: tpcc-config \ No newline at end of file diff --git a/e2e-tests/experiments/functional/lvm-controller-high-availability/README.md b/e2e-tests/experiments/functional/lvm-controller-high-availability/README.md new file mode 100644 index 00000000..acdbd3e6 --- /dev/null +++ b/e2e-tests/experiments/functional/lvm-controller-high-availability/README.md @@ -0,0 +1,38 @@ +## About this experiment + +This functional experiment scales up the lvm-controller replicas to use it in high-availability mode and then verifies the lvm-localpv behaviour when one of the replicas goes down.
This experiment checks the initial number of replicas in the lvm-controller statefulset and scales it up by one, provided a free node is available to schedule the additional pod. The default replica count for the lvm-controller statefulset is one. + +## Supported platforms: + +K8s : 1.17+ + +OS : Ubuntu + +LVM version: LVM 2 + +## Entry-Criteria + +- K8s cluster should be in a healthy state, with all nodes in Ready state. +- lvm-controller and csi node-agent daemonset pods should be in running state. + +## Exit-Criteria + +- lvm-controller statefulset should be scaled up by one replica. +- All the replicas should be in running state. +- lvm-localpv volumes should be healthy, and data should not be impacted after scaling up the controller. +- This experiment brings one of the lvm-controller statefulset replicas down; as a result, the replica that was active/master before the experiment will be replaced by one of the remaining replicas after the experiment completes. This happens because of the lease mechanism, which is used to decide which replica serves as master. Only one replica is master at a time, and the replicas follow anti-affinity rules so that the replica pods are placed on different nodes. +- Volume provisioning/deprovisioning should not be impacted if any one replica goes down. + +## How to run + +- This experiment accepts its parameters as Kubernetes job environment variables. +- For running this experiment, clone the [openebs/lvm-localpv](https://github.com/openebs/lvm-localpv) repo and first apply the RBAC and CRDs for the e2e framework. +``` +kubectl apply -f lvm-localpv/e2e-tests/hack/rbac.yaml +kubectl apply -f lvm-localpv/e2e-tests/hack/crds.yaml +``` +Then update the required test-specific values in the run_e2e_test.yml file and create the Kubernetes job. +``` +kubectl create -f run_e2e_test.yml +``` +All the environment variables are described in the comments in the same file.
\ No newline at end of file diff --git a/e2e-tests/experiments/functional/lvm-controller-high-availability/busybox_app.yml b/e2e-tests/experiments/functional/lvm-controller-high-availability/busybox_app.yml new file mode 100644 index 00000000..1ff297e6 --- /dev/null +++ b/e2e-tests/experiments/functional/lvm-controller-high-availability/busybox_app.yml @@ -0,0 +1,46 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: app-busybox-ha + labels: + app: test_ha +spec: + selector: + matchLabels: + app: test_ha + template: + metadata: + labels: + app: test_ha + spec: + tolerations: + - key: "key" + operator: "Equal" + value: "value" + effect: "NoSchedule" + containers: + - name: app-busybox + imagePullPolicy: IfNotPresent + image: busybox + command: ["/bin/sh"] + args: ["-c", "while true; do sleep 10;done"] + env: + volumeMounts: + - name: data-vol + mountPath: /busybox + volumes: + - name: data-vol + persistentVolumeClaim: + claimName: pvcha +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: pvcha +spec: + storageClassName: openebs-lvmsc + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/e2e-tests/experiments/functional/lvm-controller-high-availability/run_e2e_test.yml b/e2e-tests/experiments/functional/lvm-controller-high-availability/run_e2e_test.yml new file mode 100644 index 00000000..969583aa --- /dev/null +++ b/e2e-tests/experiments/functional/lvm-controller-high-availability/run_e2e_test.yml @@ -0,0 +1,32 @@ +apiVersion: batch/v1 +kind: Job +metadata: + generateName: lvm-controller-high-availability- + namespace: e2e +spec: + template: + metadata: + labels: + name: lvm-controller-high-availability + spec: + serviceAccountName: e2e + restartPolicy: Never + + containers: + - name: ansibletest + image: openebs/lvm-localpv-e2e:ci + imagePullPolicy: IfNotPresent + env: + - name: ANSIBLE_STDOUT_CALLBACK + #value: log_plays + #value: actionable + value: default + + # This is the namespace where the LVM driver will create all its resources. + # By default it is in openebs namespace. If we want to change it to use a different + # namespace change the value of this env with desired namespace name. 
+ - name: OPERATOR_NAMESPACE + value: 'openebs' + + command: ["/bin/bash"] + args: ["-c", "ansible-playbook ./e2e-tests/experiments/functional/lvm-controller-high-availability/test.yml -i /etc/ansible/hosts -vv; exit 0"] \ No newline at end of file diff --git a/e2e-tests/experiments/functional/lvm-controller-high-availability/test.yml b/e2e-tests/experiments/functional/lvm-controller-high-availability/test.yml new file mode 100644 index 00000000..a7234916 --- /dev/null +++ b/e2e-tests/experiments/functional/lvm-controller-high-availability/test.yml @@ -0,0 +1,229 @@ +- hosts: localhost + connection: local + gather_facts: False + + vars_files: + - test_vars.yml + + tasks: + - block: + + ## Generating the testname for lvm localpv controller high-availability test + - include_tasks: /e2e-tests/hack/create_testname.yml + + ## Record SOT (start of test) in e2e result e2e-cr (e2e-custom-resource) + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'SOT' + + - name: Get the no of replicas in lvm-controller statefulset + shell: > + kubectl get sts openebs-lvm-controller -n kube-system -o jsonpath='{.status.replicas}' + args: + executable: /bin/bash + register: lvm_ctrl_replicas + + - name: Get the list of names of all the nodes in cluster + shell: > + kubectl get nodes --no-headers -o custom-columns=:.metadata.name + args: + executable: /bin/bash + register: node_list + + - name: Get the count of the schedulable nodes, which don't have `NoSchedule` taints + shell: > + kubectl get nodes --no-headers -o custom-columns=:.spec.taints + | grep -v NoSchedule | wc -l + args: + executable: /bin/bash + register: no_of_Schedulable_nodes + + - name: scale down the replicas to zero of lvm-controller statefulset + shell: > + kubectl scale sts openebs-lvm-controller -n kube-system --replicas=0 + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + - name: check that lvm-controller pods has been terminated successfully + shell: > + kubectl get pods -n kube-system -l app=openebs-lvm-controller + args: + executable: /bin/bash + register: ctrl_pods + until: "'No resources found' in ctrl_pods.stderr" + delay: 3 + retries: 25 + + - name: Provision a test volume when lvm-controller is not active + shell: > + kubectl apply -f busybox_app.yml + args: + executable: /bin/bash + + - name: check the pvc status, it should be in pending state + shell: > + kubectl get pvc pvcha -n e2e -o jsonpath='{.status.phase}' + args: + executable: /bin/bash + register: pvc_status + failed_when: "'Pending' not in pvc_status.stdout" + + - name: Manual wait for 15 seconds, pvc should not get bound in this time + shell: sleep 15 + + - name: again check the pvc status + shell: > + kubectl get pvc pvcha -n e2e -o jsonpath='{.status.phase}' + args: + executable: /bin/bash + register: pvc_status + failed_when: "'Pending' not in pvc_status.stdout" + + - name: scale up the lvm-controller statefulset replica + shell: > + kubectl scale sts openebs-lvm-controller -n kube-system + --replicas="{{ lvm_ctrl_replicas.stdout|int + 1 }}" + args: + executable: /bin/bash + failed_when: "{{ lvm_ctrl_replicas.stdout|int + 1 }} > {{no_of_Schedulable_nodes.stdout|int}}" + + - name: check that lvm-controller statefulset replicas are up and running + shell: > + kubectl get pods -n kube-system -l app=openebs-lvm-controller --no-headers + -o custom-columns=:.status.phase | grep Running | wc -l + args: + executable: /bin/bash + register: ready_replicas + until: "{{ ready_replicas.stdout|int }} == {{ 
lvm_ctrl_replicas.stdout|int + 1 }}" + delay: 3 + retries: 30 + + - name: check the pvc status after lvm controller is up and running + shell: > + kubectl get pvc pvcha -n e2e -o jsonpath='{.status.phase}' + args: + executable: /bin/bash + register: pvc_status + until: "'Bound' in pvc_status.stdout" + delay: 5 + retries: 30 + + - name: Get the application pod name + shell: > + kubectl get pods -n e2e -o jsonpath='{.items[?(@.metadata.labels.app=="test_ha")].metadata.name}' + args: + executable: /bin/bash + register: app_pod_name + + - name: Check if the application pod is in running state. + shell: > + kubectl get pods -n e2e -o jsonpath='{.items[?(@.metadata.labels.app=="test_ha")].status.phase}' + register: pod_status + until: "'Running' in pod_status.stdout" + delay: 5 + retries: 20 + + - name: Get the lvm-volume name from the pvc name + shell: > + kubectl get pvc pvcha -n e2e -o jsonpath='{.spec.volumeName}' + args: + executable: /bin/bash + register: lvmvol_name + + - name: Get the name of the controller pod replica which is active as master at present + shell: > + kubectl get lease local-csi-openebs-io -n kube-system -o jsonpath='{.spec.holderIdentity}' + args: + executable: /bin/bash + register: master_replica + + - name: Taint all nodes with `NoSchedule` to keep replica {{ master_replica.stdout }} out of action + shell: > + kubectl taint node {{ item }} key=value:NoSchedule + args: + executable: /bin/bash + register: taint_status + until: "'tainted' in taint_status.stdout " + retries: 20 + delay: 5 + with_items: "{{ node_list.stdout_lines }}" + + - name: Delete the {{ master_replica.stdout }} replica pod + shell: > + kubectl delete pod {{ master_replica.stdout }} -n kube-system + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + - name: Get the new replica name which is in action as master for lvm-controller + shell: > + kubectl get lease local-csi-openebs-io -n kube-system -o jsonpath='{.spec.holderIdentity}' + args: + executable: /bin/bash + register: new_master_replica + retries: 30 + delay: 5 + until: master_replica.stdout != new_master_replica.stdout + + - name: Deprovision the application + shell: > + kubectl delete -f busybox_app.yml + args: + executable: /bin/bash + + - name: Verify that application pods have been deleted successfully + shell: > + kubectl get pods -n e2e + args: + executable: /bin/bash + register: app_pod + until: "'{{ app_pod_name.stdout }}' not in app_pod.stdout" + delay: 3 + retries: 30 + + - name: verify that pvc has been deleted successfully + shell: > + kubectl get pvc -n e2e + args: + executable: /bin/bash + register: pvc_status + until: "'pvcha' not in pvc_status.stdout" + delay: 5 + retries: 30 + + - name: verify that lvmvol has been deleted successfully + shell: > + kubectl get lvmvol -n {{ operator_ns }} + args: + executable: /bin/bash + register: lvmvol_status + until: "lvmvol_name.stdout not in lvmvol_status.stdout" + delay: 3 + retries: 30 + + - set_fact: + flag: "Pass" + + rescue: + - set_fact: + flag: "Fail" + + always: + ## RECORD END-OF-TEST IN e2e RESULT CR + + - name: Remove the taint from the nodes + shell: > + kubectl taint node {{ item }} key- + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + with_items: "{{ node_list.stdout_lines }}" + ignore_errors: true + + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'EOT' \ No newline at end of file diff --git a/e2e-tests/experiments/functional/lvm-controller-high-availability/test_vars.yml 
b/e2e-tests/experiments/functional/lvm-controller-high-availability/test_vars.yml new file mode 100644 index 00000000..3ae5e23a --- /dev/null +++ b/e2e-tests/experiments/functional/lvm-controller-high-availability/test_vars.yml @@ -0,0 +1,3 @@ +test_name: lvm-controller-high-availability + +operator_ns: "{{ lookup('env','OPERATOR_NAMESPACE') }}" \ No newline at end of file diff --git a/e2e-tests/experiments/lvm-localpv-provisioner/Dockerfile b/e2e-tests/experiments/lvm-localpv-provisioner/Dockerfile new file mode 100644 index 00000000..a5b8e3b2 --- /dev/null +++ b/e2e-tests/experiments/lvm-localpv-provisioner/Dockerfile @@ -0,0 +1,28 @@ +# Copyright 2019-2020 The OpenEBS Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM ubuntu:18.04 + +RUN apt-get update + +RUN apt-get install lvm2 -y + +CMD [ "bash" ] + +########################################################################## +# This Dockerfile is used to create the image `w3aman/lvmutils:ci` # +# which is being used in the daemonset in the file `lvm_utils_ds.yml` # +# Here we install lvm utils in the image so that lvm command can be run # +# from the container, mainly to create volume groups on nodes. # +########################################################################## \ No newline at end of file diff --git a/e2e-tests/experiments/lvm-localpv-provisioner/README.md b/e2e-tests/experiments/lvm-localpv-provisioner/README.md new file mode 100644 index 00000000..6ec4e3d3 --- /dev/null +++ b/e2e-tests/experiments/lvm-localpv-provisioner/README.md @@ -0,0 +1,38 @@ +## About this experiment + +This experiment deploys the lvm-localpv provisioner in the kube-system namespace, which includes the lvm-controller and the csi node-agent daemonset. Apart from this, the experiment can also create volume groups and generic use-case storage classes for dynamic provisioning of volumes, based on the values provided through the environment variables. + +## Supported platforms: + +K8S : 1.17+ + +OS : Ubuntu + +LVM version : LVM 2 + +## Entry-Criteria + +- K8s cluster should be in a healthy state, with all nodes in Ready state. +- If we don't want to use this experiment to deploy the lvm-localpv provisioner, we can directly apply the lvm-localpv operator file as mentioned below; in that case, make sure volume groups are created on the desired nodes so that volumes can be provisioned. + +``` +kubectl apply -f https://raw.githubusercontent.com/openebs/lvm-localpv/master/deploy/lvm-operator.yaml +``` + +## Exit-Criteria + +- lvm-localpv driver components should be deployed successfully, and all the pods, including the lvm-controller and csi node-agent daemonset, should be in running state. + +## How to run + +- This experiment accepts its parameters as Kubernetes job environment variables. +- For running this experiment of deploying the lvm-localpv provisioner, clone the [openebs/lvm-localpv](https://github.com/openebs/lvm-localpv) repo and first apply the RBAC and CRDs for the e2e framework.
+``` +kubectl apply -f lvm-localpv/e2e-tests/hack/rbac.yaml +kubectl apply -f lvm-localpv/e2e-tests/hack/crds.yaml +``` +Then update the required test-specific values in the run_e2e_test.yml file and create the Kubernetes job. +``` +kubectl create -f run_e2e_test.yml +``` +All the environment variables are described in the comments in the same file. \ No newline at end of file diff --git a/e2e-tests/experiments/lvm-localpv-provisioner/lvm_utils_ds.yml b/e2e-tests/experiments/lvm-localpv-provisioner/lvm_utils_ds.yml new file mode 100644 index 00000000..685d49bd --- /dev/null +++ b/e2e-tests/experiments/lvm-localpv-provisioner/lvm_utils_ds.yml @@ -0,0 +1,42 @@ +################################################################### +# When we don't have direct access to the cluster nodes, we can # +# apply this daemonset with privileged access and create volume # +# groups on a node by running the commands from the daemonset # +# pod scheduled on that particular node. # +################################################################### + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: lvm-vg-creation +spec: + selector: + matchLabels: + app: lvm-vg + template: + metadata: + labels: + app: lvm-vg + spec: + #nodeSelector: + #test: lvm-vg + containers: + - name: lvm-vg-creation + image: w3aman/lvmutils:ci + imagePullPolicy: IfNotPresent + command: ['sh', '-c', 'echo Hello! && sleep 1800'] + volumeMounts: + - name: udev + mountPath: /run/udev + - name: device + mountPath: /dev + securityContext: + privileged: true + tty: true + volumes: + - hostPath: + path: /run/udev + name: udev + - hostPath: + path: /dev + name: device \ No newline at end of file diff --git a/e2e-tests/experiments/lvm-localpv-provisioner/openebs-lvmsc.j2 b/e2e-tests/experiments/lvm-localpv-provisioner/openebs-lvmsc.j2 new file mode 100644 index 00000000..f2560e60 --- /dev/null +++ b/e2e-tests/experiments/lvm-localpv-provisioner/openebs-lvmsc.j2 @@ -0,0 +1,20 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: openebs-lvmsc +allowVolumeExpansion: true +parameters: + volgroup: "{{ vg_name }}" +provisioner: local.csi.openebs.io + +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: openebs-lvmsc-wfc +allowVolumeExpansion: true +volumeBindingMode: WaitForFirstConsumer +parameters: + volgroup: "{{ vg_name }}" +provisioner: local.csi.openebs.io \ No newline at end of file diff --git a/e2e-tests/experiments/lvm-localpv-provisioner/run_e2e_test.yml b/e2e-tests/experiments/lvm-localpv-provisioner/run_e2e_test.yml new file mode 100644 index 00000000..be32fb7e --- /dev/null +++ b/e2e-tests/experiments/lvm-localpv-provisioner/run_e2e_test.yml @@ -0,0 +1,82 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + generateName: lvm-localpv-provisioner- + namespace: e2e +spec: + template: + metadata: + name: lvm-localpv-provisioner + labels: + app: lvm-localpv + spec: + serviceAccountName: e2e + restartPolicy: Never + containers: + - name: ansibletest + image: openebs/lvm-localpv-e2e:ci + imagePullPolicy: IfNotPresent + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: default + + # This test downloads the lvm-localpv operator file from the branch given here. + # Change the env value to the versioned branch name for the lvm-localpv provisioner + # from the openebs/lvm-localpv repo, for e.g. (v0.1.x, v0.2.x OR master). + # By default the test-specific value of `LVM_BRANCH` is master.
+ - name: LVM_BRANCH + value: 'master' + + # In each branch of the openebs/lvm-localpv repo the lvm-localpv driver image is set to the `ci` tag + # (`openebs/lvm-driver:ci`). Give the full image name here with the desired image tag to replace + # the `ci` tag, for e.g. (openebs/lvm-driver:0.1.0). Leaving this env empty will + # apply the operator yaml with the default `ci` tag, i.e. `openebs/lvm-driver:ci`. + - name: LVM_DRIVER_IMAGE + value: '' + + # This is the namespace where the LVM driver will create all its resources. + # By default it is in openebs namespace. If we want to change it to use a different + # namespace change the value of this env with desired namespace name. + - name: OPERATOR_NAMESPACE + value: 'openebs' + + # In addition to provisioning the lvm-localpv driver, if we want to create volume groups + # on the nodes, use `true` as the value for this env, else leave it blank. + # By default this test will skip volume group creation on the nodes. + - name: VOLUME_GROUP_CREATION + value: '' + + # If the value of the `VOLUME_GROUP_CREATION` env is `true`, provide here + # the name under which the volume group will be created by this test, else leave it blank. + # If we don't want to create volume groups on the nodes via this test but still + # want to create some generally used storage classes for provisioning of lvm volumes, + # provide here the volume group name which you have already set up and it will be + # used in the storage class template. + # By default the test-specific value of the volume group name is `lvmvg`. + - name: VOLUME_GROUP_NAME + value: 'lvmvg' + + # If the value of the `VOLUME_GROUP_CREATION` env is `true`, provide here + # the names of the disks to use as the physical volume disks on which the volume group + # will be created, else leave it blank, for e.g. `/dev/sdb`. + # If we want to use more than one disk, give the names in space-separated format, + # for e.g. "/dev/sdb /dev/sdc" + - name: PHYSICAL_VOLUME_DISKS + value: '' + + # If the value of the `VOLUME_GROUP_CREATION` env is `true`, provide here + # the names of the nodes on which we want the volume group to be created. Leaving this blank + # will create the volume group on all the schedulable nodes. + # Provide node names in comma-separated format, for e.g. ('node-1,node-2,node-3') + - name: NODE_NAMES + value: '' + + # If we want to create some generally used storage classes for provisioning of lvm volumes, + # provide `true` as the value for this env. By default the test-specific value for this env is + # blank, which means it will not create any storage class in the execution of this test.
+ - name: STORAGE_CLASS_CREATION + value: '' + + command: ["/bin/bash"] + args: ["-c", "ansible-playbook ./e2e-tests/experiments/lvm-localpv-provisioner/test.yml -i /etc/ansible/hosts -v; exit 0"] \ No newline at end of file diff --git a/e2e-tests/experiments/lvm-localpv-provisioner/run_litmus_test.yml b/e2e-tests/experiments/lvm-localpv-provisioner/run_litmus_test.yml deleted file mode 100644 index e69de29b..00000000 diff --git a/e2e-tests/experiments/lvm-localpv-provisioner/test.yml b/e2e-tests/experiments/lvm-localpv-provisioner/test.yml index e69de29b..c5c12f39 100644 --- a/e2e-tests/experiments/lvm-localpv-provisioner/test.yml +++ b/e2e-tests/experiments/lvm-localpv-provisioner/test.yml @@ -0,0 +1,97 @@ +--- +- hosts: localhost + connection: local + gather_facts: False + + vars_files: + - test_vars.yml + + tasks: + - block: + + ## Generating the testname for lvm localpv provisioner test + - include_tasks: /e2e-tests/hack/create_testname.yml + + ## Record SOT (start of test) in e2e result e2e-cr (e2e-custom-resource) + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'SOT' + + - name: Create volume group on desired worker nodes + include_tasks: /e2e-tests/experiments/lvm-localpv-provisioner/vg_creation.yml + when: vg_creation == 'true' + + - name: Download lvm-localpv operator file + get_url: + url: https://raw.githubusercontent.com/openebs/lvm-localpv/{{ lvm_branch }}/deploy/lvm-operator.yaml + dest: ./lvm_operator.yml + force: yes + register: status + until: "'OK' in status.msg" + delay: 5 + retries: 3 + + - name: Update the namespace where we want to create LVM_DRIVER resources + shell: > + sed -i -e "/name: LVM_NAMESPACE/{n;s/value: openebs/value: {{ operator_ns }}/g}" lvm_operator.yml && + sed -z "s/kind: Namespace\nmetadata:\n name: openebs/kind: Namespace\nmetadata:\n name: {{ operator_ns }}/" -i lvm_operator.yml + args: + executable: /bin/bash + register: update_status + failed_when: "update_status.rc != 0" + when: "operator_ns != 'openebs'" + + - name: Update the openebs lvm-localpv image tag + replace: + path: ./lvm_operator.yml + regexp: openebs/lvm-driver:ci + replace: "{{ lookup('env','LVM_DRIVER_IMAGE') }}" + when: lookup('env','LVM_DRIVER_IMAGE') | length > 0 + + - name: Apply the lvm-localpv operator file + shell: > + kubectl apply -f ./lvm_operator.yml + args: + executable: /bin/bash + + - name: Verify that the lvm-controller and node-agent pods are in running state + shell: > + kubectl get pods -n kube-system -l role=openebs-lvm + --no-headers -o custom-columns=:.status.phase | sort | uniq + args: + executable: /bin/bash + register: lvm_driver_components + until: "lvm_driver_components.stdout == 'Running'" + delay: 5 + retries: 40 + + - block: + + - name: Update storage class template variables with test-specific values + template: + src: openebs-lvmsc.j2 + dest: openebs-lvmsc.yml + + - name: Apply storage class yamls + shell: > + kubectl apply -f openebs-lvmsc.yml + args: + executable: /bin/bash + register: sc_result + failed_when: "sc_result.rc != 0" + + when: "{{ lookup('env','STORAGE_CLASS_CREATION') }} == true" + + - set_fact: + flag: "Pass" + + rescue: + - name: Setting fail flag + set_fact: + flag: "Fail" + + always: + ## RECORD END-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'EOT' \ No newline at end of file diff --git a/e2e-tests/experiments/lvm-localpv-provisioner/test_vars.yml b/e2e-tests/experiments/lvm-localpv-provisioner/test_vars.yml index 
e69de29b..45887619 100644 --- a/e2e-tests/experiments/lvm-localpv-provisioner/test_vars.yml +++ b/e2e-tests/experiments/lvm-localpv-provisioner/test_vars.yml @@ -0,0 +1,15 @@ +test_name: lvm-localpv-provisioner + +lvm_branch: "{{ lookup('env','LVM_BRANCH') }}" + +lvm_driver_image: "{{ lookup('env','LVM_DRIVER_IMAGE') }}" + +operator_ns: "{{ lookup('env','OPERATOR_NAMESPACE') }}" + +vg_creation: "{{ lookup('env','VOLUME_GROUP_CREATION') }}" + +vg_name: "{{ lookup('env','VOLUME_GROUP_NAME') }}" + +pv_disks: "{{ lookup('env','PHYSICAL_VOLUME_DISKS') }}" + +node_names: "{{ lookup('env','NODE_NAMES') }}" \ No newline at end of file diff --git a/e2e-tests/experiments/lvm-localpv-provisioner/vg_creation.yml b/e2e-tests/experiments/lvm-localpv-provisioner/vg_creation.yml new file mode 100644 index 00000000..766b475f --- /dev/null +++ b/e2e-tests/experiments/lvm-localpv-provisioner/vg_creation.yml @@ -0,0 +1,103 @@ +--- +- name: Get the list of nodes for volume group creation + set_fact: + node_list: "{{ node_names.split(',') }}" + +- block: + + - name: Label the nodes for DaemonSet pods to schedule on it + shell: > + kubectl label node {{ item }} test=lvm-vg + args: + executable: /bin/bash + register: label_status + failed_when: "label_status.rc != 0" + with_items: "{{ node_list }}" + + - name: Update the DaemonSet yaml to use nodes label selector + shell: > + sed -i -e "s|#nodeSelector|nodeSelector|g" \ + -e "s|#test: lvm-vg|test: lvm-vg|g" /e2e-tests/experiments/lvm-localpv-provisioner/lvm_utils_ds.yml + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + when: "node_names != ''" + +- name: Create a DaemonSet with privileged access for volume group creation on nodes + shell: > + kubectl apply -f /e2e-tests/experiments/lvm-localpv-provisioner/lvm_utils_ds.yml + args: + executable: /bin/bash + +- name: Check if DaemonSet pods are in running state on each nodes + shell: > + kubectl get pods -l app=lvm-vg + --no-headers -o custom-columns=:.status.phase | sort | uniq + args: + executable: /bin/bash + register: result + until: "result.stdout == 'Running'" + delay: 3 + retries: 40 + +- block: + + - name: Get the list of DaemonSet pods + shell: > + kubectl get pods -n e2e -l app=lvm-vg --no-headers + -o custom-columns=:.metadata.name + args: + executable: /bin/bash + register: ds_pods_list + + - name: Install lvm utils on each node and Create volume group + shell: > + kubectl exec -ti {{ item }} -- bash -c 'apt-get install lvm2 -y && pvcreate {{ pv_disks }} && vgcreate {{ vg_name }} {{ pv_disks }}' + args: + executable: /bin/bash + register: vg_status_all_nodes + failed_when: "vg_status_all_nodes.rc != 0" + with_items: "{{ ds_pods_list.stdout_lines }}" + + when: "node_names == ''" + +- block: + + - name: Get the list of DaemonSet pods which are scheduled on desired nodes for volume group creation + shell: > + kubectl get pods -n e2e -l app=lvm-vg + -o jsonpath='{.items[?(@.spec.nodeName==''"{{ item }}"'')].metadata.name}' + args: + executable: /bin/bash + register: ds_pods + with_items: "{{ node_list }}" + + - name: Install lvm utils on nodes and Create volume group + shell: > + kubectl exec -ti {{ item.stdout }} -- bash -c 'apt-get install lvm2 -y && pvcreate {{ pv_disks }} && vgcreate {{ vg_name }} {{ pv_disks }}' + args: + executable: /bin/bash + register: vg_status + failed_when: "vg_status.rc != 0" + with_items: "{{ ds_pods.results }}" + + when: "node_names != ''" + +- name: Delete the DaemonSet + shell: > + kubectl delete -f 
/e2e-tests/experiments/lvm-localpv-provisioner/lvm_utils_ds.yml + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + +- name: Remove the label from nodes + shell: > + kubectl label node {{ item }} test- + args: + executable: /bin/bash + register: label_status + failed_when: "label_status.rc != 0" + with_items: "{{ node_list }}" \ No newline at end of file diff --git a/e2e-tests/hack/crds.yaml b/e2e-tests/hack/crds.yaml index bdcb1d48..673273c7 100644 --- a/e2e-tests/hack/crds.yaml +++ b/e2e-tests/hack/crds.yaml @@ -3,21 +3,21 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: # name must match the spec fields below, and be in the form: . - name: litmusresults.litmus.io + name: e2eresults.e2e.io spec: # group name to use for REST API: /apis// - group: litmus.io + group: e2e.io # version name to use for REST API: /apis// version: v1alpha1 # either Namespaced or Cluster scope: Cluster names: # plural name to be used in the URL: /apis/// - plural: litmusresults + plural: e2eresults # singular name to be used as an alias on the CLI and for display - singular: litmusresult + singular: e2eresult # kind is normally the CamelCased singular type. Your resource manifests use this. - kind: LitmusResult + kind: e2eResult # shortNames allow shorter string to match your resource name on the CLI shortNames: - - lr + - e2er diff --git a/e2e-tests/hack/e2e-result.j2 b/e2e-tests/hack/e2e-result.j2 new file mode 100644 index 00000000..2f26b479 --- /dev/null +++ b/e2e-tests/hack/e2e-result.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: e2e.io/v1alpha1 +kind: e2eResult +metadata: + # name of the e2e testcase + name: {{ test }} +spec: + testStatus: + phase: {{ phase }} + result: {{ verdict }} \ No newline at end of file diff --git a/e2e-tests/hack/litmus-result.j2 b/e2e-tests/hack/litmus-result.j2 deleted file mode 100644 index 19ad9126..00000000 --- a/e2e-tests/hack/litmus-result.j2 +++ /dev/null @@ -1,20 +0,0 @@ ---- -apiVersion: litmus.io/v1alpha1 -kind: LitmusResult -metadata: - - # name of the litmus testcase - name: {{ test }} -spec: - - # holds information on the testcase - testMetadata: - app: {{ app }} - chaostype: {{ chaostype }} - - # holds the state of testcase, manually updated by json merge patch - # result is the useful value today, but anticipate phase use in future - testStatus: - phase: {{ phase }} - result: {{ verdict }} - diff --git a/e2e-tests/hack/push b/e2e-tests/hack/push deleted file mode 100755 index 124eef63..00000000 --- a/e2e-tests/hack/push +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash -set -e - -if [ -z "${REPONAME}" ] -then - REPONAME="openebs" -fi - -if [ -z "${IMGNAME}" ] || [ -z "${IMGTAG}" ]; -then - echo "Image details are missing. Nothing to push."; - exit 1 -fi - -IMAGEID=$( sudo docker images -q ${REPONAME}/${IMGNAME}:${IMGTAG} ) - -if [ ! -z "${DNAME}" ] && [ ! -z "${DPASS}" ]; -then - sudo docker login -u "${DNAME}" -p "${DPASS}"; - # Push image to docker hub - echo "Pushing ${REPONAME}/${IMGNAME}:${IMGTAG} ..."; - sudo docker push ${REPONAME}/${IMGNAME}:${IMGTAG} ; - if [ ! 
-z "${TRAVIS_TAG}" ] ; - then - # Push with different tags if tagged as a release - # When github is tagged with a release, then Travis will - # set the release tag in env TRAVIS_TAG - echo "Pushing ${REPONAME}/${IMGNAME}:${TRAVIS_TAG} ..."; - sudo docker tag ${IMAGEID} ${REPONAME}/${IMGNAME}:${TRAVIS_TAG} - sudo docker push ${REPONAME}/${IMGNAME}:${TRAVIS_TAG}; - echo "Pushing ${REPONAME}/${IMGNAME}:latest ..."; - sudo docker tag ${IMAGEID} ${REPONAME}/${IMGNAME}:latest - sudo docker push ${REPONAME}/${IMGNAME}:latest; - fi; -else - echo "No docker credentials provided. Skip uploading ${REPONAME}/${IMGNAME}:${IMGTAG} to docker hub"; -fi; diff --git a/e2e-tests/hack/rbac.yaml b/e2e-tests/hack/rbac.yaml index 0ce7f5fc..5d1ee775 100644 --- a/e2e-tests/hack/rbac.yaml +++ b/e2e-tests/hack/rbac.yaml @@ -1,23 +1,22 @@ apiVersion: v1 kind: Namespace metadata: - name: litmus + name: e2e --- apiVersion: v1 kind: ServiceAccount metadata: - name: litmus - namespace: litmus + name: e2e + namespace: e2e labels: - name: litmus + name: e2e --- -# Source: openebs/templates/clusterrole.yaml apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole metadata: - name: litmus + name: e2e labels: - name: litmus + name: e2e rules: - apiGroups: ["*"] resources: ["*"] @@ -26,14 +25,14 @@ rules: apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding metadata: - name: litmus + name: e2e labels: - name: litmus + name: e2e roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: litmus + name: e2e subjects: - kind: ServiceAccount - name: litmus - namespace: litmus + name: e2e + namespace: e2e \ No newline at end of file diff --git a/e2e-tests/hack/update_e2e_result_resource.yml b/e2e-tests/hack/update_e2e_result_resource.yml new file mode 100644 index 00000000..c5fcabf6 --- /dev/null +++ b/e2e-tests/hack/update_e2e_result_resource.yml @@ -0,0 +1,45 @@ +--- +- block: + - name: Generate the e2e result CR to reflect SOT (Start of Test) + template: + src: e2e-result.j2 + dest: e2e-result.yaml + vars: + test: "{{ test_name }}" + phase: in-progress + verdict: none + + - name: Analyze the e2e cr yaml + shell: cat e2e-result.yaml + + - name: Apply the e2e result CR + k8s: + state: present + src: e2e-result.yaml + register: e2er_status + failed_when: "e2er_status is failed" + + when: status == "SOT" + +- block: + - name: Generate the e2e result CR to reflect EOT (End of Test) + template: + src: e2e-result.j2 + dest: e2e-result.yaml + vars: + test: "{{ test_name }}" + phase: completed + verdict: "{{ flag }}" + + - name: Analyze the e2e cr yaml + shell: cat e2e-result.yaml + + - name: Apply the e2e result CR + k8s: + state: present + src: e2e-result.yaml + merge_type: merge + register: e2er_status + failed_when: "e2er_status is failed" + + when: status == "EOT" \ No newline at end of file diff --git a/e2e-tests/hack/update_litmus_result_resource.yml b/e2e-tests/hack/update_litmus_result_resource.yml deleted file mode 100644 index 477783b0..00000000 --- a/e2e-tests/hack/update_litmus_result_resource.yml +++ /dev/null @@ -1,49 +0,0 @@ ---- -- block: - - name: Generate the litmus result CR to reflect SOT (Start of Test) - template: - src: litmus-result.j2 - dest: litmus-result.yaml - vars: - test: "{{ test_name }}" - app: "" - chaostype: "" - phase: in-progress - verdict: none - - - name: Analyze the cr yaml - shell: cat litmus-result.yaml - - - name: Apply the litmus result CR - k8s: - state: present - src: litmus-result.yaml - register: lr_status - failed_when: "lr_status is failed" - - when: 
status == "SOT" - -- block: - - name: Generate the litmus result CR to reflect EOT (End of Test) - template: - src: litmus-result.j2 - dest: litmus-result.yaml - vars: - test: "{{ test_name }}" - app: "" - chaostype: "" - phase: completed - verdict: "{{ flag }}" - - - name: Analyze the cr yaml - shell: cat litmus-result.yaml - - - name: Apply the litmus result CR - k8s: - state: present - src: litmus-result.yaml - merge_type: merge - register: lr_status - failed_when: "lr_status is failed" - - when: status == "EOT" diff --git a/e2e-tests/utils/k8s/create_ns.yml b/e2e-tests/utils/k8s/create_ns.yml index 0eb42f85..4b2fc8de 100644 --- a/e2e-tests/utils/k8s/create_ns.yml +++ b/e2e-tests/utils/k8s/create_ns.yml @@ -10,7 +10,7 @@ shell: kubectl create ns {{ app_ns }} args: executable: /bin/bash - when: app_ns != 'litmus' and app_ns not in ns_list.stdout_lines + when: app_ns != 'e2e' and app_ns not in ns_list.stdout_lines # Check status of namespace -- include_tasks: /e2e-tests/utils/k8s/status_testns.yml +- include_tasks: /e2e-tests/utils/k8s/status_testns.yml \ No newline at end of file