diff --git a/manifests/.gitignore b/manifests/.gitignore index 0c8b9cdf1..7f3592412 100644 --- a/manifests/.gitignore +++ b/manifests/.gitignore @@ -2,3 +2,4 @@ arango-deployment-dev.yaml arango-deployment-replication-dev.yaml arango-storage-dev.yaml arango-test-dev.yaml +arango-crd-dev.yaml diff --git a/tests/acceptance/cluster-local-storage.template.yaml b/tests/acceptance/cluster-local-storage.template.yaml index 65a32bdaf..1f6b9136c 100644 --- a/tests/acceptance/cluster-local-storage.template.yaml +++ b/tests/acceptance/cluster-local-storage.template.yaml @@ -8,4 +8,7 @@ spec: externalAccess: type: LoadBalancer mode: Cluster - storageClassName: acceptance + agents: + storageClassName: acceptance + dbservers: + storageClassName: acceptance diff --git a/tests/acceptance/cluster-sync.template.yaml b/tests/acceptance/cluster-sync.template.yaml index b0c15f547..0358ee95d 100644 --- a/tests/acceptance/cluster-sync.template.yaml +++ b/tests/acceptance/cluster-sync.template.yaml @@ -10,3 +10,6 @@ spec: mode: Cluster sync: enabled: true + externalAccess: + type: LoadBalancer + accessPackageSecretNames: ["src-accesspackage"] diff --git a/tests/acceptance/cluster-sync2.template.yaml b/tests/acceptance/cluster-sync2.template.yaml new file mode 100644 index 000000000..bfeb39651 --- /dev/null +++ b/tests/acceptance/cluster-sync2.template.yaml @@ -0,0 +1,14 @@ +apiVersion: "database.arangodb.com/v1alpha" +kind: "ArangoDeployment" +metadata: + name: "acceptance-cluster2" +spec: + environment: @ENVIRONMENT@ + image: @IMAGE@ + externalAccess: + type: LoadBalancer + mode: Cluster + sync: + enabled: true + externalAccess: + type: LoadBalancer diff --git a/tests/acceptance/generate.sh b/tests/acceptance/generate.sh index aae318ac4..b922a5909 100755 --- a/tests/acceptance/generate.sh +++ b/tests/acceptance/generate.sh @@ -5,6 +5,8 @@ version="arangodb-preview:3.4.0-rc.3" enterprise_secret="$ARANGO_EP_SECRET" #only the number community="arangodb/$version" 
enterprise="registry.arangodb.com/arangodb/$version-$enterprise_secret" +community="neunhoef/arangodb:3.4" +enterprise="neunhoef/arangodb:3.4" rm -fr generated mkdir -p generated diff --git a/tests/acceptance/local-storage.template.yaml b/tests/acceptance/local-storage.template.yaml new file mode 100644 index 000000000..569221d93 --- /dev/null +++ b/tests/acceptance/local-storage.template.yaml @@ -0,0 +1,9 @@ +apiVersion: "storage.arangodb.com/v1alpha" +kind: "ArangoLocalStorage" +metadata: + name: "acceptance-local-storage" +spec: + storageClass: + name: acceptance + localPath: + - /var/lib/acceptance-test diff --git a/tests/acceptance/semiautomation/README.md b/tests/acceptance/semiautomation/README.md new file mode 100644 index 000000000..3c2ddd7d6 --- /dev/null +++ b/tests/acceptance/semiautomation/README.md @@ -0,0 +1,18 @@ +# Semiautomation for the acceptance test + +This is a collection of tools to perform the acceptance test faster. + +## Prerequisites + + - k8s cluster set up with `kubectl` + - `fish` shell installed + - `curl` installed + - Obi's generated templates in a subdirectory called `generated` + +## Usage + +Execute the tests like this: + + ./test1a.fish + +and follow the instructions. 
diff --git a/tests/acceptance/semiautomation/helper.fish b/tests/acceptance/semiautomation/helper.fish new file mode 100644 index 000000000..d4efaecf6 --- /dev/null +++ b/tests/acceptance/semiautomation/helper.fish @@ -0,0 +1,93 @@ +function printheader + echo "Test : $TESTNAME" + echo "Description : $TESTDESC" + echo "Yaml file : $YAMLFILE" + echo "Deployment name : $DEPLOYMENT" + echo +end + +function waitForKubectl + if test (count $argv) -lt 5 + return 1 + end + set -l op (string split -- " " $argv[1]) + set -l select $argv[2] + set -l good (string split -- ";" "$argv[3]") + set -l expected $argv[4] + set -l timeout $argv[5] + + echo + echo "Testing `kubectl $op`" + echo " for occurrences of `$select`" + echo " that are `$good`, expecting `$expected`" + echo + + set -l t 0 + while true + set -l l (kubectl $op | grep $select) + set -l nfound (count $l) + set -l ngood 0 + for line in $l + if string match -r $good $line > /dev/null + set ngood (math $ngood + 1) + end + end + echo -n "Good=$ngood, found=$nfound, expected=$expected, try $t ($timeout)" + echo -n -e "\r" + if test $ngood -eq $expected -a $nfound -eq $expected ; echo ; return 0 ; end + if test $t -gt $timeout ; echo ; echo Timeout ; return 2 ; end + set t (math $t + 1) + sleep 1 + end +end + +function output + if which say > /dev/null + say $argv[1] > /dev/null ^ /dev/null + end + echo + for l in $argv[2..-1] ; echo $l ; end +end + +function log + echo "$argv[1] Test: $TESTNAME, Desc: $TESTDESC" >> testprotocol.log +end + +function inputAndLogResult + read -P "Test result: " result + log $result + echo +end + +function waitForUser + read -P "Hit enter to continue" +end + +function getLoadBalancerIP + string trim -c '"' (kubectl get service $argv[1] -o=json | \ + jq .status.loadBalancer.ingress[0].ip) +end + +function testArangoDB + set -l ip $argv[1] + set -l timeout $argv[2] + set -l n 0 + echo Waiting for ArangoDB to be ready... 
+ while true + if set v (curl -k -s -m 3 "https://$ip:8529/_api/version" --user root: | jq .server) + if test "$v" = '"arango"' ; return 0 ; end + end + set n (math $n + 1) + if test "$n" -gt "$timeout" + echo Timeout + return 1 + end + echo Waiting "$n($timeout)"... + sleep 1 + end +end + +function fail + output "Failed" $argv + exit 1 +end diff --git a/tests/acceptance/semiautomation/replication.yaml b/tests/acceptance/semiautomation/replication.yaml new file mode 100644 index 000000000..53a6085f6 --- /dev/null +++ b/tests/acceptance/semiautomation/replication.yaml @@ -0,0 +1,13 @@ +apiVersion: "replication.database.arangodb.com/v1alpha" +kind: "ArangoDeploymentReplication" +metadata: + name: "replication-internal" +spec: + source: + masterEndpoint: ["https://@ADDRESS@:8629"] + auth: + keyfileSecretName: src-accesspackage-auth + tls: + caSecretName: src-accesspackage-ca + destination: + deploymentName: "acceptance-cluster2" diff --git a/tests/acceptance/semiautomation/test1a.fish b/tests/acceptance/semiautomation/test1a.fish new file mode 100755 index 000000000..f3dba507f --- /dev/null +++ b/tests/acceptance/semiautomation/test1a.fish @@ -0,0 +1,32 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test1a +set -g TESTDESC "Deployment of mode single (development)" +set -g YAMLFILE generated/single-community-dev.yaml +set -g DEPLOYMENT acceptance-single +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT-sngl" "1/1 *Running" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." 
+ +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT-sngl "" 0 120 +or fail "Could not delete deployment." + +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test1b.fish b/tests/acceptance/semiautomation/test1b.fish new file mode 100755 index 000000000..225a2f922 --- /dev/null +++ b/tests/acceptance/semiautomation/test1b.fish @@ -0,0 +1,34 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test1b +set -g TESTDESC "Deployment of mode active/failover (development)" +set -g YAMLFILE generated/activefailover-community-dev.yaml +set -g DEPLOYMENT acceptance-activefailover +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test1c.fish b/tests/acceptance/semiautomation/test1c.fish new file mode 100755 index 000000000..d72dec545 --- /dev/null +++ b/tests/acceptance/semiautomation/test1c.fish @@ -0,0 +1,34 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test1c +set -g TESTDESC "Deployment of mode cluster (development, enterprise)" +set -g YAMLFILE generated/cluster-enterprise-dev.yaml +set -g DEPLOYMENT acceptance-cluster +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test1d.fish b/tests/acceptance/semiautomation/test1d.fish new file mode 100755 index 000000000..08b53edb7 --- /dev/null +++ b/tests/acceptance/semiautomation/test1d.fish @@ -0,0 +1,38 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test1d +set -g TESTDESC "Deployment of mode cluster with sync (development, enterprise)" +set -g YAMLFILE generated/cluster-sync-enterprise-dev.yaml +set -g DEPLOYMENT acceptance-cluster +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 15 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-syma" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sywo" "1/1 *Running" 3 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +and waitForKubectl "get service" "$DEPLOYMENT-sync *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test2a.fish b/tests/acceptance/semiautomation/test2a.fish new file mode 100755 index 000000000..80f55634b --- /dev/null +++ b/tests/acceptance/semiautomation/test2a.fish @@ -0,0 +1,50 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test2a +set -g TESTDESC "Scale an active failover deployment (enterprise)" +set -g YAMLFILE generated/activefailover-enterprise-dev.yaml +set -g DEPLOYMENT acceptance-activefailover +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Scale up the deployment +output "Next" "Patching Spec for Scaling up" +kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/single/count", "value":3}]' +and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 6 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 2 120 +or fail "Patched deployment did not get ready." + +# Scale down the deployment +output "Next" "Patching Spec for Scaling down" +kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/single/count", "value":2}]' +and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 120 +or fail "Patched deployment did not get ready." 
+ +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT-sngl "" 0 120 +or fail "Could not delete deployment." + +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test2b.fish b/tests/acceptance/semiautomation/test2b.fish new file mode 100755 index 000000000..0dd401b30 --- /dev/null +++ b/tests/acceptance/semiautomation/test2b.fish @@ -0,0 +1,71 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test2b +set -g TESTDESC "Scale a cluster deployment (development, enterprise)" +set -g YAMLFILE generated/cluster-enterprise-dev.yaml +set -g DEPLOYMENT acceptance-cluster +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 9 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Patching +output "Scaling db servers up" "Patching Spec for Scaling up DBservers" +kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/dbservers/count", "value":5}]' +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 11 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 5 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +or fail "Deployment did not get ready." 
+ +# Patching +output "Scaling coordinators up" "Patching Spec for Scaling up coordinators" +kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/coordinators/count", "value":4}]' +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 12 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 5 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 4 120 +or fail "Deployment did not get ready." + +# Patching +output "Scaling dbservers down" "Patching Spec for Scaling down dbservers" +kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/dbservers/count", "value":2}]' +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 9 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 2 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 4 120 +or fail "Deployment did not get ready." + +# Patching +output "Scaling coordinators down" "Patching Spec for Scaling down coordinators" +kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/coordinators/count", "value":1}]' +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 6 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 2 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 1 120 +or fail "Deployment did not get ready." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test3a.fish b/tests/acceptance/semiautomation/test3a.fish new file mode 100755 index 000000000..1d38c85ee --- /dev/null +++ b/tests/acceptance/semiautomation/test3a.fish @@ -0,0 +1,32 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test3a +set -g TESTDESC "Deployment of mode single (production)" +set -g YAMLFILE generated/single-enterprise-pro.yaml +set -g DEPLOYMENT acceptance-single +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT-sngl" "1/1 *Running" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT-sngl "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test3b.fish b/tests/acceptance/semiautomation/test3b.fish new file mode 100755 index 000000000..14e3aee1a --- /dev/null +++ b/tests/acceptance/semiautomation/test3b.fish @@ -0,0 +1,34 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test3b +set -g TESTDESC "Deployment of mode active/failover (production)" +set -g YAMLFILE generated/activefailover-community-pro.yaml +set -g DEPLOYMENT acceptance-activefailover +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test3c.fish b/tests/acceptance/semiautomation/test3c.fish new file mode 100755 index 000000000..847e38beb --- /dev/null +++ b/tests/acceptance/semiautomation/test3c.fish @@ -0,0 +1,34 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test3c +set -g TESTDESC "Deployment of mode cluster (production, enterprise)" +set -g YAMLFILE generated/cluster-enterprise-pro.yaml +set -g DEPLOYMENT acceptance-cluster +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test3d.fish b/tests/acceptance/semiautomation/test3d.fish new file mode 100755 index 000000000..c8f58c5cc --- /dev/null +++ b/tests/acceptance/semiautomation/test3d.fish @@ -0,0 +1,71 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test3d +set -g TESTDESC "Scale a cluster deployment (production, enterprise)" +set -g YAMLFILE generated/cluster-enterprise-pro.yaml +set -g DEPLOYMENT acceptance-cluster +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 9 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Patching +output "Scaling dbservers down" "Patching Spec for Scaling down dbservers" +kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/dbservers/count", "value":2}]' +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 8 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 2 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +or fail "Deployment did not get ready." 
+ +# Patching +output "Scaling coordinators down" "Patching Spec for Scaling down coordinators" +kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/coordinators/count", "value":2}]' +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 7 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 2 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 2 120 +or fail "Deployment did not get ready." + +# Patching +output "Scaling db servers up" "Patching Spec for Scaling up DBservers" +kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/dbservers/count", "value":3}]' +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 8 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 2 120 +or fail "Deployment did not get ready." + +# Patching +output "Scaling coordinators up" "Patching Spec for Scaling up coordinators" +kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/coordinators/count", "value":3}]' +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 9 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +or fail "Deployment did not get ready." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test4a.fish b/tests/acceptance/semiautomation/test4a.fish new file mode 100755 index 000000000..be4e5804f --- /dev/null +++ b/tests/acceptance/semiautomation/test4a.fish @@ -0,0 +1,46 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test4a +set -g TESTDESC "Deployment of mode cluster (development, enterprise, local storage)" +set -g YAMLFILE generated/cluster-local-storage-enterprise-dev.yaml +set -g YAMLFILESTORAGE generated/local-storage-community-dev.yaml +set -g DEPLOYMENT acceptance-cluster +printheader + +# Deploy local storage: +kubectl apply -f $YAMLFILESTORAGE +and waitForKubectl "get storageclass" "acceptance.*arangodb.*localstorage" "" 1 60 +or fail "Local storage could not be deployed." + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +and waitForKubectl "get pvc" "$DEPLOYMENT" "RWO *acceptance" 6 120 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." + +kubectl delete -f $YAMLFILESTORAGE +kubectl delete storageclass acceptance +waitForKubectl "get storageclass" "acceptance.*arangodb.*localstorage" "" 0 120 +or fail "Could not delete deployed storageclass." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test4b.fish b/tests/acceptance/semiautomation/test4b.fish new file mode 100755 index 000000000..ad340ae36 --- /dev/null +++ b/tests/acceptance/semiautomation/test4b.fish @@ -0,0 +1,46 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test4b +set -g TESTDESC "Deployment of mode cluster (development, enterprise, default storage)" +set -g YAMLFILE generated/cluster-enterprise-dev.yaml +set -g YAMLFILESTORAGE generated/local-storage-community-dev.yaml +set -g DEPLOYMENT acceptance-cluster +printheader + +# Deploy local storage: +kubectl apply -f $YAMLFILESTORAGE +and waitForKubectl "get storageclass" "acceptance.*arangodb.*localstorage" "" 1 60 +or fail "Local storage could not be deployed." + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +and waitForKubectl "get pvc" "$DEPLOYMENT" "RWO *standard" 6 120 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." + +kubectl delete -f $YAMLFILESTORAGE +kubectl delete storageclass acceptance +waitForKubectl "get storageclass" "acceptance.*arangodb.*localstorage" "" 0 120 +or fail "Could not delete deployed storageclass." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test5a.fish b/tests/acceptance/semiautomation/test5a.fish new file mode 100755 index 000000000..45ca671a8 --- /dev/null +++ b/tests/acceptance/semiautomation/test5a.fish @@ -0,0 +1,32 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test5a +set -g TESTDESC "Pod resilience in mode single (production)" +set -g YAMLFILE generated/single-community-pro.yaml +set -g DEPLOYMENT acceptance-single +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT-sngl" "1/1 *Running" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in and kill the single server pod." "Wait until it comes back and then see if the data is still there." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT-sngl "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test5b.fish b/tests/acceptance/semiautomation/test5b.fish new file mode 100755 index 000000000..796d44832 --- /dev/null +++ b/tests/acceptance/semiautomation/test5b.fish @@ -0,0 +1,34 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test5b +set -g TESTDESC "Pod resilience in active/failover (production)" +set -g YAMLFILE generated/activefailover-community-pro.yaml +set -g DEPLOYMENT acceptance-activefailover +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in." "Then, kill one single server pod after another." "They should come back, service should continue." "All data must still be there." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test5c.fish b/tests/acceptance/semiautomation/test5c.fish new file mode 100755 index 000000000..5efb44a21 --- /dev/null +++ b/tests/acceptance/semiautomation/test5c.fish @@ -0,0 +1,34 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test5c +set -g TESTDESC "Pod resilience in mode cluster (production, enterprise)" +set -g YAMLFILE generated/cluster-enterprise-pro.yaml +set -g DEPLOYMENT acceptance-cluster +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in with replication factor 2." "Then, kill one pod after another with enough time in between." "They should come back, service should continue." "All data must still be there." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test6a.fish b/tests/acceptance/semiautomation/test6a.fish new file mode 100755 index 000000000..be7035f18 --- /dev/null +++ b/tests/acceptance/semiautomation/test6a.fish @@ -0,0 +1,32 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test6a +set -g TESTDESC "Node resilience in mode single (production)" +set -g YAMLFILE generated/single-community-pro.yaml +set -g DEPLOYMENT acceptance-single +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT-sngl" "1/1 *Running" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in and reboot node the single pod is running on." "Wait until it comes back and then see if the data is still there and the server is responsive." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT-sngl "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test6b.fish b/tests/acceptance/semiautomation/test6b.fish new file mode 100755 index 000000000..5d5a4cc7c --- /dev/null +++ b/tests/acceptance/semiautomation/test6b.fish @@ -0,0 +1,34 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test6b +set -g TESTDESC "Node resilience in active/failover (production)" +set -g YAMLFILE generated/activefailover-community-pro.yaml +set -g DEPLOYMENT acceptance-activefailover +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in." "Then, reboot the node on which the ready single server pod resides." "The node and pod should come back, service should be uninterrupted." "All data must still be there." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test6c.fish b/tests/acceptance/semiautomation/test6c.fish new file mode 100755 index 000000000..d1f4d4e08 --- /dev/null +++ b/tests/acceptance/semiautomation/test6c.fish @@ -0,0 +1,34 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test6c +set -g TESTDESC "Node resilience in mode cluster (production, enterprise)" +set -g YAMLFILE generated/cluster-enterprise-pro.yaml +set -g DEPLOYMENT acceptance-cluster +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in with replication factor 2." "Then, reboot nodes one after another with enough time in between." "They should come back, service should not be interrupted." "Even writes should be possible during the restart." "All data must still be there." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test6d.fish b/tests/acceptance/semiautomation/test6d.fish new file mode 100755 index 000000000..3447e3d08 --- /dev/null +++ b/tests/acceptance/semiautomation/test6d.fish @@ -0,0 +1,32 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test6d +set -g TESTDESC "Node resilience in mode single (production)" +set -g YAMLFILE generated/single-community-pro.yaml +set -g DEPLOYMENT acceptance-single +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT-sngl" "1/1 *Running" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in and remove the node the single pod is running on." "Wait until a replacement is back." "This can only work with network attached storage." "Then see if the data is still there and the new server is responsive." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT-sngl "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test6e.fish b/tests/acceptance/semiautomation/test6e.fish new file mode 100755 index 000000000..87e01dcc6 --- /dev/null +++ b/tests/acceptance/semiautomation/test6e.fish @@ -0,0 +1,34 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test6e +set -g TESTDESC "Node resilience in active/failover (production)" +set -g YAMLFILE generated/activefailover-community-pro.yaml +set -g DEPLOYMENT acceptance-activefailover +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in." "Then, remove the node on which the ready single server pod resides." "The node and pod should come back (on a different machine)." "The service should be uninterrupted." "All data must still be there." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test6f.fish b/tests/acceptance/semiautomation/test6f.fish new file mode 100755 index 000000000..8e4a70ec7 --- /dev/null +++ b/tests/acceptance/semiautomation/test6f.fish @@ -0,0 +1,34 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test6c +set -g TESTDESC "Node resilience in mode cluster (production, enterprise)" +set -g YAMLFILE generated/cluster-enterprise-pro.yaml +set -g DEPLOYMENT acceptance-cluster +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in with replication factor 2." "Then, remove a node." "Pods should come back, service should not be interrupted." "Even writes should be possible during the redeployment." "All data must still be there." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test6g.fish b/tests/acceptance/semiautomation/test6g.fish new file mode 100755 index 000000000..3ae003afa --- /dev/null +++ b/tests/acceptance/semiautomation/test6g.fish @@ -0,0 +1,34 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test6f +set -g TESTDESC "Node resilience in active/failover, repl factor 1 (production)" +set -g YAMLFILE generated/activefailover-community-pro.yaml +set -g DEPLOYMENT acceptance-activefailover +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in, use replication factor 1 for one collection." "Then, remove the node on which the dbserver pod with the shard resides." "The lost pods should come back (on a different machine), except the one." "The service should be uninterrupted, except for the one collection." "All data except the one must still be there." "This is only for locally attached persistent volumes." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test7a.fish b/tests/acceptance/semiautomation/test7a.fish new file mode 100755 index 000000000..7bb115edd --- /dev/null +++ b/tests/acceptance/semiautomation/test7a.fish @@ -0,0 +1,62 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test7a +set -g TESTDESC "Deployment of 2 clusters with sync with DC2DC (production, enterprise)" +set -g YAMLFILE generated/cluster-sync-enterprise-pro.yaml +set -g YAMLFILE2 generated/cluster-sync2-enterprise-pro.yaml +set -g DEPLOYMENT acceptance-cluster +set -g DEPLOYMENT2 acceptance-cluster2 +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +kubectl apply -f $YAMLFILE2 +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 15 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-syma" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sywo" "1/1 *Running" 3 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +and waitForKubectl "get service" "$DEPLOYMENT-sync *LoadBalancer" "-v;pending" 1 180 +and waitForKubectl "get pod" "$DEPLOYMENT2" "1/1 *Running" 15 120 +and waitForKubectl "get pod" "$DEPLOYMENT2-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT2-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT2-crdn" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT2-syma" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT2-sywo" "1/1 *Running" 3 120 +and waitForKubectl "get service" "$DEPLOYMENT2 *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT2-ea *LoadBalancer" "-v;pending" 1 180 +and waitForKubectl "get service" "$DEPLOYMENT2-sync 
*LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB (1) was not reachable." + +set ip2 (getLoadBalancerIP "$DEPLOYMENT2-ea") +testArangoDB $ip2 120 +or fail "ArangoDB (2) was not reachable." + +# Set up replication, rest is manual: +# run sed here on replication.yaml, find sync-ea first +kubectl apply -f replication.yaml + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f replication.yaml +sleep 15 +kubectl delete -f $YAMLFILE +kubectl delete -f $YAMLFILE2 +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +waitForKubectl "get pod" $DEPLOYMENT2 "" 0 120 +or fail "Could not delete deployment." + +output "Ready" ""