From b8c4144cf2353b425e6fda74419374be3294706d Mon Sep 17 00:00:00 2001 From: Max Neunhoeffer Date: Mon, 29 Oct 2018 12:54:45 +0100 Subject: [PATCH 01/12] First stab at semiautomation. --- tests/acceptance/semiautomation/README.md | 18 +++++ tests/acceptance/semiautomation/helper.fish | 89 +++++++++++++++++++++ tests/acceptance/semiautomation/test1a.fish | 32 ++++++++ tests/acceptance/semiautomation/test1b.fish | 34 ++++++++ tests/acceptance/semiautomation/test1c.fish | 32 ++++++++ 5 files changed, 205 insertions(+) create mode 100644 tests/acceptance/semiautomation/README.md create mode 100644 tests/acceptance/semiautomation/helper.fish create mode 100755 tests/acceptance/semiautomation/test1a.fish create mode 100755 tests/acceptance/semiautomation/test1b.fish create mode 100755 tests/acceptance/semiautomation/test1c.fish diff --git a/tests/acceptance/semiautomation/README.md b/tests/acceptance/semiautomation/README.md new file mode 100644 index 000000000..3c2ddd7d6 --- /dev/null +++ b/tests/acceptance/semiautomation/README.md @@ -0,0 +1,18 @@ +# Semiautomation for the acceptance test + +This is a collection of tools to perform the acceptance test faster. + +## Prerequisites + + - k8s cluster set up with `kubectl` + - `fish` shell installed + - `curl` installed + - Obi's generated templates in a subdirectory called `generated` + +## Usage + +Execute the tests like this: + + ./test1a.fish + +and follow the instructions. 
diff --git a/tests/acceptance/semiautomation/helper.fish b/tests/acceptance/semiautomation/helper.fish new file mode 100644 index 000000000..fc66ffc13 --- /dev/null +++ b/tests/acceptance/semiautomation/helper.fish @@ -0,0 +1,89 @@ +function printheader + echo "Test : $TESTNAME" + echo "Description : $TESTDESC" + echo "Yaml file : $YAMLFILE" + echo "Deployment name : $DEPLOYMENT" + echo +end + +function waitForKubectl + if test (count $argv) -lt 5 + return 1 + end + set -l op (string split -- " " $argv[1]) + set -l select $argv[2] + set -l good (string split -- ";" "$argv[3]") + set -l expected $argv[4] + set -l timeout $argv[5] + + echo + echo "Testing `kubectl $op`" + echo " for occurrences of `$select`" + echo " that are `$good`, expecting `$expected`" + echo + + set -l t 0 + while true + set -l l (kubectl $op | grep $select) + set -l nfound (count $l) + set -l ngood 0 + for line in $l + if string match -r $good $line > /dev/null + set ngood (math $ngood + 1) + end + end + echo -n "Good=$ngood, found=$nfound, expected=$expected, try $t ($timeout)" + echo -n -e "\r" + if test $ngood -eq $expected -a $nfound -eq $expected ; echo ; return 0 ; end + if test $t -gt $timeout ; echo ; echo Timeout ; return 2 ; end + set t (math $t + 1) + sleep 1 + end +end + +function output + if which say > /dev/null + say $argv[1] > /dev/null ^ /dev/null + end + for l in $argv[2..-1] ; echo $l ; end +end + +function log + echo "$argv[1] Test: $TESTNAME, Desc: $TESTDESC" >> testprotocol.log +end + +function inputAndLogResult + read -P "Test result: " result + log $result +end + +function waitForUser + read -P "Hit enter to continue" +end + +function getLoadBalancerIP + string trim -c '"' (kubectl get service $argv[1] -o=json | \ + jq .status.loadBalancer.ingress[0].ip) +end + +function testArangoDB + set -l ip $argv[1] + set -l timeout $argv[2] + set -l n 0 + echo Waiting for ArangoDB to be ready... 
+ while true + if set v (curl -k -s -m 3 "https://$ip:8529/_api/version" --user root: | jq .server) + if test "$v" = '"arango"' ; return 0 ; end + end + set n (math $n + 1) + if test "$n" -gt "$timeout" + echo Timeout + return 1 + end + echo Waiting "$n($timeout)"... + end +end + +function fail + output "Failed" $argv +end diff --git a/tests/acceptance/semiautomation/test1a.fish b/tests/acceptance/semiautomation/test1a.fish new file mode 100755 index 000000000..32980cc94 --- /dev/null +++ b/tests/acceptance/semiautomation/test1a.fish @@ -0,0 +1,32 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test1a +set -g TESTDESC "Deployment of mode single" +set -g YAMLFILE generated/single-community-dev.yaml +set -g DEPLOYMENT acceptance-single +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT-sngl" "1/1 *Running" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 120 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 60 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT-sngl "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test1b.fish b/tests/acceptance/semiautomation/test1b.fish new file mode 100755 index 000000000..30c6f164c --- /dev/null +++ b/tests/acceptance/semiautomation/test1b.fish @@ -0,0 +1,34 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test1b +set -g TESTDESC "Deployment of mode active/failover" +set -g YAMLFILE generated/activefailover-community-pro.yaml +set -g DEPLOYMENT acceptance-activefailover +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 120 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 60 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test1c.fish b/tests/acceptance/semiautomation/test1c.fish new file mode 100755 index 000000000..d1b70e217 --- /dev/null +++ b/tests/acceptance/semiautomation/test1c.fish @@ -0,0 +1,32 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test1b +set -g TESTDESC "Deployment of mode cluster (enterprise)" +set -g YAMLFILE generated/cluster-enterprise-dev.yaml +set -g DEPLOYMENT acceptance-cluster +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" $DEPLOYMENT "1/1 *Running" 9 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 120 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 60 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." + +output "Ready" "" From ad9e603705d125c962d91eefc551e95423b6ecf7 Mon Sep 17 00:00:00 2001 From: lamai93 Date: Mon, 29 Oct 2018 15:27:57 +0100 Subject: [PATCH 02/12] Added tests 1c, 1d, 2a and 2b. 
--- tests/acceptance/semiautomation/test1c.fish | 6 +- tests/acceptance/semiautomation/test1d.fish | 38 +++++++++++ tests/acceptance/semiautomation/test2a.fish | 50 +++++++++++++++ tests/acceptance/semiautomation/test2b.fish | 71 +++++++++++++++++++++ 4 files changed, 163 insertions(+), 2 deletions(-) create mode 100755 tests/acceptance/semiautomation/test1d.fish create mode 100755 tests/acceptance/semiautomation/test2a.fish create mode 100755 tests/acceptance/semiautomation/test2b.fish diff --git a/tests/acceptance/semiautomation/test1c.fish b/tests/acceptance/semiautomation/test1c.fish index d1b70e217..259062b51 100755 --- a/tests/acceptance/semiautomation/test1c.fish +++ b/tests/acceptance/semiautomation/test1c.fish @@ -2,7 +2,7 @@ source helper.fish -set -g TESTNAME test1b +set -g TESTNAME test1c set -g TESTDESC "Deployment of mode cluster (enterprise)" set -g YAMLFILE generated/cluster-enterprise-dev.yaml set -g DEPLOYMENT acceptance-cluster @@ -10,7 +10,9 @@ printheader # Deploy and check kubectl apply -f $YAMLFILE -and waitForKubectl "get pod" $DEPLOYMENT "1/1 *Running" 9 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 120 or fail "Deployment did not get ready." 
diff --git a/tests/acceptance/semiautomation/test1d.fish b/tests/acceptance/semiautomation/test1d.fish new file mode 100755 index 000000000..fb7c1fd52 --- /dev/null +++ b/tests/acceptance/semiautomation/test1d.fish @@ -0,0 +1,38 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test1d +set -g TESTDESC "Deployment of mode cluster with sync (enterprise)" +set -g YAMLFILE generated/cluster-sync-enterprise-dev.yaml +set -g DEPLOYMENT acceptance-cluster +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 15 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-syma" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sywo" "1/1 *Running" 3 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-sync *LoadBalancer" "-v;pending" 1 120 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 60 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test2a.fish b/tests/acceptance/semiautomation/test2a.fish new file mode 100755 index 000000000..47e935809 --- /dev/null +++ b/tests/acceptance/semiautomation/test2a.fish @@ -0,0 +1,50 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test2a +set -g TESTDESC "Scale an active failover deployment (enterprise)" +set -g YAMLFILE generated/activefailover-enterprise-dev.yaml +set -g DEPLOYMENT acceptance-activefailover +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 120 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 60 +or fail "ArangoDB was not reachable." + +# Scale up the deployment +output "Next" "Patching Spec for Scaling up" +kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/single/count", "value":3}]' +and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 6 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 2 120 +or fail "Patched deployment did not get ready." + +# Scale down the deployment +output "Next" "Patching Spec for Scaling down" +kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/single/count", "value":2}]' +and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 120 +or fail "Patched deployment did not get ready." 
+ +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT-sngl "" 0 120 +or fail "Could not delete deployment." + +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test2b.fish b/tests/acceptance/semiautomation/test2b.fish new file mode 100755 index 000000000..d03bc97bd --- /dev/null +++ b/tests/acceptance/semiautomation/test2b.fish @@ -0,0 +1,71 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test2b +set -g TESTDESC "Scale an cluster deployment (enterprise)" +set -g YAMLFILE generated/cluster-enterprise-dev.yaml +set -g DEPLOYMENT acceptance-cluster +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 9 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 120 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 60 +or fail "ArangoDB was not reachable." + +# Patching +output "Patching" "Patching Spec for Scaling up" +kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/dbservers/count", "value":5}]' +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 11 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 5 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +or fail "Deployment did not get ready." 
+ +# Patching +output "Patching" "Patching Spec for Scaling up" +kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/coordinators/count", "value":4}]' +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 12 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 5 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 4 120 +or fail "Deployment did not get ready." + +# Patching +output "Patching" "Patching Spec for Scaling up" +kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/dbservers/count", "value":2}]' +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 9 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 2 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 4 120 +or fail "Deployment did not get ready." + +# Patching +output "Patching" "Patching Spec for Scaling up" +kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/coordinators/count", "value":1}]' +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 6 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 2 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 1 120 +or fail "Deployment did not get ready." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" From 01534b537045a9f6a404a07156ea098f097feddf Mon Sep 17 00:00:00 2001 From: Max Neunhoeffer Date: Wed, 31 Oct 2018 16:07:54 +0100 Subject: [PATCH 03/12] More tests ready. --- .../cluster-local-storage.template.yaml | 5 +- tests/acceptance/cluster-sync.template.yaml | 2 + tests/acceptance/generate.sh | 2 + tests/acceptance/local-storage.template.yaml | 9 +++ tests/acceptance/semiautomation/helper.fish | 4 ++ tests/acceptance/semiautomation/test1a.fish | 6 +- tests/acceptance/semiautomation/test1b.fish | 8 +-- tests/acceptance/semiautomation/test1c.fish | 6 +- tests/acceptance/semiautomation/test1d.fish | 8 +-- tests/acceptance/semiautomation/test2a.fish | 4 +- tests/acceptance/semiautomation/test2b.fish | 14 ++-- tests/acceptance/semiautomation/test3a.fish | 32 +++++++++ tests/acceptance/semiautomation/test3b.fish | 34 +++++++++ tests/acceptance/semiautomation/test3c.fish | 34 +++++++++ tests/acceptance/semiautomation/test3d.fish | 71 +++++++++++++++++++ tests/acceptance/semiautomation/test4a.fish | 46 ++++++++++++ tests/acceptance/semiautomation/test4b.fish | 46 ++++++++++++ 17 files changed, 307 insertions(+), 24 deletions(-) create mode 100644 tests/acceptance/local-storage.template.yaml create mode 100755 tests/acceptance/semiautomation/test3a.fish create mode 100755 tests/acceptance/semiautomation/test3b.fish create mode 100755 tests/acceptance/semiautomation/test3c.fish create mode 100755 tests/acceptance/semiautomation/test3d.fish create mode 100755 tests/acceptance/semiautomation/test4a.fish create mode 100755 tests/acceptance/semiautomation/test4b.fish diff --git a/tests/acceptance/cluster-local-storage.template.yaml b/tests/acceptance/cluster-local-storage.template.yaml index 65a32bdaf..1f6b9136c 100644 --- a/tests/acceptance/cluster-local-storage.template.yaml +++ b/tests/acceptance/cluster-local-storage.template.yaml @@ -8,4 +8,7 @@ spec: externalAccess: type: LoadBalancer mode: Cluster - storageClassName: acceptance + agents: + 
storageClassName: acceptance + dbservers: + storageClassName: acceptance diff --git a/tests/acceptance/cluster-sync.template.yaml b/tests/acceptance/cluster-sync.template.yaml index b0c15f547..c271837fb 100644 --- a/tests/acceptance/cluster-sync.template.yaml +++ b/tests/acceptance/cluster-sync.template.yaml @@ -10,3 +10,5 @@ spec: mode: Cluster sync: enabled: true + externalAccess: + type: LoadBalancer diff --git a/tests/acceptance/generate.sh b/tests/acceptance/generate.sh index aae318ac4..b922a5909 100755 --- a/tests/acceptance/generate.sh +++ b/tests/acceptance/generate.sh @@ -5,6 +5,8 @@ version="arangodb-preview:3.4.0-rc.3" enterprise_secret="$ARANGO_EP_SECRET" #only the number community="arangodb/$version" enterprise="registry.arangodb.com/arangodb/$version-$enterprise_secret" +community="neunhoef/arangodb:3.4" +enterprise="neunhoef/arangodb:3.4" rm -fr generated mkdir -p generated diff --git a/tests/acceptance/local-storage.template.yaml b/tests/acceptance/local-storage.template.yaml new file mode 100644 index 000000000..569221d93 --- /dev/null +++ b/tests/acceptance/local-storage.template.yaml @@ -0,0 +1,9 @@ +apiVersion: "storage.arangodb.com/v1alpha" +kind: "ArangoLocalStorage" +metadata: + name: "acceptance-local-storage" +spec: + storageClass: + name: acceptance + localPath: + - /var/lib/acceptance-test diff --git a/tests/acceptance/semiautomation/helper.fish b/tests/acceptance/semiautomation/helper.fish index fc66ffc13..d4efaecf6 100644 --- a/tests/acceptance/semiautomation/helper.fish +++ b/tests/acceptance/semiautomation/helper.fish @@ -45,6 +45,7 @@ function output if which say > /dev/null say $argv[1] > /dev/null ^ /dev/null end + echo for l in $argv[2..-1] ; echo $l ; end end @@ -55,6 +56,7 @@ end function inputAndLogResult read -P "Test result: " result log $result + echo end function waitForUser @@ -81,9 +83,11 @@ function testArangoDB return 1 end echo Waiting "$n($timeout)"... 
+ sleep 1 end end function fail output "Failed" $argv + exit 1 end diff --git a/tests/acceptance/semiautomation/test1a.fish b/tests/acceptance/semiautomation/test1a.fish index 32980cc94..f3dba507f 100755 --- a/tests/acceptance/semiautomation/test1a.fish +++ b/tests/acceptance/semiautomation/test1a.fish @@ -3,7 +3,7 @@ source helper.fish set -g TESTNAME test1a -set -g TESTDESC "Deployment of mode single" +set -g TESTDESC "Deployment of mode single (development)" set -g YAMLFILE generated/single-community-dev.yaml set -g DEPLOYMENT acceptance-single printheader @@ -12,12 +12,12 @@ printheader kubectl apply -f $YAMLFILE and waitForKubectl "get pod" "$DEPLOYMENT-sngl" "1/1 *Running" 1 120 and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 -and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 or fail "Deployment did not get ready." # Automatic check set ip (getLoadBalancerIP "$DEPLOYMENT-ea") -testArangoDB $ip 60 +testArangoDB $ip 120 or fail "ArangoDB was not reachable." 
# Manual check diff --git a/tests/acceptance/semiautomation/test1b.fish b/tests/acceptance/semiautomation/test1b.fish index 30c6f164c..225a2f922 100755 --- a/tests/acceptance/semiautomation/test1b.fish +++ b/tests/acceptance/semiautomation/test1b.fish @@ -3,8 +3,8 @@ source helper.fish set -g TESTNAME test1b -set -g TESTDESC "Deployment of mode active/failover" -set -g YAMLFILE generated/activefailover-community-pro.yaml +set -g TESTDESC "Deployment of mode active/failover (development)" +set -g YAMLFILE generated/activefailover-community-dev.yaml set -g DEPLOYMENT acceptance-activefailover printheader @@ -14,12 +14,12 @@ and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 120 and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 120 and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 120 and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 -and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 or fail "Deployment did not get ready." # Automatic check set ip (getLoadBalancerIP "$DEPLOYMENT-ea") -testArangoDB $ip 60 +testArangoDB $ip 120 or fail "ArangoDB was not reachable." 
# Manual check diff --git a/tests/acceptance/semiautomation/test1c.fish b/tests/acceptance/semiautomation/test1c.fish index 259062b51..d72dec545 100755 --- a/tests/acceptance/semiautomation/test1c.fish +++ b/tests/acceptance/semiautomation/test1c.fish @@ -3,7 +3,7 @@ source helper.fish set -g TESTNAME test1c -set -g TESTDESC "Deployment of mode cluster (enterprise)" +set -g TESTDESC "Deployment of mode cluster (development, enterprise)" set -g YAMLFILE generated/cluster-enterprise-dev.yaml set -g DEPLOYMENT acceptance-cluster printheader @@ -14,12 +14,12 @@ and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 -and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 or fail "Deployment did not get ready." # Automatic check set ip (getLoadBalancerIP "$DEPLOYMENT-ea") -testArangoDB $ip 60 +testArangoDB $ip 120 or fail "ArangoDB was not reachable." 
# Manual check diff --git a/tests/acceptance/semiautomation/test1d.fish b/tests/acceptance/semiautomation/test1d.fish index fb7c1fd52..08b53edb7 100755 --- a/tests/acceptance/semiautomation/test1d.fish +++ b/tests/acceptance/semiautomation/test1d.fish @@ -3,7 +3,7 @@ source helper.fish set -g TESTNAME test1d -set -g TESTDESC "Deployment of mode cluster with sync (enterprise)" +set -g TESTDESC "Deployment of mode cluster with sync (development, enterprise)" set -g YAMLFILE generated/cluster-sync-enterprise-dev.yaml set -g DEPLOYMENT acceptance-cluster printheader @@ -17,13 +17,13 @@ and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 and waitForKubectl "get pod" "$DEPLOYMENT-syma" "1/1 *Running" 3 120 and waitForKubectl "get pod" "$DEPLOYMENT-sywo" "1/1 *Running" 3 120 and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 -and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 120 -and waitForKubectl "get service" "$DEPLOYMENT-sync *LoadBalancer" "-v;pending" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +and waitForKubectl "get service" "$DEPLOYMENT-sync *LoadBalancer" "-v;pending" 1 180 or fail "Deployment did not get ready." # Automatic check set ip (getLoadBalancerIP "$DEPLOYMENT-ea") -testArangoDB $ip 60 +testArangoDB $ip 120 or fail "ArangoDB was not reachable." 
# Manual check diff --git a/tests/acceptance/semiautomation/test2a.fish b/tests/acceptance/semiautomation/test2a.fish index 47e935809..80f55634b 100755 --- a/tests/acceptance/semiautomation/test2a.fish +++ b/tests/acceptance/semiautomation/test2a.fish @@ -14,12 +14,12 @@ and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 120 and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 120 and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 120 and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 -and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 or fail "Deployment did not get ready." # Automatic check set ip (getLoadBalancerIP "$DEPLOYMENT-ea") -testArangoDB $ip 60 +testArangoDB $ip 120 or fail "ArangoDB was not reachable." # Scale up the deployment diff --git a/tests/acceptance/semiautomation/test2b.fish b/tests/acceptance/semiautomation/test2b.fish index d03bc97bd..0dd401b30 100755 --- a/tests/acceptance/semiautomation/test2b.fish +++ b/tests/acceptance/semiautomation/test2b.fish @@ -3,7 +3,7 @@ source helper.fish set -g TESTNAME test2b -set -g TESTDESC "Scale an cluster deployment (enterprise)" +set -g TESTDESC "Scale a cluster deployment (development, enterprise)" set -g YAMLFILE generated/cluster-enterprise-dev.yaml set -g DEPLOYMENT acceptance-cluster printheader @@ -15,16 +15,16 @@ and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 -and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 or fail "Deployment did not get ready." 
# Automatic check set ip (getLoadBalancerIP "$DEPLOYMENT-ea") -testArangoDB $ip 60 +testArangoDB $ip 120 or fail "ArangoDB was not reachable." # Patching -output "Patching" "Patching Spec for Scaling up" +output "Scaling db servers up" "Patching Spec for Scaling up DBservers" kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/dbservers/count", "value":5}]' and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 11 120 and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 5 120 @@ -33,7 +33,7 @@ and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 or fail "Deployment did not get ready." # Patching -output "Patching" "Patching Spec for Scaling up" +output "Scaling coordinators up" "Patching Spec for Scaling up coordinators" kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/coordinators/count", "value":4}]' and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 12 120 and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 5 120 @@ -42,7 +42,7 @@ and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 4 120 or fail "Deployment did not get ready." # Patching -output "Patching" "Patching Spec for Scaling up" +output "Scaling dbservers down" "Patching Spec for Scaling down dbservers" kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/dbservers/count", "value":2}]' and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 9 120 and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 2 120 @@ -51,7 +51,7 @@ and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 4 120 or fail "Deployment did not get ready." 
# Patching -output "Patching" "Patching Spec for Scaling up" +output "Scaling coordinators down" "Patching Spec for Scaling down coordinators" kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/coordinators/count", "value":1}]' and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 6 120 and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 2 120 diff --git a/tests/acceptance/semiautomation/test3a.fish b/tests/acceptance/semiautomation/test3a.fish new file mode 100755 index 000000000..1d38c85ee --- /dev/null +++ b/tests/acceptance/semiautomation/test3a.fish @@ -0,0 +1,32 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test3a +set -g TESTDESC "Deployment of mode single (production)" +set -g YAMLFILE generated/single-enterprise-pro.yaml +set -g DEPLOYMENT acceptance-single +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT-sngl" "1/1 *Running" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT-sngl "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test3b.fish b/tests/acceptance/semiautomation/test3b.fish new file mode 100755 index 000000000..14e3aee1a --- /dev/null +++ b/tests/acceptance/semiautomation/test3b.fish @@ -0,0 +1,34 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test3b +set -g TESTDESC "Deployment of mode active/failover (production)" +set -g YAMLFILE generated/activefailover-community-pro.yaml +set -g DEPLOYMENT acceptance-activefailover +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test3c.fish b/tests/acceptance/semiautomation/test3c.fish new file mode 100755 index 000000000..847e38beb --- /dev/null +++ b/tests/acceptance/semiautomation/test3c.fish @@ -0,0 +1,34 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test3c +set -g TESTDESC "Deployment of mode cluster (production, enterprise)" +set -g YAMLFILE generated/cluster-enterprise-pro.yaml +set -g DEPLOYMENT acceptance-cluster +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test3d.fish b/tests/acceptance/semiautomation/test3d.fish new file mode 100755 index 000000000..c8f58c5cc --- /dev/null +++ b/tests/acceptance/semiautomation/test3d.fish @@ -0,0 +1,71 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test3d +set -g TESTDESC "Scale a cluster deployment (production, enterprise)" +set -g YAMLFILE generated/cluster-enterprise-pro.yaml +set -g DEPLOYMENT acceptance-cluster +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 9 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Patching +output "Scaling dbservers down" "Patching Spec for Scaling down dbservers" +kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/dbservers/count", "value":2}]' +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 8 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 2 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +or fail "Deployment did not get ready." 
+ +# Patching +output "Scaling coordinators down" "Patching Spec for Scaling down coordinators" +kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/coordinators/count", "value":2}]' +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 7 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 2 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 2 120 +or fail "Deployment did not get ready." + +# Patching +output "Scaling db servers up" "Patching Spec for Scaling up DBservers" +kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/dbservers/count", "value":3}]' +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 8 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 2 120 +or fail "Deployment did not get ready." + +# Patching +output "Scaling coordinators up" "Patching Spec for Scaling up coordinators" +kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/coordinators/count", "value":3}]' +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 9 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +or fail "Deployment did not get ready." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test4a.fish b/tests/acceptance/semiautomation/test4a.fish new file mode 100755 index 000000000..be4e5804f --- /dev/null +++ b/tests/acceptance/semiautomation/test4a.fish @@ -0,0 +1,46 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test4a +set -g TESTDESC "Deployment of mode cluster (development, enterprise, local storage)" +set -g YAMLFILE generated/cluster-local-storage-enterprise-dev.yaml +set -g YAMLFILESTORAGE generated/local-storage-community-dev.yaml +set -g DEPLOYMENT acceptance-cluster +printheader + +# Deploy local storage: +kubectl apply -f $YAMLFILESTORAGE +and waitForKubectl "get storageclass" "acceptance.*arangodb.*localstorage" "" 1 60 +or fail "Local storage could not be deployed." + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +and waitForKubectl "get pvc" "$DEPLOYMENT" "RWO *acceptance" 6 120 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." + +kubectl delete -f $YAMLFILESTORAGE +kubectl delete storageclass acceptance +waitForKubectl "get storageclass" "acceptance.*arangodb.*localstorage" "" 0 120 +or fail "Could not delete deployed storageclass." 
+
+output "Ready" ""
diff --git a/tests/acceptance/semiautomation/test4b.fish b/tests/acceptance/semiautomation/test4b.fish
new file mode 100755
index 000000000..ad340ae36
--- /dev/null
+++ b/tests/acceptance/semiautomation/test4b.fish
@@ -0,0 +1,46 @@
+#!/usr/bin/fish

+source helper.fish

+set -g TESTNAME test4b
+set -g TESTDESC "Deployment of mode cluster (development, enterprise, local storage)"
+set -g YAMLFILE generated/cluster-enterprise-dev.yaml
+set -g YAMLFILESTORAGE generated/local-storage-community-dev.yaml
+set -g DEPLOYMENT acceptance-cluster
+printheader

+# Deploy local storage:
+kubectl apply -f $YAMLFILESTORAGE
+and waitForKubectl "get storageclass" "acceptance.*arangodb.*localstorage" "" 1 60
+or fail "Local storage could not be deployed."

+# Deploy and check
+kubectl apply -f $YAMLFILE
+and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120
+and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120
+and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120
+and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120
+and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180
+and waitForKubectl "get pvc" "$DEPLOYMENT" "RWO *standard" 6 120
+or fail "Deployment did not get ready."

+# Automatic check
+set ip (getLoadBalancerIP "$DEPLOYMENT-ea")
+testArangoDB $ip 120
+or fail "ArangoDB was not reachable."

+# Manual check
+output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER."
+inputAndLogResult

+# Cleanup
+kubectl delete -f $YAMLFILE
+waitForKubectl "get pod" $DEPLOYMENT "" 0 120
+or fail "Could not delete deployment."

+kubectl delete -f $YAMLFILESTORAGE
+kubectl delete storageclass acceptance
+waitForKubectl "get storageclass" "acceptance.*arangodb.*localstorage" "" 0 120
+or fail "Could not delete deployed storageclass."
+ +output "Ready" "" From 2ea94c162eb29931ef31ed5445de78578188a168 Mon Sep 17 00:00:00 2001 From: Max Neunhoeffer Date: Wed, 31 Oct 2018 16:15:54 +0100 Subject: [PATCH 04/12] New test. --- tests/acceptance/semiautomation/test5a.fish | 32 +++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100755 tests/acceptance/semiautomation/test5a.fish diff --git a/tests/acceptance/semiautomation/test5a.fish b/tests/acceptance/semiautomation/test5a.fish new file mode 100755 index 000000000..45ca671a8 --- /dev/null +++ b/tests/acceptance/semiautomation/test5a.fish @@ -0,0 +1,32 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test5a +set -g TESTDESC "Pod resilience in mode single (production)" +set -g YAMLFILE generated/single-community-pro.yaml +set -g DEPLOYMENT acceptance-single +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT-sngl" "1/1 *Running" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in and kill the single server pod." "Wait until it comes back and then see if the data is still there." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT-sngl "" 0 120 +or fail "Could not delete deployment." + +output "Ready" "" From 0444131435438ef1550321326b838f7378015a8f Mon Sep 17 00:00:00 2001 From: Max Neunhoeffer Date: Wed, 31 Oct 2018 16:25:29 +0100 Subject: [PATCH 05/12] New tests. 
--- tests/acceptance/semiautomation/test5b.fish | 34 +++++++++++++++++++++ tests/acceptance/semiautomation/test5c.fish | 34 +++++++++++++++++++++ 2 files changed, 68 insertions(+) create mode 100755 tests/acceptance/semiautomation/test5b.fish create mode 100755 tests/acceptance/semiautomation/test5c.fish diff --git a/tests/acceptance/semiautomation/test5b.fish b/tests/acceptance/semiautomation/test5b.fish new file mode 100755 index 000000000..796d44832 --- /dev/null +++ b/tests/acceptance/semiautomation/test5b.fish @@ -0,0 +1,34 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test5b +set -g TESTDESC "Pod resilience in active/failover (production)" +set -g YAMLFILE generated/activefailover-community-pro.yaml +set -g DEPLOYMENT acceptance-activefailover +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in." "Then, kill one single server pod after another." "They should come back, service should continue." "All data must still be there." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test5c.fish b/tests/acceptance/semiautomation/test5c.fish new file mode 100755 index 000000000..5efb44a21 --- /dev/null +++ b/tests/acceptance/semiautomation/test5c.fish @@ -0,0 +1,34 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test5c +set -g TESTDESC "Pod resilience in mode cluster (production, enterprise)" +set -g YAMLFILE generated/cluster-enterprise-pro.yaml +set -g DEPLOYMENT acceptance-cluster +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in with replication factor 2." "Then, kill one pod after another with enough time in between." "They should come back, service should continue." "All data must still be there." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." + +output "Ready" "" From 1f99e464abb3f9a5cc64c32b406283210844b426 Mon Sep 17 00:00:00 2001 From: Max Neunhoeffer Date: Wed, 31 Oct 2018 16:31:41 +0100 Subject: [PATCH 06/12] New tests. 
--- tests/acceptance/semiautomation/test6a.fish | 32 +++++++++++++++++++ tests/acceptance/semiautomation/test6b.fish | 34 +++++++++++++++++++++ tests/acceptance/semiautomation/test6c.fish | 34 +++++++++++++++++++++ 3 files changed, 100 insertions(+) create mode 100755 tests/acceptance/semiautomation/test6a.fish create mode 100755 tests/acceptance/semiautomation/test6b.fish create mode 100755 tests/acceptance/semiautomation/test6c.fish diff --git a/tests/acceptance/semiautomation/test6a.fish b/tests/acceptance/semiautomation/test6a.fish new file mode 100755 index 000000000..be7035f18 --- /dev/null +++ b/tests/acceptance/semiautomation/test6a.fish @@ -0,0 +1,32 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test6a +set -g TESTDESC "Node resilience in mode single (production)" +set -g YAMLFILE generated/single-community-pro.yaml +set -g DEPLOYMENT acceptance-single +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT-sngl" "1/1 *Running" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in and reboot node the single pod is running on." "Wait until it comes back and then see if the data is still there and the server is responsive." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT-sngl "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test6b.fish b/tests/acceptance/semiautomation/test6b.fish new file mode 100755 index 000000000..5d5a4cc7c --- /dev/null +++ b/tests/acceptance/semiautomation/test6b.fish @@ -0,0 +1,34 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test6b +set -g TESTDESC "Node resilience in active/failover (production)" +set -g YAMLFILE generated/activefailover-community-pro.yaml +set -g DEPLOYMENT acceptance-activefailover +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in." "Then, reboot the node on which the ready single server pod resides." "The node and pod should come back, service should be uninterrupted." "All data must still be there." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test6c.fish b/tests/acceptance/semiautomation/test6c.fish new file mode 100755 index 000000000..d1f4d4e08 --- /dev/null +++ b/tests/acceptance/semiautomation/test6c.fish @@ -0,0 +1,34 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test6c +set -g TESTDESC "Node resilience in mode cluster (production, enterprise)" +set -g YAMLFILE generated/cluster-enterprise-pro.yaml +set -g DEPLOYMENT acceptance-cluster +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in with replication factor 2." "Then, reboot nodes one after another with enough time in between." "They should come back, service should not be interrupted." "Even writes should be possible during the restart." "All data must still be there." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." + +output "Ready" "" From 498b0e51c3286e5ba0d5c8ccd57863ac027715a7 Mon Sep 17 00:00:00 2001 From: Max Neunhoeffer Date: Wed, 31 Oct 2018 16:54:28 +0100 Subject: [PATCH 07/12] New tests. 
--- tests/acceptance/semiautomation/test6d.fish | 32 +++++++++++++++++++ tests/acceptance/semiautomation/test6e.fish | 34 +++++++++++++++++++++ tests/acceptance/semiautomation/test6f.fish | 34 +++++++++++++++++++++ 3 files changed, 100 insertions(+) create mode 100755 tests/acceptance/semiautomation/test6d.fish create mode 100755 tests/acceptance/semiautomation/test6e.fish create mode 100755 tests/acceptance/semiautomation/test6f.fish diff --git a/tests/acceptance/semiautomation/test6d.fish b/tests/acceptance/semiautomation/test6d.fish new file mode 100755 index 000000000..3447e3d08 --- /dev/null +++ b/tests/acceptance/semiautomation/test6d.fish @@ -0,0 +1,32 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test6d +set -g TESTDESC "Node resilience in mode single (production)" +set -g YAMLFILE generated/single-community-pro.yaml +set -g DEPLOYMENT acceptance-single +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" "$DEPLOYMENT-sngl" "1/1 *Running" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in and remove the node the single pod is running on." "Wait until a replacement is back." "This can only work with network attached storage." "Then see if the data is still there and the new server is responsive." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT-sngl "" 0 120 +or fail "Could not delete deployment." 
+ +output "Ready" "" diff --git a/tests/acceptance/semiautomation/test6e.fish b/tests/acceptance/semiautomation/test6e.fish new file mode 100755 index 000000000..87e01dcc6 --- /dev/null +++ b/tests/acceptance/semiautomation/test6e.fish @@ -0,0 +1,34 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test6e +set -g TESTDESC "Node resilience in active/failover (production)" +set -g YAMLFILE generated/activefailover-community-pro.yaml +set -g DEPLOYMENT acceptance-activefailover +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB was not reachable." + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in." "Then, remove the node on which the ready single server pod resides." "The node and pod should come back (on a different machine)." "The service should be uninterrupted." "All data must still be there." +inputAndLogResult + +# Cleanup +kubectl delete -f $YAMLFILE +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +or fail "Could not delete deployment." 
+
+output "Ready" ""
diff --git a/tests/acceptance/semiautomation/test6f.fish b/tests/acceptance/semiautomation/test6f.fish
new file mode 100755
index 000000000..8e4a70ec7
--- /dev/null
+++ b/tests/acceptance/semiautomation/test6f.fish
@@ -0,0 +1,34 @@
+#!/usr/bin/fish

+source helper.fish

+set -g TESTNAME test6f
+set -g TESTDESC "Node resilience in mode cluster (production, enterprise)"
+set -g YAMLFILE generated/cluster-enterprise-pro.yaml
+set -g DEPLOYMENT acceptance-cluster
+printheader

+# Deploy and check
+kubectl apply -f $YAMLFILE
+and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120
+and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120
+and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120
+and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120
+and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180
+or fail "Deployment did not get ready."

+# Automatic check
+set ip (getLoadBalancerIP "$DEPLOYMENT-ea")
+testArangoDB $ip 120
+or fail "ArangoDB was not reachable."

+# Manual check
+output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in with replication factor 2." "Then, remove a node." "Pods should come back, service should not be interrupted." "Even writes should be possible during the redeployment." "All data must still be there."
+inputAndLogResult

+# Cleanup
+kubectl delete -f $YAMLFILE
+waitForKubectl "get pod" $DEPLOYMENT "" 0 120
+or fail "Could not delete deployment."

+output "Ready" ""
From e10ad689eeedca5edc7ecaa089159f9d3a3fde5a Mon Sep 17 00:00:00 2001
From: Max Neunhoeffer
Date: Fri, 2 Nov 2018 08:13:33 +0100
Subject: [PATCH 08/12] New test.
---
 tests/acceptance/semiautomation/test6g.fish | 34 +++++++++++++++++++++
 1 file changed, 34 insertions(+)
 create mode 100755 tests/acceptance/semiautomation/test6g.fish

diff --git a/tests/acceptance/semiautomation/test6g.fish b/tests/acceptance/semiautomation/test6g.fish
new file mode 100755
index 000000000..b5e086dbc
--- /dev/null
+++ b/tests/acceptance/semiautomation/test6g.fish
@@ -0,0 +1,34 @@
+#!/usr/bin/fish

+source helper.fish

+set -g TESTNAME test6g
+set -g TESTDESC "Node resilience in active/failover, repl factor 1 (production)"
+set -g YAMLFILE generated/activefailover-community-pro.yaml
+set -g DEPLOYMENT acceptance-activefailover
+printheader

+# Deploy and check
+kubectl apply -f $YAMLFILE
+and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 120
+and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 120
+and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 120
+and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120
+and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180
+or fail "Deployment did not get ready."

+# Automatic check
+set ip (getLoadBalancerIP "$DEPLOYMENT-ea")
+testArangoDB $ip 120
+or fail "ArangoDB was not reachable."

+# Manual check
+output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in, use replication factor 1 for one collection." "Then, remove the node on which the dbserver pod with the shard resides." "The node and pod should come back (on a different machine)." "The service should be uninterrupted." "All data must still be there." "This can only work for network attached persistent volumes."
+inputAndLogResult

+# Cleanup
+kubectl delete -f $YAMLFILE
+waitForKubectl "get pod" $DEPLOYMENT "" 0 120
+or fail "Could not delete deployment."
+ +output "Ready" "" From 95798419f6eb1d20ce510b54fc8d623f38e91b59 Mon Sep 17 00:00:00 2001 From: Max Neunhoeffer Date: Fri, 2 Nov 2018 08:16:58 +0100 Subject: [PATCH 09/12] Correct test description. --- tests/acceptance/semiautomation/test6g.fish | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/acceptance/semiautomation/test6g.fish b/tests/acceptance/semiautomation/test6g.fish index b5e086dbc..3ae003afa 100755 --- a/tests/acceptance/semiautomation/test6g.fish +++ b/tests/acceptance/semiautomation/test6g.fish @@ -23,7 +23,7 @@ testArangoDB $ip 120 or fail "ArangoDB was not reachable." # Manual check -output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in, use replication factor 1 for one collection." "Then, remove the node on which the dbserver pod with the shard resides." "The node and pod should come back (on a different machine)." "The service should be uninterrupted." "All data must still be there." "This can only work for network attached persistent volumes." +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in, use replication factor 1 for one collection." "Then, remove the node on which the dbserver pod with the shard resides." "The lost pods should come back (on a different machine), except the one." "The service should be uninterrupted, except for the one collection." "All data except the one must still be there." "This is only for locally attached persistent volumes." inputAndLogResult # Cleanup From ef70706fdc2026f6531afee30360e420bbafbb96 Mon Sep 17 00:00:00 2001 From: Max Neunhoeffer Date: Fri, 2 Nov 2018 11:13:27 +0100 Subject: [PATCH 10/12] Add access package. 
--- tests/acceptance/cluster-sync.template.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/acceptance/cluster-sync.template.yaml b/tests/acceptance/cluster-sync.template.yaml index c271837fb..0358ee95d 100644 --- a/tests/acceptance/cluster-sync.template.yaml +++ b/tests/acceptance/cluster-sync.template.yaml @@ -12,3 +12,4 @@ spec: enabled: true externalAccess: type: LoadBalancer + accessPackageSecretNames: ["src-accesspackage"] From 2e88042a5adcf18ad3c707f6a8e3e28425cae5df Mon Sep 17 00:00:00 2001 From: Max Neunhoeffer Date: Fri, 2 Nov 2018 11:14:21 +0100 Subject: [PATCH 11/12] test7a not quite ready yet. --- tests/acceptance/cluster-sync2.template.yaml | 14 +++++ .../semiautomation/replication.yaml | 13 ++++ tests/acceptance/semiautomation/test7a.fish | 62 +++++++++++++++++++ 3 files changed, 89 insertions(+) create mode 100644 tests/acceptance/cluster-sync2.template.yaml create mode 100644 tests/acceptance/semiautomation/replication.yaml create mode 100755 tests/acceptance/semiautomation/test7a.fish diff --git a/tests/acceptance/cluster-sync2.template.yaml b/tests/acceptance/cluster-sync2.template.yaml new file mode 100644 index 000000000..bfeb39651 --- /dev/null +++ b/tests/acceptance/cluster-sync2.template.yaml @@ -0,0 +1,14 @@ +apiVersion: "database.arangodb.com/v1alpha" +kind: "ArangoDeployment" +metadata: + name: "acceptance-cluster2" +spec: + environment: @ENVIRONMENT@ + image: @IMAGE@ + externalAccess: + type: LoadBalancer + mode: Cluster + sync: + enabled: true + externalAccess: + type: LoadBalancer diff --git a/tests/acceptance/semiautomation/replication.yaml b/tests/acceptance/semiautomation/replication.yaml new file mode 100644 index 000000000..53a6085f6 --- /dev/null +++ b/tests/acceptance/semiautomation/replication.yaml @@ -0,0 +1,13 @@ +apiVersion: "replication.database.arangodb.com/v1alpha" +kind: "ArangoDeploymentReplication" +metadata: + name: "replication-internal" +spec: + source: + masterEndpoint: ["https://@ADDRESS@:8629"] + 
auth: + keyfileSecretName: src-accesspackage-auth + tls: + caSecretName: src-accesspackage-ca + destination: + deploymentName: "acceptance-cluster2" diff --git a/tests/acceptance/semiautomation/test7a.fish b/tests/acceptance/semiautomation/test7a.fish new file mode 100755 index 000000000..7bb115edd --- /dev/null +++ b/tests/acceptance/semiautomation/test7a.fish @@ -0,0 +1,62 @@ +#!/usr/bin/fish + +source helper.fish + +set -g TESTNAME test7a +set -g TESTDESC "Deployment of 2 clusters with sync with DC2DC (production, enterprise)" +set -g YAMLFILE generated/cluster-sync-enterprise-pro.yaml +set -g YAMLFILE2 generated/cluster-sync2-enterprise-pro.yaml +set -g DEPLOYMENT acceptance-cluster +set -g DEPLOYMENT2 acceptance-cluster2 +printheader + +# Deploy and check +kubectl apply -f $YAMLFILE +kubectl apply -f $YAMLFILE2 +and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 15 120 +and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-syma" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT-sywo" "1/1 *Running" 3 120 +and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 120 +and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 180 +and waitForKubectl "get service" "$DEPLOYMENT-sync *LoadBalancer" "-v;pending" 1 180 +and waitForKubectl "get pod" "$DEPLOYMENT2" "1/1 *Running" 15 120 +and waitForKubectl "get pod" "$DEPLOYMENT2-prmr" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT2-agnt" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT2-crdn" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT2-syma" "1/1 *Running" 3 120 +and waitForKubectl "get pod" "$DEPLOYMENT2-sywo" "1/1 *Running" 3 120 +and waitForKubectl "get service" "$DEPLOYMENT2 *ClusterIP" 8529 1 120 +and waitForKubectl 
"get service" "$DEPLOYMENT2-ea *LoadBalancer" "-v;pending" 1 180 +and waitForKubectl "get service" "$DEPLOYMENT2-sync *LoadBalancer" "-v;pending" 1 180 +or fail "Deployment did not get ready." + +# Automatic check +set ip (getLoadBalancerIP "$DEPLOYMENT-ea") +testArangoDB $ip 120 +or fail "ArangoDB (1) was not reachable." + +set ip2 (getLoadBalancerIP "$DEPLOYMENT2-ea") +testArangoDB $ip2 120 +or fail "ArangoDB (2) was not reachable." + +# Set up replication, rest is manual: +# run sed here on replication.yaml, find sync-ea first +kubectl apply -f replication.yaml + +# Manual check +output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." +inputAndLogResult + +# Cleanup +kubectl delete -f replication.yaml +sleep 15 +kubectl delete -f $YAMLFILE +kubectl delete -f $YAMLFILE2 +waitForKubectl "get pod" $DEPLOYMENT "" 0 120 +waitForKubectl "get pod" $DEPLOYMENT2 "" 0 120 +or fail "Could not delete deployment." + +output "Ready" "" From 318d5ee9227a0cf7d4b596455a9c8c3cf8e05e91 Mon Sep 17 00:00:00 2001 From: Max Neunhoeffer Date: Fri, 2 Nov 2018 11:15:04 +0100 Subject: [PATCH 12/12] Update .gitignore. --- manifests/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/manifests/.gitignore b/manifests/.gitignore index 0c8b9cdf1..7f3592412 100644 --- a/manifests/.gitignore +++ b/manifests/.gitignore @@ -2,3 +2,4 @@ arango-deployment-dev.yaml arango-deployment-replication-dev.yaml arango-storage-dev.yaml arango-test-dev.yaml +arango-crd-dev.yaml