Skip to content

Commit

Permalink
Merge pull request #191 from vdice/fix-contrib-ci-scripts
Browse files Browse the repository at this point in the history
fix(contrib/ci/*): fix scripts
  • Loading branch information
Vaughn Dice authored Mar 27, 2017
2 parents 8279219 + f36f1e9 commit c480c01
Show file tree
Hide file tree
Showing 4 changed files with 88 additions and 74 deletions.
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ IMAGE_PREFIX ?= deis

include versioning.mk

# Shell scripts to lint (shellcheck/shfmt): helper scripts, CI scripts, and
# the backup/is_running wrappers baked into the image rootfs. The duplicate
# pre-merge assignment was diff residue and is dropped (make keeps only the
# last assignment anyway).
SHELL_SCRIPTS = $(wildcard _scripts/*.sh contrib/ci/*.sh rootfs/bin/*backup) rootfs/bin/is_running

# The following variables describe the containerized development environment
# and other build options
Expand Down
71 changes: 42 additions & 29 deletions contrib/ci/test-minio.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,73 +2,86 @@

# Abort on errors and on failed pipeline stages.
# NOTE(review): the previous `set -eof pipefail` parsed as `-e -o pipefail -f`,
# silently enabling noglob; `-f` is dropped here since nothing in this script
# wants globbing disabled.
set -eo pipefail

# Tear down every container this script started, on any exit path.
cleanup() {
  kill-containers "${MINIO_JOB}" "${PG_JOB}"
}
trap cleanup EXIT

TEST_ROOT=$(dirname "${BASH_SOURCE[0]}")/
# shellcheck source=/dev/null
source "${TEST_ROOT}/test.sh"

# make sure we are in this dir
CURRENT_DIR=$(cd "$(dirname "$0")"; pwd)

create-postgres-creds

puts-step "creating fake minio credentials"

# create fake AWS credentials for minio admin credentials
mkdir -p "${CURRENT_DIR}"/tmp/aws-admin
# needs to be 20 characters long
echo "12345678901234567890" > "${CURRENT_DIR}"/tmp/aws-admin/access-key-id
# needs to be 40 characters long
echo "1234567890123456789012345678901234567890" > "${CURRENT_DIR}"/tmp/aws-admin/access-secret-key

# create fake AWS credentials for minio user credentials
mkdir -p "${CURRENT_DIR}"/tmp/aws-user
# needs to be 20 characters long
echo "12345678901234567890" > "${CURRENT_DIR}"/tmp/aws-user/accesskey
echo "12345678901234567890" > "${CURRENT_DIR}"/tmp/aws-user/access-key-id
# needs to be 40 characters long
echo "1234567890123456789012345678901234567890" > "${CURRENT_DIR}"/tmp/aws-user/secretkey
echo "1234567890123456789012345678901234567890" > "${CURRENT_DIR}"/tmp/aws-user/access-secret-key

puts-step "creating fake kubernetes service account token"

# create fake k8s serviceaccount token for minio to "discover" itself
mkdir -p "${CURRENT_DIR}"/tmp/k8s
echo "token" > "${CURRENT_DIR}"/tmp/k8s/token
echo "cert" > "${CURRENT_DIR}"/tmp/k8s/ca.crt

# boot minio
MINIO_JOB=$(docker run -d \
  -v "${CURRENT_DIR}"/tmp/aws-admin:/var/run/secrets/deis/minio/admin \
  -v "${CURRENT_DIR}"/tmp/aws-user:/var/run/secrets/deis/minio/user \
  -v "${CURRENT_DIR}"/tmp/k8s:/var/run/secrets/kubernetes.io/serviceaccount \
  quay.io/deisci/minio:canary boot server /home/minio/)

# boot postgres, linking the minio container and setting DEIS_MINIO_SERVICE_HOST
# and DEIS_MINIO_SERVICE_PORT. $1 is the postgres image under test.
PG_CMD="docker run -d --link ${MINIO_JOB}:minio -e PGCTLTIMEOUT=1200 \
-e BACKUP_FREQUENCY=1s -e DATABASE_STORAGE=minio \
-e DEIS_MINIO_SERVICE_HOST=minio -e DEIS_MINIO_SERVICE_PORT=9000 \
-v ${CURRENT_DIR}/tmp/creds:/var/run/secrets/deis/database/creds \
-v ${CURRENT_DIR}/tmp/aws-user:/var/run/secrets/deis/objectstore/creds $1"

start-postgres "${PG_CMD}"

# display logs for debugging purposes
puts-step "displaying minio logs"
docker logs "${MINIO_JOB}"

check-postgres "${PG_JOB}"

# check if minio has the 5 backups
puts-step "checking if minio has 5 backups"
BACKUPS="$(docker exec "${MINIO_JOB}" ls /home/minio/dbwal/basebackups_005/ | grep json)"
NUM_BACKUPS="$(echo "${BACKUPS}" | wc -w)"
# NOTE (bacongobbler): the BACKUP_FREQUENCY is only 1 second, so we could technically be checking
# in the middle of a backup. Instead of failing, let's consider N+1 backups an acceptable case
if [[ ! "${NUM_BACKUPS}" -eq "5" && ! "${NUM_BACKUPS}" -eq "6" ]]; then
  puts-error "did not find 5 or 6 base backups. 5 is the default, but 6 may exist if a backup is currently in progress (found $NUM_BACKUPS)"
  puts-error "${BACKUPS}"
  exit 1
fi

# kill off postgres, then reboot and see if it's running after recovering from backups
puts-step "shutting off postgres, then rebooting to test data recovery"
kill-containers "${PG_JOB}"

start-postgres "${PG_CMD}"

check-postgres "${PG_JOB}"

puts-step "tests PASSED!"
exit 0
68 changes: 34 additions & 34 deletions contrib/ci/test-swift.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,77 +2,77 @@

# Abort on errors and on failed pipeline stages.
# NOTE(review): the previous `set -eof pipefail` parsed as `-e -o pipefail -f`,
# silently enabling noglob; `-f` is dropped here since nothing in this script
# wants globbing disabled.
set -eo pipefail

# Tear down every container this script started, on any exit path.
cleanup() {
  kill-containers "${SWIFT_DATA}" "${SWIFT_JOB}" "${PG_JOB}"
}
trap cleanup EXIT

TEST_ROOT=$(dirname "${BASH_SOURCE[0]}")/
# shellcheck source=/dev/null
source "${TEST_ROOT}/test.sh"

# make sure we are in this dir
CURRENT_DIR=$(cd "$(dirname "$0")"; pwd)

create-postgres-creds

puts-step "fetching openstack credentials"

# turn creds into something that we can use.
mkdir -p "${CURRENT_DIR}"/tmp/swift

# guess which value to use for tenant:
TENANT=""

echo "test:tester" > "${CURRENT_DIR}"/tmp/swift/username
echo "testing" > "${CURRENT_DIR}"/tmp/swift/password
echo "${TENANT}" > "${CURRENT_DIR}"/tmp/swift/tenant
echo "http://swift:8080/auth/v1.0" > "${CURRENT_DIR}"/tmp/swift/authurl
echo "1" > "${CURRENT_DIR}"/tmp/swift/authversion
echo "deis-swift-test" > "${CURRENT_DIR}"/tmp/swift/database-container

# boot swift: a data-volume container plus the all-in-one swift container
SWIFT_DATA=$(docker run -d -v /srv --name SWIFT_DATA busybox)

SWIFT_JOB=$(docker run -d --name onlyone --hostname onlyone --volumes-from SWIFT_DATA -t deis/swift-onlyone:git-8516d23)

# postgres container command; $1 is the postgres image under test
PG_CMD="docker run -d --link ${SWIFT_JOB}:swift -e BACKUP_FREQUENCY=3s \
-e DATABASE_STORAGE=swift \
-e PGCTLTIMEOUT=1200 \
-v ${CURRENT_DIR}/tmp/creds:/var/run/secrets/deis/database/creds \
-v ${CURRENT_DIR}/tmp/swift:/var/run/secrets/deis/objectstore/creds \
$1"

start-postgres "$PG_CMD"

# display logs for debugging purposes
puts-step "displaying swift logs"
docker logs "${SWIFT_JOB}"

check-postgres "${PG_JOB}"

# check if swift has some backups ... 3 ?
puts-step "checking if swift has at least 3 backups"

BACKUPS="$(docker exec "${SWIFT_JOB}" swift -A http://127.0.0.1:8080/auth/v1.0 \
-U test:tester -K testing list deis-swift-test | grep basebackups_005 | grep json)"
NUM_BACKUPS="$(echo "${BACKUPS}" | wc -w)"
# NOTE (bacongobbler): the BACKUP_FREQUENCY is only 1 second, so we could technically be checking
# in the middle of a backup. Instead of failing, let's consider N+1 backups an acceptable case
if [[ ! "${NUM_BACKUPS}" -eq "5" && ! "${NUM_BACKUPS}" -eq "6" ]]; then
  puts-error "did not find 5 or 6 base backups. 5 is the default, but 6 may exist if a backup is currently in progress (found $NUM_BACKUPS)"
  puts-error "${BACKUPS}"
  exit 1
fi

# kill off postgres, then reboot and see if it's running after recovering from backups
puts-step "shutting off postgres, then rebooting to test data recovery"
kill-containers "${PG_JOB}"

start-postgres "${PG_CMD}"

check-postgres "${PG_JOB}"

puts-step "tests PASSED!"
exit 0
21 changes: 11 additions & 10 deletions contrib/ci/test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -3,28 +3,29 @@
# Abort on errors and on failed pipeline stages. The previous `set -eof pipefail`
# parsed as `-e -o pipefail -f`, accidentally enabling noglob; `-f` is dropped.
set -eo pipefail

puts-step() {
echo "-----> $@"
echo "-----> $*"
}

# Print an error banner; all arguments are joined into a single line.
# NOTE(review): output goes to stdout, matching the original behavior —
# redirecting to stderr would be conventional but changes the contract.
puts-error() {
  echo "!!! $*"
}

# Force-remove one or more containers by ID/name.
# Arguments: container IDs/names, passed straight through to `docker rm -f`.
kill-containers() {
  puts-step "destroying containers $*"
  docker rm -f "$@"
}

# Backward-compatible alias for callers still using the singular name.
kill-container() {
  kill-containers "$@"
}

# Write throwaway postgres credentials under ${CURRENT_DIR}/tmp/creds for the
# database container to mount. Assumes CURRENT_DIR is set by the caller.
create-postgres-creds() {
  puts-step "creating fake postgres credentials"

  # create fake postgres credentials
  mkdir -p "${CURRENT_DIR}"/tmp/creds
  echo "testuser" > "${CURRENT_DIR}"/tmp/creds/user
  echo "icanttellyou" > "${CURRENT_DIR}"/tmp/creds/password
}

start-postgres() {
export PG_JOB
PG_JOB=$($1)
# wait for postgres to boot
puts-step "sleeping for 90s while postgres is booting..."
Expand All @@ -34,9 +35,9 @@ start-postgres() {
# Dump a postgres container's logs, then verify the server is up.
# Arguments: $1 - container ID/name (quoted everywhere to survive odd IDs).
check-postgres() {
  # display logs for debugging purposes
  puts-step "displaying postgres logs"
  docker logs "$1"

  # check if postgres is running
  puts-step "checking if postgres is running"
  docker exec "$1" is_running
}

0 comments on commit c480c01

Please sign in to comment.