From a604ef25ca06e987cd53634e904a297df457a991 Mon Sep 17 00:00:00 2001
From: John Tuttle
Date: Tue, 17 Apr 2018 16:53:17 -0400
Subject: [PATCH] Kubernetes conjur deployment scripts exist (#2)

* initial attempt working end to end in GKE
* check for registry path
* demonstrate service account scope :)
* move over helper for env var checking
* rename all references of project to context
* rename context to namespace
* use internal docker registry for conjur appliance, use external IP for master access
* remove demo, move deploy scripts to top folder
* start fixing readme
* add docker login instructs to readme, other minor fixes
* make docker vars generic
* Fixes from my run-through
* move namespace creation into readme, reorder setup steps
* readme updates
* remove psql memory config, move namespace creation to manual step
* add service address back to readme
* provide CLI container manifest and instructions for using it
* add namespace creation back in
* replace link to CLI quickstart with link to usage
* Ensure image pull secret is created in correct namespace
* Fix ordering of bash options, -o pipefail is needed
---
 .gitignore                               |   3 +
 0_check_dependencies.sh                  |  33 +++++
 1_create_conjur_namespace.sh             |  17 +++
 2_build_and_push_containers.sh           |  14 +++
 3_deploy_conjur_cluster.sh               |  38 ++++++
 4_configure_master.sh                    |  22 ++++
 5_create_load_balancer.sh                |  22 ++++
 6_configure_standbys.sh                  |  38 ++++++
 7_configure_followers.sh                 |  31 +++++
 8_print_config.sh                        |  16 +++
 README.md                                | 147 ++++++++++++++++++++++-
 build/haproxy/Dockerfile                 |  12 ++
 build/haproxy/build.sh                   |   4 +
 build/haproxy/conjur-health-check.sh     |   8 ++
 build/haproxy/start.sh                   |  10 ++
 haproxy/haproxy.template.cfg             |  23 ++++
 haproxy/update_haproxy.sh                |  95 +++++++++++++++
 manifests/conjur-authenticator-role.yaml |  12 ++
 manifests/conjur-cli.yaml                |  25 ++++
 manifests/conjur-cluster.yaml            |  34 ++++++
 manifests/conjur-follower.yaml           |  48 ++++++++
 manifests/haproxy-conjur-master.yaml     |  42 +++++++
 start                                    |  16 +++
 stop                                     |  26 ++++
 utils.sh                                 | 121 +++++++++++++++++++
 25 files changed, 856 insertions(+), 1 deletion(-)
 create mode 100644 .gitignore
 create mode 100755 0_check_dependencies.sh
 create mode 100755 1_create_conjur_namespace.sh
 create mode 100755 2_build_and_push_containers.sh
 create mode 100755 3_deploy_conjur_cluster.sh
 create mode 100755 4_configure_master.sh
 create mode 100755 5_create_load_balancer.sh
 create mode 100755 6_configure_standbys.sh
 create mode 100755 7_configure_followers.sh
 create mode 100755 8_print_config.sh
 create mode 100644 build/haproxy/Dockerfile
 create mode 100755 build/haproxy/build.sh
 create mode 100755 build/haproxy/conjur-health-check.sh
 create mode 100755 build/haproxy/start.sh
 create mode 100644 haproxy/haproxy.template.cfg
 create mode 100755 haproxy/update_haproxy.sh
 create mode 100644 manifests/conjur-authenticator-role.yaml
 create mode 100644 manifests/conjur-cli.yaml
 create mode 100644 manifests/conjur-cluster.yaml
 create mode 100644 manifests/conjur-follower.yaml
 create mode 100644 manifests/haproxy-conjur-master.yaml
 create mode 100755 start
 create mode 100755 stop
 create mode 100755 utils.sh

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..d0165f3
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+standby-seed.tar
+haproxy.cfg
+tmp/
diff --git a/0_check_dependencies.sh b/0_check_dependencies.sh
new file mode 100755
index 0000000..7e868f3
--- /dev/null
+++ b/0_check_dependencies.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+set -eo pipefail
+
+. utils.sh
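+
+# check_env_var (defined in utils.sh) prints an error and exits if the
+# named environment variable is unset or empty, so a misconfigured shell
+# is caught before any changes are made to the cluster.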
+
+check_env_var "CONJUR_NAMESPACE_NAME"
+check_env_var "DOCKER_REGISTRY_URL"
+check_env_var "DOCKER_REGISTRY_PATH"
+check_env_var "CONJUR_ACCOUNT"
+check_env_var "CONJUR_ADMIN_PASSWORD"
+
+echo "Before we proceed..."
+
+# Confirm the user is logged into Kubernetes.
+read -p "Are you logged in to a Kubernetes cluster (yes/no)? " choice
+case "$choice" in
+  yes ) ;;
+  * ) echo "You must log in to a Kubernetes cluster before running this demo." && exit 1;;
+esac
+
+read -p "Are you logged into the $DOCKER_REGISTRY_URL Docker registry (yes/no)? " choice
+case "$choice" in
+  yes ) echo "Great! Let's go.";;
+  * ) echo "You must log in to your Docker registry before running this demo." && exit 1;;
+esac
+
+conjur_appliance_image=$DOCKER_REGISTRY_PATH/conjur-appliance:$CONJUR_NAMESPACE_NAME
+
+# Confirm the Conjur appliance image is present in the local Docker engine.
+if [[ "$(docker images -q $conjur_appliance_image 2> /dev/null)" == "" ]]; then
+  echo "You must have the Conjur v4 Appliance tagged as $conjur_appliance_image in your Docker engine to run this script."
+  exit 1
+fi
diff --git a/1_create_conjur_namespace.sh b/1_create_conjur_namespace.sh
new file mode 100755
index 0000000..e47cd88
--- /dev/null
+++ b/1_create_conjur_namespace.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+set -euo pipefail
+
+. utils.sh
+
+announce "Creating Conjur namespace."
+
+set_namespace default
+
+if has_namespace "$CONJUR_NAMESPACE_NAME"; then
+  echo "Namespace '$CONJUR_NAMESPACE_NAME' exists, not going to create it."
+  set_namespace $CONJUR_NAMESPACE_NAME
+else
+  echo "Creating '$CONJUR_NAMESPACE_NAME' namespace."
+  kubectl create namespace "$CONJUR_NAMESPACE_NAME"
+  set_namespace $CONJUR_NAMESPACE_NAME
+fi
diff --git a/2_build_and_push_containers.sh b/2_build_and_push_containers.sh
new file mode 100755
index 0000000..4b384f8
--- /dev/null
+++ b/2_build_and_push_containers.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+set -euo pipefail
+
+. utils.sh
+
+announce "Building and pushing haproxy image."
+
+pushd build/haproxy
+  ./build.sh
+popd
+
+docker_tag_and_push "haproxy"
+
+echo "Docker images pushed."
diff --git a/3_deploy_conjur_cluster.sh b/3_deploy_conjur_cluster.sh
new file mode 100755
index 0000000..7641123
--- /dev/null
+++ b/3_deploy_conjur_cluster.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+set -euo pipefail
+
+. utils.sh
+
+announce "Creating Conjur cluster."
+
+set_namespace $CONJUR_NAMESPACE_NAME
+
+if ! [ "${DOCKER_EMAIL:-}" = "" ]; then
+  announce "Creating image pull secret."
+
+  kubectl delete --ignore-not-found secret conjurregcred
+
+  kubectl create secret docker-registry conjurregcred \
+    --docker-server=$DOCKER_REGISTRY_URL \
+    --docker-username=$DOCKER_USERNAME \
+    --docker-password=$DOCKER_PASSWORD \
+    --docker-email=$DOCKER_EMAIL
+fi
+
+conjur_appliance_image=$DOCKER_REGISTRY_PATH/conjur-appliance:$CONJUR_NAMESPACE_NAME
+
+echo "deploying main cluster"
+sed -e "s#{{ CONJUR_APPLIANCE_IMAGE }}#$conjur_appliance_image#g" ./manifests/conjur-cluster.yaml |
+  kubectl create -f -
+
+echo "deploying followers"
+sed -e "s#{{ CONJUR_APPLIANCE_IMAGE }}#$conjur_appliance_image#g" ./manifests/conjur-follower.yaml |
+  sed -e "s#{{ AUTHENTICATOR_SERVICE_ID }}#$AUTHENTICATOR_SERVICE_ID#g" |
+  kubectl create -f -
+
+sleep 10
+
+echo "Waiting for Conjur pods to launch..."
+wait_for_node $(get_master_pod_name)
+
+echo "Cluster created."
diff --git a/4_configure_master.sh b/4_configure_master.sh
new file mode 100755
index 0000000..14e212a
--- /dev/null
+++ b/4_configure_master.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+set -euo pipefail
+
+. utils.sh
+
+announce "Configuring master pod."
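+
+# `evoke configure master` (below) initializes the labeled pod as the
+# Conjur master. The --master-altnames/--follower-altnames flags add the
+# in-cluster service DNS names to the certificates the appliance generates,
+# so clients inside the cluster can verify TLS against those names.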
+
+set_namespace $CONJUR_NAMESPACE_NAME
+
+master_pod_name=$(get_master_pod_name)
+
+kubectl label --overwrite pod $master_pod_name role=master
+
+# Configure Conjur master server using evoke.
+kubectl exec $master_pod_name -- evoke configure master \
+  -h conjur-master \
+  --master-altnames localhost,conjur-master.$CONJUR_NAMESPACE_NAME.svc.cluster.local \
+  --follower-altnames conjur-follower,conjur-follower.$CONJUR_NAMESPACE_NAME.svc.cluster.local \
+  -p $CONJUR_ADMIN_PASSWORD \
+  $CONJUR_ACCOUNT
+
+echo "Master pod configured."
diff --git a/5_create_load_balancer.sh b/5_create_load_balancer.sh
new file mode 100755
index 0000000..f4f2f20
--- /dev/null
+++ b/5_create_load_balancer.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+set -euo pipefail
+
+. utils.sh
+
+announce "Creating load balancer for master and standbys."
+
+set_namespace $CONJUR_NAMESPACE_NAME
+
+docker_image=${DOCKER_REGISTRY_PATH}/haproxy:$CONJUR_NAMESPACE_NAME
+
+sed -e "s#{{ DOCKER_IMAGE }}#$docker_image#g" ./manifests/haproxy-conjur-master.yaml |
+  kubectl create -f -
+
+wait_for_node 'haproxy-conjur-master'
+
+echo "Configuring load balancer..."
+
+# Update HAProxy config to reflect the Conjur cluster and restart the daemon.
+haproxy/update_haproxy.sh haproxy-conjur-master
+
+echo "Load balancer created and configured."
diff --git a/6_configure_standbys.sh b/6_configure_standbys.sh
new file mode 100755
index 0000000..09d75aa
--- /dev/null
+++ b/6_configure_standbys.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+set -euo pipefail
+
+. utils.sh
+
+announce "Configuring standbys."
+
+set_namespace $CONJUR_NAMESPACE_NAME
+
+master_pod_name=$(get_master_pod_name)
+
+echo "Preparing standby seed files..."
+
+mkdir -p tmp
+kubectl exec $master_pod_name evoke seed standby conjur-standby > ./tmp/standby-seed.tar
+
+master_pod_ip=$(kubectl describe pod $master_pod_name | awk '/IP:/ { print $2 }')
+pod_list=$(kubectl get pods -l role=unset --no-headers | awk '{ print $1 }')
+
+for pod_name in $pod_list; do
+  printf "Configuring standby %s...\n" $pod_name
+
+  kubectl label --overwrite pod $pod_name role=standby
+
+  copy_file_to_container "./tmp/standby-seed.tar" "/tmp/standby-seed.tar" "$pod_name"
+
+  kubectl exec $pod_name evoke unpack seed /tmp/standby-seed.tar
+  kubectl exec $pod_name -- evoke configure standby -i $master_pod_ip
+done
+
+rm -rf tmp
+
+echo "Standbys configured."
+echo "Starting synchronous replication..."
+
+mastercmd evoke replication sync
+
+echo "Synchronous replication started."
diff --git a/7_configure_followers.sh b/7_configure_followers.sh
new file mode 100755
index 0000000..d59c3a2
--- /dev/null
+++ b/7_configure_followers.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+set -euo pipefail
+
+. utils.sh
+
+announce "Configuring followers."
+
+set_namespace $CONJUR_NAMESPACE_NAME
+
+master_pod_name=$(get_master_pod_name)
+
+echo "Preparing follower seed files..."
+
+mkdir -p tmp
+kubectl exec $master_pod_name evoke seed follower conjur-follower > ./tmp/follower-seed.tar
+
+master_pod_ip=$(kubectl describe pod $master_pod_name | awk '/IP:/ { print $2 }')
+pod_list=$(kubectl get pods -l role=follower --no-headers | awk '{ print $1 }')
+
+for pod_name in $pod_list; do
+  printf "Configuring follower %s...\n" $pod_name
+
+  copy_file_to_container "./tmp/follower-seed.tar" "/tmp/follower-seed.tar" "$pod_name"
+
+  kubectl exec $pod_name evoke unpack seed /tmp/follower-seed.tar
+  kubectl exec $pod_name -- evoke configure follower
+done
+
+rm -rf tmp
+
+echo "Followers configured."
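+
+# Optional spot check (illustrative; assumes curl is available in the
+# appliance image): a configured follower should report OK on its health
+# endpoint, the same endpoint the HAProxy health check polls.
+#   kubectl exec <follower-pod-name> -- curl -sk https://localhost/health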
diff --git a/8_print_config.sh b/8_print_config.sh
new file mode 100755
index 0000000..6ac2dab
--- /dev/null
+++ b/8_print_config.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+set -euo pipefail
+
+. utils.sh
+
+set_namespace $CONJUR_NAMESPACE_NAME
+
+announce "
+Conjur cluster is ready.
+
+Conjur UI address:
+  https://$(get_master_service_ip):443
+
+Conjur admin credentials:
+  admin / $(rotate_api_key)
+"
diff --git a/README.md b/README.md
index dfd3cf6..30fc50c 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,147 @@
 # kubernetes-conjur-deploy
-Scripts for deploying Conjur to Kubernetes
+
+This repository contains scripts for deploying a Conjur v4 cluster to a
+Kubernetes environment.
+
+# Setup
+
+The Conjur deployment scripts pick up configuration details from local
+environment variables. The setup instructions below walk you through the
+necessary steps for configuring your Kubernetes environment and show you
+which variables need to be set before deploying.
+
+### Docker
+
+[Install Docker](https://www.docker.com/get-docker) on your local machine if
+you do not already have it.
+
+You must have push access to a Docker registry in order to run these deploy
+scripts. Provide the URL and full path of your registry:
+
+```
+export DOCKER_REGISTRY_URL=
+export DOCKER_REGISTRY_PATH=/
+```
+
+If you are using a private registry, you will also need to provide login
+credentials that are used by the deployment scripts to create a [secret for
+pulling images](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-secret-in-the-cluster-that-holds-your-authorization-token):
+
+```
+export DOCKER_USERNAME=
+export DOCKER_PASSWORD=
+export DOCKER_EMAIL=
+```
+
+Please make sure that you are logged in to the registry before deploying.
+
+### Kubernetes
+
+Before deploying Conjur, you must first use `kubectl` to connect to your
+Kubernetes environment with a user that has the `cluster-admin` role. The
+user must be able to create namespaces and cluster roles.
+
+#### Conjur Namespace
+
+Provide the name of a namespace in which to deploy Conjur:
+
+```
+export CONJUR_NAMESPACE_NAME=
+```
+
+#### The `conjur-authenticator` Cluster Role
+
+Conjur's Kubernetes authenticator requires the following privileges:
+
+- [`"get"`, `"list"`] on `"pods"` for confirming a pod's namespace membership
+- [`"create"`, `"get"`] on `"pods/exec"` for injecting a certificate into a pod
+
+The deploy scripts include a manifest that defines the `conjur-authenticator`
+cluster role, which grants these privileges. Create the role now (note that
+your user will need to have the `cluster-admin` role to do so):
+
+```
+kubectl create -f ./manifests/conjur-authenticator-role.yaml
+```
+
+### Conjur
+
+#### Appliance Image
+
+You need to obtain a Docker image of the Conjur v4 appliance and push it to
+your Docker registry with the tag:
+
+```
+$DOCKER_REGISTRY_PATH/conjur-appliance:$CONJUR_NAMESPACE_NAME
+```
+
+#### Appliance Configuration
+
+When setting up a new Conjur installation, you must provide an account name
+and a password for the admin account:
+
+```
+export CONJUR_ACCOUNT=
+export CONJUR_ADMIN_PASSWORD=
+```
+
+Conjur uses [declarative policy](https://developer.conjur.net/policy) to
+control access to secrets. After deploying Conjur, you need to load a policy
+that defines a `webservice` to represent the Kubernetes authenticator:
+
+```
+- !policy
+  id: conjur/authn-k8s/{{ SERVICE_ID }}
+```
+
+The `SERVICE_ID` should describe the Kubernetes cluster in which your Conjur
+deployment resides. For example, it might be something like
+`kubernetes/prod`. For Conjur configuration purposes, you need to provide
+this value to the Conjur deploy scripts like so:
+
+```
+export AUTHENTICATOR_SERVICE_ID=
+```
+
+The `SERVICE_ID` can be anything you like, but it must match the value that
+you intend to use in your Conjur policy.
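+
+For reference, `3_deploy_conjur_cluster.sh` substitutes this value into the
+follower manifest, where it becomes part of the `CONJUR_AUTHENTICATORS`
+environment variable. A sketch of that substitution, using the example
+service ID above:
+
+```
+export AUTHENTICATOR_SERVICE_ID=kubernetes/prod
+sed -e "s#{{ AUTHENTICATOR_SERVICE_ID }}#$AUTHENTICATOR_SERVICE_ID#g" \
+  ./manifests/conjur-follower.yaml | kubectl create -f -
+```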
+
+# Usage
+
+### Deploying Conjur
+
+Run `./start` to deploy Conjur. This executes the numbered scripts in
+sequence to create and configure a Conjur cluster comprising one Master,
+two Standbys, and two read-only Followers. The final step prints the
+information you need to interact with Conjur through the CLI or UI.
+
+### Conjur CLI
+
+The deploy scripts include a manifest for creating a Conjur CLI container
+within the Kubernetes environment that can then be used to interact with
+Conjur. Deploy the CLI pod and open a shell in it:
+
+```
+kubectl create -f ./manifests/conjur-cli.yaml
+kubectl exec -it [cli-pod-name] bash
+```
+
+Once inside the CLI container, use the admin credentials to connect to
+Conjur:
+
+```
+conjur init -h conjur-master
+```
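+
+Then authenticate as the admin user. A sketch, reusing the login command the
+deploy scripts themselves run in `utils.sh` (assumes `CONJUR_ADMIN_PASSWORD`
+is available in the CLI container's shell):
+
+```
+conjur authn login -u admin -p $CONJUR_ADMIN_PASSWORD
+```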
+
+Follow our [CLI usage instructions](https://developer.conjur.net/cli#quickstart)
+to get started with the Conjur CLI.
+
+### Conjur UI
+
+Visit the Conjur UI URL in your browser and log in with the admin
+credentials to access the Conjur UI.
+
+# Test App Demo
+
+The [kubernetes-conjur-demo repo](https://github.com/conjurdemos/kubernetes-conjur-demo)
+sets up test applications that retrieve secrets from Conjur and serves as a
+useful reference when setting up your own applications to integrate with
+Conjur.
diff --git a/build/haproxy/Dockerfile b/build/haproxy/Dockerfile
new file mode 100644
index 0000000..cc727d2
--- /dev/null
+++ b/build/haproxy/Dockerfile
@@ -0,0 +1,12 @@
+FROM haproxy:1.7
+
+RUN apt-get clean
+RUN apt-get update
+RUN apt-get install -y \
+    curl \
+    jq \
+    vim
+
+COPY conjur-health-check.sh /root/
+COPY start.sh /
+ENTRYPOINT sleep infinity
diff --git a/build/haproxy/build.sh b/build/haproxy/build.sh
new file mode 100755
index 0000000..b02fe5e
--- /dev/null
+++ b/build/haproxy/build.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+set -euo pipefail
+
+docker build -t haproxy:$CONJUR_NAMESPACE_NAME .
diff --git a/build/haproxy/conjur-health-check.sh b/build/haproxy/conjur-health-check.sh
new file mode 100755
index 0000000..438a7bb
--- /dev/null
+++ b/build/haproxy/conjur-health-check.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+# HAProxy external-check invokes this with <proxy_addr> <proxy_port>
+# <server_addr> <server_port>; the Conjur server address is argument 3.
+server_address=$3
+
+conjur_ok=$(curl -k -s https://$server_address/health | jq '.ok')
+if [[ "$conjur_ok" == "true" ]]; then
+  exit 0
+fi
+exit 1
diff --git a/build/haproxy/start.sh b/build/haproxy/start.sh
new file mode 100755
index 0000000..ae1f7f2
--- /dev/null
+++ b/build/haproxy/start.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+# Kill the running haproxy daemon, if any.
+haproxy_pid=$(ps aux | grep haproxy | grep -v grep | awk '{print $2}')
+if [[ "$haproxy_pid" != "" ]]; then
+  kill -9 $haproxy_pid
+fi
+
+# haproxy.cfg is created and updated by the update_haproxy.sh script.
+haproxy -D -f /usr/local/etc/haproxy/haproxy.cfg
diff --git a/haproxy/haproxy.template.cfg b/haproxy/haproxy.template.cfg
new file mode 100644
index 0000000..00eaf20
--- /dev/null
+++ b/haproxy/haproxy.template.cfg
@@ -0,0 +1,23 @@
+global
+    maxconn 256
+    external-check
+
+defaults
+    timeout connect 5000ms
+    timeout client 50000ms
+    timeout server 50000ms
+
+frontend f_conjur_master_http
+    mode tcp
+    bind *:443
+    default_backend b_conjur_master_http
+
+frontend f_conjur_master_pg
+    mode tcp
+    bind *:5432
+    default_backend b_conjur_master_pg
+
+frontend f_conjur_master_ldap
+    mode tcp
+    bind *:636
+    default_backend b_conjur_master_ldap
diff --git a/haproxy/update_haproxy.sh b/haproxy/update_haproxy.sh
new file mode 100755
index 0000000..db8488e
--- /dev/null
+++ b/haproxy/update_haproxy.sh
@@ -0,0 +1,95 @@
+#!/bin/bash
+set -eo pipefail
+
+# This script updates the HAProxy configuration for currently running Conjur
+# containers and restarts the proxy daemon.
+
+. ./utils.sh
+
+declare template_file="./haproxy/haproxy.template.cfg"
+declare destination_file="./haproxy/haproxy.cfg"
+
+# Takes one argument: the name of the HAProxy pod to update.
+main() {
+  haproxy_pod_name=$1
+
+  cp $template_file $destination_file
+  echo "# This file is generated by $0 in $(pwd)." >> $destination_file
+  update_http_servers
+  update_pg_servers
+  update_ldap_servers
+
+  copy_file_to_container "$destination_file" "/usr/local/etc/haproxy/haproxy.cfg" "$haproxy_pod_name"
+  kubectl exec $haproxy_pod_name /start.sh
+}
+
+# Appends Conjur HTTP server info in HAProxy format to haproxy.cfg.
+update_http_servers() {
+  cat <<CONFIG >> $destination_file
+
+# HTTP backend info
+# Generated by $0 in $(pwd)
+backend b_conjur_master_http
+    mode tcp
+    balance static-rr
+    option external-check
+    default-server inter 5s fall 3 rise 2
+    external-check path "/usr/bin:/usr/local/bin"
+    external-check command "/root/conjur-health-check.sh"
+CONFIG
+
+  pod_list=$(kubectl get pods -l app=conjur-node --no-headers | awk '{print $1}')
+
+  for pname in $pod_list; do
+    pod_ip=$(kubectl describe pod $pname | grep "IP:" | awk '{print $2}')
+    echo -e '\t' server $pname $pod_ip:443 check >> $destination_file
+  done
+}
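+
+# Each update_* helper appends one `server` line per conjur-node pod; a
+# generated entry looks roughly like this (pod name and IP illustrative):
+#   server conjur-cluster-3826363043-ld6z6 10.4.0.12:443 check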
+
+# Appends Conjur PostgreSQL server info in HAProxy format to haproxy.cfg.
+update_pg_servers() {
+  cat <<CONFIG >> $destination_file
+
+# PG backend info
+# Generated by $0 in $(pwd)
+backend b_conjur_master_pg
+    mode tcp
+    balance static-rr
+    option external-check
+    default-server inter 5s fall 3 rise 2
+    external-check path "/usr/bin:/usr/local/bin"
+    external-check command "/root/conjur-health-check.sh"
+CONFIG
+
+  pod_list=$(kubectl get pods -l app=conjur-node --no-headers | awk '{print $1}')
+
+  for pname in $pod_list; do
+    pod_ip=$(kubectl describe pod $pname | grep "IP:" | awk '{print $2}')
+    echo -e '\t' server $pname $pod_ip:5432 check >> $destination_file
+  done
+}
+
+# Appends Conjur LDAP server info in HAProxy format to haproxy.cfg.
+update_ldap_servers() {
+  cat <<CONFIG >> $destination_file
+
+# LDAP backend info
+# Generated by $0 in $(pwd)
+backend b_conjur_master_ldap
+    mode tcp
+    balance static-rr
+    option external-check
+    default-server inter 30s fall 3 rise 2
+    external-check path "/usr/bin:/usr/local/bin"
+    external-check command "/root/conjur-health-check.sh"
+CONFIG
+
+  pod_list=$(kubectl get pods -l app=conjur-node --no-headers | awk '{print $1}')
+
+  for pname in $pod_list; do
+    pod_ip=$(kubectl describe pod $pname | grep "IP:" | awk '{print $2}')
+    echo -e '\t' server $pname $pod_ip:636 check >> $destination_file
+  done
+}
+
+main "$@"
diff --git a/manifests/conjur-authenticator-role.yaml b/manifests/conjur-authenticator-role.yaml
new file mode 100644
index 0000000..164f544
--- /dev/null
+++ b/manifests/conjur-authenticator-role.yaml
@@ -0,0 +1,12 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1 # TODO: change this to match your k8s version
+kind: ClusterRole
+metadata:
+  name: conjur-authenticator
+rules:
+- apiGroups: [""] # "" indicates the core API group
+  resources: ["pods"]
+  verbs: ["get", "list"]
+- apiGroups: [""]
+  resources: ["pods/exec"]
+  verbs: ["create", "get"]
diff --git a/manifests/conjur-cli.yaml b/manifests/conjur-cli.yaml
new file mode 100644
index 0000000..0edad0d
--- /dev/null
+++ b/manifests/conjur-cli.yaml
@@ -0,0 +1,25 @@
+---
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+  labels:
+    app: conjur-cli
+  name: conjur-cli
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: conjur-cli
+  template:
+    metadata:
+      labels:
+        app: conjur-cli
+      name: conjur-cli
+    spec:
+      containers:
+      - name: conjur-cli
+        image: cyberark/conjur-cli:4
+        imagePullPolicy: Always
+        command: ["sleep"]
+        args: ["infinity"]
+
diff --git a/manifests/conjur-cluster.yaml b/manifests/conjur-cluster.yaml
new file mode 100644
index 0000000..117f197
--- /dev/null
+++ b/manifests/conjur-cluster.yaml
@@ -0,0 +1,34 @@
+---
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+  labels:
+    app: conjur-cluster
+  name: conjur-cluster
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: conjur-node
+  template:
+    metadata:
+      labels:
+        app: conjur-node
+      name: conjur-node
+      role: unset # set to master or standby when configured
+    spec:
+      containers:
+      - name: conjur-appliance
+        image: {{ CONJUR_APPLIANCE_IMAGE }}
+        imagePullPolicy: Always
+        ports:
+        - containerPort: 443
+          name: https
+        - containerPort: 636
+          name: ldaps
+        - containerPort: 5432
+          name: pg-main
+        - containerPort: 5433
+          name: pg-audit
+      imagePullSecrets:
+      - name: conjurregcred
diff --git a/manifests/conjur-follower.yaml b/manifests/conjur-follower.yaml
new file mode 100644
index 0000000..d9b379e
--- /dev/null
+++ b/manifests/conjur-follower.yaml
@@ -0,0 +1,48 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: conjur-follower
+  labels:
+    app: conjur-follower
+spec:
+  ports:
+  - port: 443
+    name: https
+  - port: 636
+    name: ldaps
+  selector:
+    app: conjur-follower
+
+---
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+  name: conjur-follower
+spec:
+  replicas: 2
+  template:
+    metadata:
+      labels:
+        app: conjur-follower
+      name: conjur-follower
+      role: follower
+    spec:
+      containers:
+      - name: conjur-appliance
+        image: {{ CONJUR_APPLIANCE_IMAGE }}
+        imagePullPolicy: Always
+        env:
+        - name: CONJUR_AUTHENTICATORS
+          value: authn-k8s/{{ AUTHENTICATOR_SERVICE_ID }}
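+          # Enables the Kubernetes authenticator (authn-k8s) on followers
+          # only; this service ID must match the one declared in Conjur
+          # policy (see the README).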
+        ports:
+        - containerPort: 443
+          name: https
+        - containerPort: 636
+          name: ldaps
+        - containerPort: 5432
+          name: pg-main
+        - containerPort: 5433
+          name: pg-audit
+      imagePullSecrets:
+      - name: conjurregcred
diff --git a/manifests/haproxy-conjur-master.yaml b/manifests/haproxy-conjur-master.yaml
new file mode 100644
index 0000000..9f174b0
--- /dev/null
+++ b/manifests/haproxy-conjur-master.yaml
@@ -0,0 +1,42 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: conjur-master
+  labels:
+    app: haproxy-conjur-master
+spec:
+  ports:
+  - port: 443
+    name: https
+  - port: 636
+    name: ldaps
+  - port: 5432
+    name: pg-main
+  selector:
+    app: haproxy-conjur-master
+  type: LoadBalancer
+
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: haproxy-conjur-master
+  labels:
+    app: haproxy-conjur-master
+spec:
+  containers:
+  - name: haproxy
+    image: {{ DOCKER_IMAGE }}
+    imagePullPolicy: Always
+    ports:
+    - containerPort: 443
+      name: https
+    - containerPort: 636
+      name: ldaps
+    - containerPort: 5432
+      name: pg-main
+    - containerPort: 5433
+      name: pg-audit
+  imagePullSecrets:
+  - name: conjurregcred
diff --git a/start b/start
new file mode 100755
index 0000000..54ccb78
--- /dev/null
+++ b/start
@@ -0,0 +1,16 @@
+#!/bin/bash
+set -euo pipefail
+
+./0_check_dependencies.sh
+
+./stop
+
+./1_create_conjur_namespace.sh
+./2_build_and_push_containers.sh
+./3_deploy_conjur_cluster.sh
+./4_configure_master.sh
+./5_create_load_balancer.sh
+./6_configure_standbys.sh
+./7_configure_followers.sh
+./8_print_config.sh
+
diff --git a/stop b/stop
new file mode 100755
index 0000000..4f8272c
--- /dev/null
+++ b/stop
@@ -0,0 +1,26 @@
+#!/bin/bash
+set -euo pipefail
+
+. utils.sh
+
+set_namespace default
+
+if has_namespace $CONJUR_NAMESPACE_NAME; then
+  kubectl delete namespace $CONJUR_NAMESPACE_NAME
+
+  printf "Waiting for $CONJUR_NAMESPACE_NAME namespace deletion to complete"
+
+  while : ; do
+    printf "..."
+
+    if has_namespace "$CONJUR_NAMESPACE_NAME"; then
+      sleep 5
+    else
+      break
+    fi
+  done
+
+  echo ""
+fi
+
+echo "Conjur environment purged."
diff --git a/utils.sh b/utils.sh
new file mode 100755
index 0000000..4045c1f
--- /dev/null
+++ b/utils.sh
@@ -0,0 +1,121 @@
+#!/bin/bash
+
+check_env_var() {
+  var_name=$1
+
+  if [ "${!var_name}" = "" ]; then
+    echo "You must set $1 before running these scripts."
+    exit 1
+  fi
+}
+
+announce() {
+  echo "++++++++++++++++++++++++++++++++++++++"
+  echo ""
+  echo "$@"
+  echo ""
+  echo "++++++++++++++++++++++++++++++++++++++"
+}
+
+environment_domain() {
+  env_url=$(environment_url)
+  protocol="$(echo $env_url | grep :// | sed -e's,^\(.*://\).*,\1,g')"
+  echo ${env_url/$protocol/}
+}
+
+has_namespace() {
+  if kubectl get namespace "$1" > /dev/null; then
+    true
+  else
+    false
+  fi
+}
+
+docker_tag_and_push() {
+  docker_tag="${DOCKER_REGISTRY_PATH}/$1:$CONJUR_NAMESPACE_NAME"
+  docker tag $1:$CONJUR_NAMESPACE_NAME $docker_tag
+  docker push $docker_tag
+}
+
+copy_file_to_container() {
+  local from=$1
+  local to=$2
+  local pod_name=$3
+
+  kubectl cp "$from" $pod_name:"$to"
+}
+
+get_master_pod_name() {
+  pod_list=$(kubectl get pods -l app=conjur-node --no-headers | awk '{ print $1 }')
+  echo $pod_list | awk '{print $1}'
+}
+
+get_master_service_ip() {
+  echo $(kubectl get service conjur-master -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+}
+
+mastercmd() {
+  local master_pod=$(kubectl get pod -l role=master --no-headers | awk '{ print $1 }')
+  local interactive=$1
+
+  if [ $interactive = '-i' ]; then
+    shift
+    kubectl exec -i $master_pod -- $@
+  else
+    kubectl exec $master_pod -- $@
+  fi
+}
+
+set_namespace() {
+  # General utility for switching namespaces in Kubernetes.
+  # Expects exactly 1 argument, a namespace name.
+  if [[ $# != 1 ]]; then
+    printf "Error in %s/%s - expecting 1 arg.\n" $(pwd) $0
+    exit 1
+  fi
+
+  kubectl config set-context $(kubectl config current-context) --namespace="$1" > /dev/null
+}
+
+wait_for_node() {
+  wait_for_it -1 "kubectl describe pod $1 | grep Status: | grep -q Running"
+}
+
+function wait_for_it() {
+  local timeout=$1
+  local spacer=2
+  shift
+
+  if ! [ $timeout = '-1' ]; then
+    local times_to_run=$((timeout / spacer))
+
+    echo "Waiting for $@ up to $timeout s"
+    for i in $(seq $times_to_run); do
+      eval $@ && echo 'Success!' && break
+      echo -n .
+      sleep $spacer
+    done
+
+    eval $@
+  else
+    echo "Waiting for $@ forever"
+
+    while ! eval $@; do
+      echo -n .
+      sleep $spacer
+    done
+    echo 'Success!'
+  fi
+}
+
+rotate_api_key() {
+  set_namespace $CONJUR_NAMESPACE_NAME
+
+  master_pod_name=$(get_master_pod_name)
+
+  kubectl exec $master_pod_name -- conjur authn login -u admin -p $CONJUR_ADMIN_PASSWORD > /dev/null
+  api_key=$(kubectl exec $master_pod_name -- conjur user rotate_api_key)
+  kubectl exec $master_pod_name -- conjur authn logout > /dev/null
+
+  echo $api_key
+}
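+
+# wait_for_it usage, for illustration: a timeout in seconds bounds the
+# wait, while -1 waits forever (hypothetical conditions):
+#   wait_for_it 60 "kubectl get pods --no-headers | grep -q Running"
+#   wait_for_it -1 "curl -ks https://conjur-master/health > /dev/null"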