Skip to content

Commit

Permalink
automate gcp creation
Browse files Browse the repository at this point in the history
  • Loading branch information
ManuelDittmar committed Jan 24, 2024
1 parent d411964 commit f838872
Show file tree
Hide file tree
Showing 6 changed files with 424 additions and 4 deletions.
101 changes: 101 additions & 0 deletions google/multi-region/active-active/Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
# ------------------------------------
# Set the following for your specific environment
# Already have a Cluster? Set these values to point to your existing environment
# Otherwise, these values will be used to create a new Cluster

# GCP project
project ?= camunda-researchanddevelopment
# GCP region 0 — actually a ZONE name (region = zone for simplicity throughout
# this Makefile; all gcloud calls below address the clusters with --zone).
# NOTE: keep comments on their own line — a trailing "# …" after the value
# would embed the whitespace before it into the variable.
region0 ?= us-east1-c
# GCP region 1 (zone, see note above)
region1 ?= europe-west1-b
# GKE cluster name 0
clusterName0 ?= manus-region-0
# GKE cluster name 1
clusterName1 ?= manus-region-1
# Firewall rule name
firewallRule ?= zeebe-between-clusters-manu
# Brokers per Region (?= like the other knobs so it is overridable from the
# command line / environment)
brokersPerRegion ?= 4


# ------------------------------------
# The following variables should not be changed except for advanced use cases
# $(CURDIR) is the directory make was started in; it works on both Windows and
# Unix make, so no OS switch (and no shell fork per expansion) is needed.
root ?= $(CURDIR)/../../../..


.PHONY: gcp-create-cluster
gcp-create-cluster:
	gcloud config set project $(project)
	# region0/region1 hold ZONE names (e.g. us-east1-c), so the clusters must
	# be created with --zone, not --region — the describe/firewall targets
	# below also address them with --zone. Spot VMs + autoscaling keep the
	# research setup cheap; autoscaler grows each cluster from 1 up to 24 nodes.
	gcloud container clusters create $(clusterName0) \
		--zone $(region0) \
		--num-nodes=1 \
		--enable-autoscaling --max-nodes=24 --min-nodes=1 \
		--enable-ip-alias \
		--machine-type=n2-standard-2 \
		--disk-type "pd-ssd" \
		--spot \
		--maintenance-window=4:00 \
		--release-channel=regular \
		--cluster-version=latest
	gcloud container clusters create $(clusterName1) \
		--zone $(region1) \
		--num-nodes=1 \
		--enable-autoscaling --max-nodes=24 --min-nodes=1 \
		--enable-ip-alias \
		--machine-type=n2-standard-2 \
		--disk-type "pd-ssd" \
		--spot \
		--maintenance-window=4:00 \
		--release-channel=regular \
		--cluster-version=latest

.PHONY: gcp-firewall
gcp-firewall:
	# Open the Zeebe (9600, 26501, 26502) and Elasticsearch (9200, 9300) ports
	# between the two clusters: read the GCE network tag from the first node of
	# each cluster and the pod CIDR (clusterIpv4Cidr) of each cluster, then
	# create one ingress rule covering both directions.
	# The lookups and the final create run in ONE shell (trailing backslashes)
	# because each shell variable is needed by the last command.
	# Fixed: the node lookup passed both `-o name` and `--output jsonpath=…` —
	# conflicting output flags, and `-o name` would yield `node/<name>`, which
	# is not a valid GCE instance name.
	networkTag0=$$(gcloud compute instances describe $$(kubectl get nodes --output jsonpath={.items[0].metadata.name} --context gke_$(project)_$(region0)_$(clusterName0)) --zone $(region0) --format="get(tags.items)"); \
	networkTag1=$$(gcloud compute instances describe $$(kubectl get nodes --output jsonpath={.items[0].metadata.name} --context gke_$(project)_$(region1)_$(clusterName1)) --zone $(region1) --format="get(tags.items)"); \
	ipRange0=$$(gcloud container clusters describe $(clusterName0) --zone $(region0) --format='value(clusterIpv4Cidr)'); \
	ipRange1=$$(gcloud container clusters describe $(clusterName1) --zone $(region1) --format='value(clusterIpv4Cidr)'); \
	gcloud compute firewall-rules create $(firewallRule) --direction=INGRESS --priority=999 --network=default --action=ALLOW --rules=tcp:9600,tcp:26501,tcp:26502,tcp:9300,tcp:9200,udp:26502,udp:9300,udp:9200 --source-ranges=$$ipRange0,$$ipRange1 --target-tags=$$networkTag0,$$networkTag1

# Wire up cross-cluster DNS ("chaining") so each cluster can resolve the other
# cluster's service names — presumably kube-dns stub domains; see
# setup-dns-chaining.py for the actual mechanism (TODO confirm).
# Argument order: project, zone0, cluster0, zone1, cluster1, brokers-per-region.
.PHONY: dns-chaining
dns-chaining:
	python3 setup-dns-chaining.py $(project) $(region0) $(clusterName0) $(region1) $(clusterName1) $(brokersPerRegion)
## TODO teardown

# Render camunda-values-region-0.yaml and camunda-values-region-1.yaml from
# camunda-values-template.yaml by substituting three placeholders with sed:
#   $REGIONID$      -> 0 or 1
#   $ELASTIC_URL_2$ -> the Elasticsearch URL of the OTHER region (cross export)
#   $CONTACTPOINTS$ -> comma-joined list of all broker addresses in BOTH
#                      regions (brokersPerRegion entries per region, port 26502)
# Everything runs in one shell (trailing backslashes) because $$contactPoints
# is needed by the later sed calls. The inner /bin/bash -c is used for the
# bash array that builds the join list.
# NOTE(review): `sed -i 's/…/'` is the GNU sed form; BSD/macOS sed needs
# `sed -i '' …` — confirm the intended platform.
# `@' on the sed delimiters avoids escaping the URLs' slashes; `$$' escapes
# the literal `$' of the placeholders from make.
.PHONY: generate-camunda-values
generate-camunda-values:
	@contactPoints=$$(/bin/bash -c ' \
	join_addrs=(); \
	for region in $(region0) $(region1); do \
	for i in `seq 0 $$(($(brokersPerRegion)-1))`; do \
	join_addrs+=("camunda-zeebe-$$i.camunda-zeebe.$$region.svc.cluster.local:26502"); \
	done; \
	done; \
	IFS=,; echo "$${join_addrs[*]}";'); \
	echo "Initial contact points: $$contactPoints"; \
	cp camunda-values-template.yaml camunda-values-region-0.yaml; \
	sed -i 's/\$$REGIONID\$$/0/' camunda-values-region-0.yaml; \
	sed -i 's@\$$ELASTIC_URL_2\$$@http://elasticsearch-master-headless.$(region1).svc.cluster.local:9200@' camunda-values-region-0.yaml; \
	sed -i 's@\$$CONTACTPOINTS\$$@'"$$contactPoints"'@' camunda-values-region-0.yaml; \
	cp camunda-values-template.yaml camunda-values-region-1.yaml; \
	sed -i 's/\$$REGIONID\$$/1/' camunda-values-region-1.yaml; \
	sed -i 's@\$$ELASTIC_URL_2\$$@http://elasticsearch-master-headless.$(region0).svc.cluster.local:9200@' camunda-values-region-1.yaml; \
	sed -i 's@\$$CONTACTPOINTS\$$@'"$$contactPoints"'@' camunda-values-region-1.yaml

.PHONY: install-camunda
install-camunda:
	# Install one Camunda deployment per cluster, each into a namespace named
	# after its zone — the generated values files use these namespace names in
	# their cross-region DNS addresses, so they must match region0/region1.
	# Namespace creation uses the apply idiom so the target can be re-run after
	# a partial failure instead of aborting on "AlreadyExists".
	# NOTE(review): assumes the `camunda` Helm repo is already added
	# (helm repo add camunda https://helm.camunda.io) — confirm in the runbook.
	kubectl config use-context gke_$(project)_$(region0)_$(clusterName0)
	kubectl create namespace $(region0) --dry-run=client -o yaml | kubectl apply -f -
	helm install camunda camunda/camunda-platform -f camunda-values-region-0.yaml -n $(region0)
	kubectl config use-context gke_$(project)_$(region1)_$(clusterName1)
	kubectl create namespace $(region1) --dry-run=client -o yaml | kubectl apply -f -
	helm install camunda camunda/camunda-platform -f camunda-values-region-1.yaml -n $(region1)

.PHONY: setup-mraa-gcp
setup-mraa-gcp:
	# End-to-end bring-up. The steps are strictly order-dependent (the firewall
	# and DNS steps need the clusters; install needs the generated values
	# files), but make gives NO ordering guarantee for prerequisites under -j.
	# Recursive $(MAKE) invocations serialize them explicitly; $(MAKE) (not
	# `make`) propagates flags and the jobserver.
	$(MAKE) gcp-create-cluster
	$(MAKE) gcp-firewall
	$(MAKE) dns-chaining
	$(MAKE) generate-camunda-values
	$(MAKE) install-camunda

102 changes: 102 additions & 0 deletions google/multi-region/active-active/camunda-values-region-0.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
# Chart values for the Camunda Platform 8 Helm chart.
# This file deliberately contains only the values that differ from the defaults.
# For changes and documentation, use your favorite diff tool to compare it with:
# https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml
#
# NOTE(review): this file is produced from camunda-values-template.yaml by
# `make generate-camunda-values` (see the Makefile in this directory) — prefer
# editing the template and regenerating over editing this file by hand.

global:
  # Multiregion options for Zeebe
  #
  ## WARNING: In order to get your multi-region setup covered by Camunda enterprise support you MUST get your configuration and run books reviewed by Camunda before going to production.
  # This is necessary for us to be able to help you in case of outages, due to the complexity of operating multi-region setups and the dependencies to the underlying Kubernetes prerequisites.
  # If you operate this in the wrong way you risk corruption and complete loss of all data especially in the dual-region case.
  # If you can, consider three regions. Please, contact your customer success manager as soon as you start planning a multi-region setup.
  # Camunda reserves the right to limit support if no review was done prior to launch or the review showed significant risks.
  multiregion:
    # number of regions that this Camunda Platform instance is stretched across
    regions: 2
    # unique id of the region. Should start at 0 for easy computation. With 2 regions, you would have region 0 and 1.
    regionId: 0
  identity:
    auth:
      # Disable the Identity authentication
      # it will fall back to basic-auth: demo/demo as default user
      enabled: false

operate:
  env:
    # Shared snapshot repository name — same value in both regions' files.
    - name: CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME
      value: "camunda_backup"
tasklist:
  env:
    - name: CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME
      value: "camunda_backup"

identity:
  enabled: false

optimize:
  enabled: false

connectors:
  enabled: true
  inbound:
    mode: credentials
  resources:
    requests:
      cpu: "100m"
      # NOTE(review): "512M" (decimal megabytes) is mixed with "Gi" binary
      # units below — presumably intentional, but confirm "Mi" wasn't meant.
      memory: "512M"
    limits:
      cpu: "1000m"
      memory: "2Gi"
  env:
    # Matches the demo/demo fallback user enabled by disabling Identity auth.
    - name: CAMUNDA_OPERATE_CLIENT_USERNAME
      value: demo
    - name: CAMUNDA_OPERATE_CLIENT_PASSWORD
      value: demo

zeebe:
  # 8 brokers total = 4 per region (brokersPerRegion in the Makefile);
  # replicationFactor 4 spreads each partition across both regions.
  clusterSize: 8
  partitionCount: 8
  replicationFactor: 4
  env:
    - name: ZEEBE_BROKER_DATA_SNAPSHOTPERIOD
      value: "5m"
    - name: ZEEBE_BROKER_DATA_DISKUSAGECOMMANDWATERMARK
      value: "0.85"
    - name: ZEEBE_BROKER_DATA_DISKUSAGEREPLICATIONWATERMARK
      value: "0.87"
    # All 8 broker addresses across both zones; the namespace component of
    # each DNS name is the zone name (created by `make install-camunda`) and
    # relies on the cross-cluster DNS set up by `make dns-chaining`.
    - name: ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS
      value: "camunda-zeebe-0.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-0.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.europe-west1-b.svc.cluster.local:26502"
    # Second Elasticsearch exporter targeting the OTHER region's cluster
    # (europe-west1-b here) so records are exported to both regions.
    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_CLASSNAME
      value: "io.camunda.zeebe.exporter.ElasticsearchExporter"
    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL
      value: "http://elasticsearch-master-headless.europe-west1-b.svc.cluster.local:9200"
    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_BULK_SIZE
      value: "1"
    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_INDEX_PREFIX
      value: "zeebe-record"
  # Small PVC — research/demo sizing, not production.
  pvcSize: 1Gi

  resources:
    requests:
      cpu: "100m"
      memory: "512M"
    limits:
      cpu: "512m"
      memory: "2Gi"

zeebe-gateway:
  replicas: 1

  resources:
    requests:
      cpu: "100m"
      memory: "512M"
    limits:
      cpu: "1000m"
      memory: "1Gi"

  logLevel: ERROR

elasticsearch:
  enabled: true
102 changes: 102 additions & 0 deletions google/multi-region/active-active/camunda-values-region-1.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
# Chart values for the Camunda Platform 8 Helm chart.
# This file deliberately contains only the values that differ from the defaults.
# For changes and documentation, use your favorite diff tool to compare it with:
# https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml
#
# NOTE(review): this file is produced from camunda-values-template.yaml by
# `make generate-camunda-values` (see the Makefile in this directory) — prefer
# editing the template and regenerating over editing this file by hand.
# It mirrors camunda-values-region-0.yaml except for regionId and the
# cross-region Elasticsearch exporter URL.

global:
  # Multiregion options for Zeebe
  #
  ## WARNING: In order to get your multi-region setup covered by Camunda enterprise support you MUST get your configuration and run books reviewed by Camunda before going to production.
  # This is necessary for us to be able to help you in case of outages, due to the complexity of operating multi-region setups and the dependencies to the underlying Kubernetes prerequisites.
  # If you operate this in the wrong way you risk corruption and complete loss of all data especially in the dual-region case.
  # If you can, consider three regions. Please, contact your customer success manager as soon as you start planning a multi-region setup.
  # Camunda reserves the right to limit support if no review was done prior to launch or the review showed significant risks.
  multiregion:
    # number of regions that this Camunda Platform instance is stretched across
    regions: 2
    # unique id of the region. Should start at 0 for easy computation. With 2 regions, you would have region 0 and 1.
    regionId: 1
  identity:
    auth:
      # Disable the Identity authentication
      # it will fall back to basic-auth: demo/demo as default user
      enabled: false

operate:
  env:
    # Shared snapshot repository name — same value in both regions' files.
    - name: CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME
      value: "camunda_backup"
tasklist:
  env:
    - name: CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME
      value: "camunda_backup"

identity:
  enabled: false

optimize:
  enabled: false

connectors:
  enabled: true
  inbound:
    mode: credentials
  resources:
    requests:
      cpu: "100m"
      # NOTE(review): "512M" (decimal megabytes) is mixed with "Gi" binary
      # units below — presumably intentional, but confirm "Mi" wasn't meant.
      memory: "512M"
    limits:
      cpu: "1000m"
      memory: "2Gi"
  env:
    # Matches the demo/demo fallback user enabled by disabling Identity auth.
    - name: CAMUNDA_OPERATE_CLIENT_USERNAME
      value: demo
    - name: CAMUNDA_OPERATE_CLIENT_PASSWORD
      value: demo

zeebe:
  # 8 brokers total = 4 per region (brokersPerRegion in the Makefile);
  # replicationFactor 4 spreads each partition across both regions.
  clusterSize: 8
  partitionCount: 8
  replicationFactor: 4
  env:
    - name: ZEEBE_BROKER_DATA_SNAPSHOTPERIOD
      value: "5m"
    - name: ZEEBE_BROKER_DATA_DISKUSAGECOMMANDWATERMARK
      value: "0.85"
    - name: ZEEBE_BROKER_DATA_DISKUSAGEREPLICATIONWATERMARK
      value: "0.87"
    # All 8 broker addresses across both zones; identical list in both
    # regions' files. Relies on `make dns-chaining` for cross-cluster lookup.
    - name: ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS
      value: "camunda-zeebe-0.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-0.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.europe-west1-b.svc.cluster.local:26502"
    # Second Elasticsearch exporter targeting the OTHER region's cluster
    # (us-east1-c here) so records are exported to both regions.
    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_CLASSNAME
      value: "io.camunda.zeebe.exporter.ElasticsearchExporter"
    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL
      value: "http://elasticsearch-master-headless.us-east1-c.svc.cluster.local:9200"
    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_BULK_SIZE
      value: "1"
    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_INDEX_PREFIX
      value: "zeebe-record"
  # Small PVC — research/demo sizing, not production.
  pvcSize: 1Gi

  resources:
    requests:
      cpu: "100m"
      memory: "512M"
    limits:
      cpu: "512m"
      memory: "2Gi"

zeebe-gateway:
  replicas: 1

  resources:
    requests:
      cpu: "100m"
      memory: "512M"
    limits:
      cpu: "1000m"
      memory: "1Gi"

  logLevel: ERROR

elasticsearch:
  enabled: true
Loading

0 comments on commit f838872

Please sign in to comment.