#!/usr/bin/env bash
set -eo pipefail
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd -P)
function variables_from_context() {
    # Generate a new kubeconfig file in the local directory
    KUBECONFIG=".kubeconfig"

    # Extract details from the eksctl configuration file
    CLUSTER_NAME=$(yq eval '.metadata.name' "${EKSCTL_CONFIG}")
    AWS_REGION=$(yq eval '.metadata.region' "${EKSCTL_CONFIG}")
    ACCOUNT_ID=$(${AWS_CMD} sts get-caller-identity | jq -r .Account)

    # Use the default bucket?
    if [ -z "${CONTAINER_REGISTRY_BUCKET}" ]; then
        CONTAINER_REGISTRY_BUCKET="container-registry-${CLUSTER_NAME}-${ACCOUNT_ID}"
    fi

    CREATE_S3_BUCKET="false"
    if ! ${AWS_CMD} s3api head-bucket --bucket "${CONTAINER_REGISTRY_BUCKET}" >/dev/null 2>&1; then
        CREATE_S3_BUCKET="true"
    fi

    export KUBECONFIG
    export CLUSTER_NAME
    export AWS_REGION
    export ACCOUNT_ID
    export CREATE_S3_BUCKET
    export CONTAINER_REGISTRY_BUCKET
}
function check_prerequisites() {
    EKSCTL_CONFIG=$1
    if [ ! -f "${EKSCTL_CONFIG}" ]; then
        echo "The eksctl configuration file ${EKSCTL_CONFIG} does not exist."
        exit 1
    else
        echo "Using eksctl configuration file: ${EKSCTL_CONFIG}"
    fi
    export EKSCTL_CONFIG

    if [ -z "${CERTIFICATE_ARN}" ]; then
        echo "Missing CERTIFICATE_ARN environment variable."
        exit 1
    fi

    if [ -z "${DOMAIN}" ]; then
        echo "Missing DOMAIN environment variable."
        exit 1
    fi

    AWS_CMD="aws"
    if [ -z "${AWS_PROFILE}" ]; then
        echo "Missing (optional) AWS_PROFILE environment variable. Using the default credentials."
        unset AWS_PROFILE
    else
        echo "Using the AWS profile: ${AWS_PROFILE}"
        AWS_CMD="aws --profile ${AWS_PROFILE}"
    fi
    export AWS_CMD

    if [ -z "${ROUTE53_ZONEID}" ]; then
        echo "Missing (optional) ROUTE53_ZONEID environment variable."
        echo "Please configure the CNAME with the hostname of the load balancer manually."
    else
        echo "Using external-dns. No manual intervention required."
    fi
}
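
# Example invocation (a sketch; the certificate ARN, domain, and zone ID are placeholders,
# and ROUTE53_ZONEID and AWS_PROFILE are optional):
#   CERTIFICATE_ARN="arn:aws:acm:<region>:<account-id>:certificate/<id>" \
#   DOMAIN="gitpod.example.com" \
#   ROUTE53_ZONEID="<hosted-zone-id>" \
#   AWS_PROFILE="default" \
#   ./setup.sh --install
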
# Bootstrap AWS CDK - https://docs.aws.amazon.com/cdk/latest/guide/bootstrapping.html
function ensure_aws_cdk() {
    pushd /tmp > /dev/null 2>&1
    cdk bootstrap "aws://${ACCOUNT_ID}/${AWS_REGION}"
    popd > /dev/null 2>&1
}
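
# Note: a sketch for verification only, not part of the setup flow. CDK bootstrap creates a
# CloudFormation stack (named "CDKToolkit" by default); its status can be inspected with:
#   ${AWS_CMD} cloudformation describe-stacks --stack-name CDKToolkit --region "${AWS_REGION}"
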
function install() {
    check_prerequisites "$1"
    variables_from_context
    ensure_aws_cdk

    # Check that the certificate exists
    if ! ${AWS_CMD} acm describe-certificate --certificate-arn "${CERTIFICATE_ARN}" --region "${AWS_REGION}" >/dev/null 2>&1; then
        echo "The certificate ${CERTIFICATE_ARN} does not exist."
        exit 1
    fi

    if ! eksctl get cluster "${CLUSTER_NAME}" > /dev/null 2>&1; then
        # https://eksctl.io/usage/managing-nodegroups/
        eksctl create cluster --config-file "${EKSCTL_CONFIG}" --without-nodegroup --kubeconfig "${KUBECONFIG}"
    else
        ${AWS_CMD} eks update-kubeconfig --name "${CLUSTER_NAME}"
    fi

    # Disable the default AWS CNI provider.
    # The AWS VPC CNI limits the number of pods that can run on an EC2 instance:
    # https://github.com/awslabs/amazon-eks-ami/blob/master/files/eni-max-pods.txt
    # https://docs.aws.amazon.com/eks/latest/userguide/pod-networking.html
    kubectl patch ds -n kube-system aws-node -p '{"spec":{"template":{"spec":{"nodeSelector":{"non-calico": "true"}}}}}'
    # Install Calico.
    kubectl apply -f https://docs.projectcalico.org/manifests/calico-vxlan.yaml
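
    # Optional sanity check (a sketch, not part of the original flow): the manifest above is
    # expected to create the calico-node DaemonSet in kube-system, and aws-node should no
    # longer schedule on any node because of the "non-calico" node selector:
    #   kubectl get ds -n kube-system calico-node aws-node
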
    # Create secret with container registry credentials
    if [ -n "${IMAGE_PULL_SECRET_FILE}" ] && [ -f "${IMAGE_PULL_SECRET_FILE}" ]; then
        kubectl create secret generic gitpod-image-pull-secret \
            --from-file=.dockerconfigjson="${IMAGE_PULL_SECRET_FILE}" \
            --type=kubernetes.io/dockerconfigjson >/dev/null 2>&1 || true
    fi

    if ${AWS_CMD} iam get-role --role-name "${CLUSTER_NAME}-region-${AWS_REGION}-role-eksadmin" > /dev/null 2>&1; then
        KUBECTL_ROLE_ARN=$(${AWS_CMD} iam get-role --role-name "${CLUSTER_NAME}-region-${AWS_REGION}-role-eksadmin" | jq -r .Role.Arn)
    else
        echo "Creating Role for EKS access"
        # Create IAM role and mapping to Kubernetes user and groups.
        POLICY=$(printf '{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":"arn:aws:iam::%s:root"},"Action":"sts:AssumeRole","Condition":{}}]}' "$ACCOUNT_ID")
        KUBECTL_ROLE_ARN=$(${AWS_CMD} iam create-role \
            --role-name "${CLUSTER_NAME}-region-${AWS_REGION}-role-eksadmin" \
            --description "Kubernetes role (for AWS IAM Authenticator for Kubernetes)." \
            --assume-role-policy-document "$POLICY" \
            --output text \
            --query 'Role.Arn')
    fi
    export KUBECTL_ROLE_ARN

    # Check if the identity mapping already exists.
    # Manage IAM users and roles: https://eksctl.io/usage/iam-identity-mappings/
    if ! eksctl get iamidentitymapping --cluster "${CLUSTER_NAME}" --arn "${KUBECTL_ROLE_ARN}" > /dev/null 2>&1; then
        echo "Creating mapping from IAM role ${KUBECTL_ROLE_ARN}"
        eksctl create iamidentitymapping \
            --cluster "${CLUSTER_NAME}" \
            --arn "${KUBECTL_ROLE_ARN}" \
            --username eksadmin \
            --group system:masters
    fi
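
    # A sketch for later use (not executed here): operators in this account can get
    # cluster-admin access through the mapped role, for example:
    #   aws eks update-kubeconfig --name "${CLUSTER_NAME}" --region "${AWS_REGION}" --role-arn "${KUBECTL_ROLE_ARN}"
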
    # Create cluster nodes defined in the configuration file
    eksctl create nodegroup --config-file="${EKSCTL_CONFIG}"

    # Restart tigera-operator
    kubectl delete pod -n tigera-operator -l k8s-app=tigera-operator > /dev/null 2>&1

    MYSQL_GITPOD_USERNAME="gitpod"
    # The generated password cannot exceed 41 characters (RDS limitation)
    MYSQL_GITPOD_PASSWORD=$(openssl rand -hex 18)
    MYSQL_GITPOD_SECRET="mysql-gitpod-token"
    MYSQL_GITPOD_ENCRYPTION_KEY='[{"name":"general","version":1,"primary":true,"material":"4uGh1q8y2DYryJwrVMHs0kWXJlqvHWWt/KJuNi04edI="}]'
    SECRET_STORAGE="object-storage-gitpod-token"

    # Store the database password in AWS SSM Parameter Store
    SSM_KEY="/gitpod/cluster/${CLUSTER_NAME}/region/${AWS_REGION}"
    ${AWS_CMD} ssm put-parameter \
        --overwrite \
        --name "${SSM_KEY}" \
        --type String \
        --value "${MYSQL_GITPOD_PASSWORD}" \
        --region "${AWS_REGION}" > /dev/null 2>&1
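
    # A sketch for later reference (not executed here): the stored database password can be
    # read back from SSM Parameter Store with:
    #   ${AWS_CMD} ssm get-parameter --name "${SSM_KEY}" --region "${AWS_REGION}" --query Parameter.Value --output text
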
    # Deploy CDK stacks
    cdk deploy \
        --context clusterName="${CLUSTER_NAME}" \
        --context region="${AWS_REGION}" \
        --context domain="${DOMAIN}" \
        --context certificatearn="${CERTIFICATE_ARN}" \
        --context identityoidcissuer="$(${AWS_CMD} eks describe-cluster --name "${CLUSTER_NAME}" --query "cluster.identity.oidc.issuer" --output text --region "${AWS_REGION}")" \
        --require-approval never \
        --outputs-file cdk-outputs.json \
        --all

    output_config
}
function output_config() {
    MYSQL_HOST=$(jq -r '. | to_entries[] | select(.key | startswith("ServicesRDS")).value.MysqlEndpoint ' < cdk-outputs.json)
    S3_ACCESS_KEY=$(jq -r '. | to_entries[] | select(.key | startswith("ServicesRegistry")).value.AccessKeyId ' < cdk-outputs.json)
    S3_SECRET_KEY=$(jq -r '. | to_entries[] | select(.key | startswith("ServicesRegistry")).value.SecretAccessKey ' < cdk-outputs.json)

    cat << EOF
==========================
🎉🥳🔥🧡🚀
Your cloud infrastructure is ready to install Gitpod. Please visit
https://www.gitpod.io/docs/self-hosted/latest/getting-started#step-4-install-gitpod
for your next steps.

Passwords may change on subsequent runs of this script.

=================
Config Parameters
=================
Domain Name: ${DOMAIN}

Database
========
Host: ${MYSQL_HOST}
Username: ${MYSQL_GITPOD_USERNAME}
Password: ${MYSQL_GITPOD_PASSWORD}
Port: 3306

Container Registry Storage
==========================
S3 BUCKET NAME: ${CONTAINER_REGISTRY_BUCKET}
S3 ACCESS KEY: ${S3_ACCESS_KEY}
S3 SECRET KEY: ${S3_SECRET_KEY}

TLS Certificates
================
Issuer name: gitpod-selfsigned-issuer
Issuer type: Issuer

Once Gitpod is installed and the DNS records are updated, run the following commands:

# remove the shiftfs-module-loader container.
# TODO: remove once the container is removed from the installer
kubectl patch daemonset ws-daemon --type json -p='[{"op": "remove", "path": "/spec/template/spec/initContainers/3"}]'

# Use the following hostname for the DNS CNAME record
kubectl get ingress gitpod -o json | jq -r .status.loadBalancer.ingress[0].hostname
EOF
}
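
# A hedged sketch (not part of the original flow; assumes a Route53 hosted zone and that
# LB_HOSTNAME holds the ingress hostname printed above). If ROUTE53_ZONEID was not provided,
# the CNAME record for the domain has to be created manually, for example:
#   LB_HOSTNAME=$(kubectl get ingress gitpod -o json | jq -r .status.loadBalancer.ingress[0].hostname)
#   ${AWS_CMD} route53 change-resource-record-sets --hosted-zone-id "<hosted-zone-id>" \
#     --change-batch "{\"Changes\":[{\"Action\":\"UPSERT\",\"ResourceRecordSet\":{\"Name\":\"${DOMAIN}\",\"Type\":\"CNAME\",\"TTL\":300,\"ResourceRecords\":[{\"Value\":\"${LB_HOSTNAME}\"}]}}]}"
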
function uninstall() {
    check_prerequisites "$1"
    variables_from_context

    read -p "Are you sure you want to delete: Gitpod, Services/Registry, Services/RDS, Services, Addons, Setup (y/n)? " -n 1 -r
    if [[ $REPLY =~ ^[Yy]$ ]]; then
        if ! ${AWS_CMD} eks describe-cluster --name "${CLUSTER_NAME}" --region "${AWS_REGION}" > /dev/null; then
            exit 1
        fi

        KUBECTL_ROLE_ARN=$(${AWS_CMD} iam get-role --role-name "${CLUSTER_NAME}-region-${AWS_REGION}-role-eksadmin" | jq -r .Role.Arn)
        export KUBECTL_ROLE_ARN
        SSM_KEY="/gitpod/cluster/${CLUSTER_NAME}/region/${AWS_REGION}"

        cdk destroy \
            --context clusterName="${CLUSTER_NAME}" \
            --context region="${AWS_REGION}" \
            --context domain="${DOMAIN}" \
            --context certificatearn="${CERTIFICATE_ARN}" \
            --context identityoidcissuer="$(${AWS_CMD} eks describe-cluster --name "${CLUSTER_NAME}" --query "cluster.identity.oidc.issuer" --output text --region "${AWS_REGION}")" \
            --require-approval never \
            --force \
            --all \
            && cdk context --clear \
            && eksctl delete cluster "${CLUSTER_NAME}" \
            && ${AWS_CMD} ssm delete-parameter --name "${SSM_KEY}" --region "${AWS_REGION}"
    fi
}
function auth() {
    AUTHPROVIDERS_CONFIG=${1:-"auth-providers-patch.yaml"}
    if [ ! -f "${AUTHPROVIDERS_CONFIG}" ]; then
        echo "The auth providers configuration file ${AUTHPROVIDERS_CONFIG} does not exist."
        exit 1
    else
        echo "Using the auth providers configuration file: ${AUTHPROVIDERS_CONFIG}"
    fi

    # Patch the configuration with the user-provided auth providers (see the sketch after this function)
    kubectl --kubeconfig .kubeconfig patch configmap auth-providers-config --type merge --patch "$(cat "${AUTHPROVIDERS_CONFIG}")"
    # Restart the server component so it picks up the new configuration
    kubectl --kubeconfig .kubeconfig rollout restart deployment/server
}
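
# The file passed to auth() is applied as a merge patch to the auth-providers-config ConfigMap.
# The shape below is only an illustration with hypothetical values; the exact schema is defined
# by the Gitpod documentation, not by this script:
#
#   data:
#     auth-providers.json: |
#       [{
#         "id": "GitHub",
#         "host": "github.com",
#         "type": "GitHub",
#         "oauth": {
#           "clientId": "<client-id>",
#           "clientSecret": "<client-secret>",
#           "callBackUrl": "https://<DOMAIN>/auth/github/callback"
#         }
#       }]
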
function main() {
    if [[ $# -ne 1 ]]; then
        echo "Usage: $0 [--install|--uninstall|--auth]"
        exit 1
    fi

    case $1 in
        '--install')
            install "eks-cluster.yaml"
        ;;
        '--uninstall')
            uninstall "eks-cluster.yaml"
        ;;
        '--auth')
            auth "auth-providers-patch.yaml"
        ;;
        *)
            echo "Unknown command: $1"
            echo "Usage: $0 [--install|--uninstall|--auth]"
            exit 1
        ;;
    esac
    echo "done"
}
main "$@"