Forked from stolostron/bootstrap-ks (fork count: 0).
Notifications: you must be signed in to change notification settings.
provision.sh — executable file, 143 lines (116 LOC), 4.87 KB.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
#!/bin/bash
# provision.sh — create an OpenShift Dedicated (OSD) cluster on AWS via the ocm CLI.
# Required env vars: AWS_ACCOUNT_ID, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, OCM_TOKEN.
# Optional env vars: CLUSTER_NAME, ADMIN_USER, ADMIN_PASSWORD, AWS_REGION,
#                    AWS_NODE_COUNT, AWS_MACHINE_TYPE, OCM_URL.

# Color codes for bash output
BLUE='\e[36m'
GREEN='\e[32m'
RED='\e[31m'
YELLOW='\e[33m'
CLEAR='\e[39m'

# Help for MacOS — force byte-wise, locale-independent behavior for tr/head
export LC_ALL=C

#----DEFAULTS----#
# Generate a short random cluster identifier (2 lowercase alphanumeric chars)
# for resource tagging purposes.
RANDOM_IDENTIFIER=$(head /dev/urandom | LC_CTYPE=C tr -dc a-z0-9 | head -c 2; echo '')

# Ensure USER has a value (Jenkins agents may not export one)
if [ -z "$JENKINS_HOME" ]; then
    USER=${USER:-"unknown"}
else
    USER=${USER:-"jenkins"}
fi

# Ensure ADMIN_USER/PASSWORD have values; the generated password is 80 random
# alphanumeric characters.
ADMIN_USER=${ADMIN_USER:-"Cluster-Admin"}
ADMIN_PASSWORD=${ADMIN_PASSWORD:-"$(head /dev/urandom | LC_CTYPE=C tr -dc A-Za-z0-9 | head -c 80; echo '')"}

# First 7 characters of the user name, used to build a default resource name
SHORTNAME=$(printf '%s' "$USER" | head -c 7)

# Generate a default resource name; "odaw" marks OSD-on-AWS resources
RESOURCE_NAME="$SHORTNAME-$RANDOM_IDENTIFIER"
NAME_SUFFIX="odaw"

# Default to us-east-1 with a 3-node m5.xlarge worker pool
AWS_REGION=${AWS_REGION:-"us-east-1"}
AWS_NODE_COUNT=${AWS_NODE_COUNT:-"3"}
AWS_MACHINE_TYPE=${AWS_MACHINE_TYPE:-"m5.xlarge"}

# OCM_URL can be one of: 'production', 'staging', 'integration'
OCM_URL=${OCM_URL:-"staging"}
#----VALIDATE ENV VARS----#
# Validate that we have all required env vars and exit with a failure if any
# are missing. All missing vars are reported before exiting so the user can
# fix them in one pass.
missing=0
for required_var in AWS_ACCOUNT_ID AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY OCM_TOKEN; do
    # ${!required_var} — bash indirect expansion: the value of the named var
    if [ -z "${!required_var}" ]; then
        printf "${RED}%s env var not set. flagging for exit.${CLEAR}\n" "$required_var"
        missing=1
    fi
done
if [ "$missing" -ne 0 ]; then
    exit "$missing"
fi

# Allow the caller to override the generated resource name prefix
if [ -n "$CLUSTER_NAME" ]; then
    RESOURCE_NAME="$CLUSTER_NAME-$RANDOM_IDENTIFIER"
fi
printf "${BLUE}Using $RESOURCE_NAME to identify all created resources.${CLEAR}\n"

#----VERIFY ocm CLI----#
# command -v is the portable "is this tool installed?" check
if ! command -v ocm > /dev/null; then
    printf "${RED}Could not find the ocm cli, exiting. Try running ./install.sh.${CLEAR}\n"
    exit 1
fi
#----SIGN IN TO ocm----#
# If a cached ocm session exists (~/.ocm.json), ping the Red Hat SSO token
# endpoint with its refresh token; otherwise perform a fresh login.
if [ -f ~/.ocm.json ]; then
    REFRESH_TOKEN=$(jq -r '.refresh_token' ~/.ocm.json)
    CLIENT_ID=$(jq -r '.client_id' ~/.ocm.json)
    # NOTE(review): the response body is discarded, so this request does not
    # update ~/.ocm.json — it only exercises the refresh token. Confirm this
    # is intentional (ocm itself refreshes tokens on use).
    curl --silent https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token \
        -d grant_type=refresh_token \
        -d "client_id=${CLIENT_ID}" \
        -d "refresh_token=${REFRESH_TOKEN}" > /dev/null
else
    ocm login --token="$OCM_TOKEN" --url "$OCM_URL"
fi
#----CREATE CLUSTER----#
OSDAWS_CLUSTER_NAME="${RESOURCE_NAME}-${NAME_SUFFIX}"
printf "${BLUE}Creating an OSD cluster on AWS named ${OSDAWS_CLUSTER_NAME}.${CLEAR}\n"
# NOTE(review): AWS credentials are passed on the command line and can be
# visible in `ps` output on shared hosts — ocm has no stdin alternative here.
if ! ocm create cluster --ccs \
        --aws-access-key-id "$AWS_ACCESS_KEY_ID" \
        --aws-account-id "$AWS_ACCOUNT_ID" \
        --aws-secret-access-key "$AWS_SECRET_ACCESS_KEY" \
        --compute-nodes "$AWS_NODE_COUNT" \
        --compute-machine-type "$AWS_MACHINE_TYPE" \
        --region "$AWS_REGION" \
        "$OSDAWS_CLUSTER_NAME"; then
    printf "${RED}Failed to provision cluster. See error above. Exiting${CLEAR}\n"
    exit 1
fi
printf "${GREEN}Successfully provisioned cluster ${OSDAWS_CLUSTER_NAME}.${CLEAR}\n"

CLUSTER_NAME=$OSDAWS_CLUSTER_NAME
printf "${GREEN}Cluster name: '${CLUSTER_NAME}'${CLEAR}\n"
# Look up the new cluster's ID and base DNS domain for the state file
CLUSTER_ID=$(ocm list clusters --parameter search="name like '${CLUSTER_NAME}'" --no-headers | awk '{ print $1 }')
printf "${GREEN}Cluster ID: '${CLUSTER_ID}'${CLEAR}\n"
CLUSTER_DOMAIN=$(ocm get "/api/clusters_mgmt/v1/clusters/${CLUSTER_ID}" | jq -r '.dns.base_domain')
printf "${GREEN}Cluster domain: '${CLUSTER_DOMAIN}'${CLEAR}\n"

# Configure IDP and users.
# `ocm create idp` fails until the cluster is active, so poll until it
# succeeds rather than racing the provision.
while ! ocm create idp --cluster="$CLUSTER_NAME" --type htpasswd --name htpasswd \
        --username "${ADMIN_USER}" --password "${ADMIN_PASSWORD}"; do
    printf "${YELLOW}Waiting for cluster to become active...${CLEAR}\n"
    sleep 30
done
printf "${GREEN}Adding user ${ADMIN_USER} as admin.${CLEAR}\n"
ocm create user "${ADMIN_USER}" --cluster="$CLUSTER_ID" --group=cluster-admins
ocm create user "${ADMIN_USER}" --cluster="$CLUSTER_ID" --group=dedicated-admins
#-----DUMP STATE FILE----#
# Persist everything needed to log in to (and later destroy) the cluster as a
# JSON state file in the current working directory.
LOGIN_URL="https://console-openshift-console.apps.${OSDAWS_CLUSTER_NAME}.${CLUSTER_DOMAIN}"
STATE_FILE="$(pwd)/${OSDAWS_CLUSTER_NAME}.json"
# Reuse STATE_FILE (the original recomputed the path) and quote it so paths
# containing spaces work.
cat > "$STATE_FILE" <<EOF
{
"CLUSTER_NAME": "${OSDAWS_CLUSTER_NAME}",
"CLUSTER_ID": "${CLUSTER_ID}",
"REGION": "${AWS_REGION}",
"USERNAME": "${ADMIN_USER}",
"PASSWORD": "${ADMIN_PASSWORD}",
"LOGIN_URL": "${LOGIN_URL}",
"OCM_URL": "${OCM_URL}",
"PLATFORM": "OSD-AWS"
}
EOF
# ${CLEAR} added here — the original never reset the color on this line
printf "${GREEN}Cluster provision successful. Cluster named ${OSDAWS_CLUSTER_NAME} created. ${CLEAR}\n"
printf "${GREEN}Console URL: ${LOGIN_URL}\n${CLEAR}"
printf "${GREEN}Username: ${ADMIN_USER}\n${CLEAR}"
printf "${GREEN}Password: *****\n${CLEAR}"
printf "${GREEN}Full Password and username can be found in ${STATE_FILE}\n${CLEAR}"
printf "${GREEN}To destroy this cluster run './destroy.sh ${STATE_FILE}'\n${CLEAR}"