forked from openshift-metal3/dev-scripts
-
Notifications
You must be signed in to change notification settings - Fork 0
/
07_deploy_masters.sh
executable file
·83 lines (69 loc) · 3.21 KB
/
07_deploy_masters.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
#!/usr/bin/bash
set -eux
source utils.sh
source common.sh
source ocp_install_env.sh

# Note This logic will likely run in a container (on the bootstrap VM)
# for the final solution, but for now we'll prototype the workflow here
export OS_TOKEN=fake-token
export OS_URL=http://localhost:6385/

# Block until the Ironic API answers, so the baremetal CLI calls below
# don't race the service coming up.
wait_for_json ironic \
    "${OS_URL}/v1/nodes" \
    10 \
    -H "Accept: application/json" -H "Content-Type: application/json" -H "User-Agent: wait-for-json" -H "X-Auth-Token: $OS_TOKEN"

# Both the ironic and ironic-inspector containers must be running.
# Quote the command substitution (SC2086) and let grep -c do the
# counting instead of piping through wc -l; grep -c still prints "0"
# on no match, and its non-zero exit is harmless inside the [ ] test.
if [ "$(sudo podman ps | grep -w -c -e "ironic$" -e "ironic-inspector$")" != 2 ] ; then
    echo "Can't find required containers"
    exit 1
fi
# Clean up any master nodes left registered by a previous run so they
# can be re-created from scratch below.
nodes=$(openstack baremetal node list)
for node in $(jq -r .nodes[].name "${MASTER_NODES_FILE}"); do
    # =~ with an unquoted RHS does a regex/substring match of the node
    # name against the full listing output.
    if [[ $nodes =~ $node ]]; then
        # Undeploy fails if the node was never deployed; that's fine —
        # we only need it gone before delete.
        openstack baremetal node undeploy "$node" --wait || true
        openstack baremetal node delete "$node"
    fi
done
# Register the master nodes with Ironic from the nodes JSON file.
openstack baremetal create "$MASTER_NODES_FILE"

# The masters receive the OCP master ignition config via a config-drive.
mkdir -p configdrive/openstack/latest
cp ocp/master.ign configdrive/openstack/latest/user_data

for node in $(jq -r .nodes[].name "$MASTER_NODES_FILE"); do
    # FIXME(shardy) we should parameterize the image
    # -sSf makes curl fail loudly on an HTTP error instead of silently
    # recording an error page as the checksum; cut keeps only the hash
    # in case the .md5sum file is in md5sum's "hash  filename" format
    # (the previous unquoted expansion would have word-split that).
    checksum=$(curl -sSf "http://172.22.0.1/images/${RHCOS_IMAGE_FILENAME_LATEST}.md5sum" | cut -d' ' -f1)
    openstack baremetal node set "$node" \
        --instance-info "image_source=http://172.22.0.1/images/${RHCOS_IMAGE_FILENAME_LATEST}" \
        --instance-info "image_checksum=${checksum}" \
        --instance-info root_gb=25 \
        --property root_device="{\"name\": \"$ROOT_DISK\"}"
    # manage + provide moves the node to "available" so it can be deployed.
    openstack baremetal node manage "$node" --wait
    openstack baremetal node provide "$node" --wait
done
# Kick off deployment of every master without --wait so they image in
# parallel; completion is polled separately via the provision_state.
for node in $(jq -r .nodes[].name "$MASTER_NODES_FILE"); do
    openstack baremetal node deploy --config-drive configdrive "$node"
done
# Note we have to tolerate failure of the listing command due to this issue
# https://storyboard.openstack.org/#!/story/2005093
# Count master nodes whose provision_state is "active".
# grep -c still prints "0" when nothing matches (exiting non-zero), so
# "|| true" absorbs the failure without corrupting the output — unlike
# "|| echo 0", which would emit a second line.
num_active_masters() {
    openstack baremetal node list --fields name --fields provision_state \
        | grep master | grep -c active || true
}

# Wait for all three masters to become active, bailing out as soon as
# any node reports an error/failed provision state.
while [ "$(num_active_masters)" != "3" ]; do
    if openstack baremetal node list --fields name --fields provision_state | grep master | grep -e error -e failed; then
        openstack baremetal node list
        echo "Error detected waiting for baremetal nodes to become active" >&2
        exit 1
    fi
    sleep 10
done
echo "Master nodes active"
openstack baremetal node list
# Count DHCP leases held by master nodes on the libvirt baremetal network.
num_master_leases() {
    # grep -c prints "0" and exits non-zero on no match; "|| true" keeps
    # the count usable under set -e.
    sudo virsh net-dhcp-leases baremetal | grep -c master || true
}

# Wait for at least three master leases; -lt (rather than -ne) avoids
# spinning forever if stale leases push the count above three.
while [ "$(num_master_leases)" -lt 3 ]; do
    sleep 10
done
echo "Master nodes up, you can ssh to the following IPs with core@<IP>"
sudo virsh net-dhcp-leases baremetal
# Wait for the API VIP to answer ssh with a master-* hostname.
# NOTE(review): $SSH is deliberately left unquoted — it presumably
# expands to the ssh binary plus options from the sourced env; confirm.
while [[ ! $(timeout -k 9 5 $SSH "core@api.${CLUSTER_NAME}.${BASE_DOMAIN}" hostname) =~ master- ]]; do
    echo "Waiting for the master API to become ready..."
    sleep 10
done

# Count master nodes the cluster reports as Ready.
num_ready_masters() {
    # grep -c prints "0" and exits non-zero on no match; "|| true" keeps
    # the count usable under set -e.
    oc --config ocp/auth/kubeconfig get nodes | grep -c "master-[0-2] *Ready" || true
}

while [ "$(num_ready_masters)" -ne 3 ]; do
    sleep 10
done
oc --config ocp/auth/kubeconfig get nodes
echo "Cluster up, you can interact with it via oc --config ocp/auth/kubeconfig <command>"