-
Notifications
You must be signed in to change notification settings - Fork 30
/
test-prow-e2e.sh
executable file
·158 lines (129 loc) · 6.8 KB
/
test-prow-e2e.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
#!/usr/bin/env bash
#
# Prow E2E driver for odf-console: installs the ODF operator from a custom
# catalog, swaps in the console image under test ($1), then runs the Cypress
# suite.
# Requires: a logged-in `oc` session; PULL_SECRET (quay.io auth token).
set -eExuo pipefail

# The image under test is mandatory; it is consumed later as ODF_CONSOLE_IMAGE.
if [ $# -eq 0 ]
then
  # Diagnostics go to stderr so they are not mixed into captured stdout.
  echo "odf-console image not provided" >&2
  echo "exiting..." >&2
  exit 1
fi
# Dump cluster state into ${ARTIFACT_DIR} so failed CI runs can be debugged,
# then copy any Cypress screenshots next to the other artifacts.
# Registered as an exit trap, so it may run before the install steps finished
# (hence --ignore-not-found on resources that may not exist yet).
# Globals (read): ARTIFACT_DIR, NS, SCREENSHOTS_DIR
function generateLogsAndCopyArtifacts {
  oc cluster-info dump > "${ARTIFACT_DIR}"/cluster_info.json

  # Pattern for each resource: "-o wide" summary first (truncating the file),
  # then the full YAML appended to the same file.
  oc get secrets -A -o wide > "${ARTIFACT_DIR}"/secrets.yaml
  oc get secrets -A -o yaml >> "${ARTIFACT_DIR}"/secrets.yaml
  oc get catalogsource -A -o wide > "${ARTIFACT_DIR}"/catalogsource.yaml
  oc get catalogsource -A -o yaml >> "${ARTIFACT_DIR}"/catalogsource.yaml
  oc get subscriptions -n openshift-storage -o wide > "${ARTIFACT_DIR}"/subscription_details.yaml
  oc get subscriptions -n openshift-storage -o yaml >> "${ARTIFACT_DIR}"/subscription_details.yaml
  oc get csvs -n openshift-storage -o wide > "${ARTIFACT_DIR}"/csvs.yaml
  oc get csvs -n openshift-storage -o yaml >> "${ARTIFACT_DIR}"/csvs.yaml
  oc get deployments -n openshift-storage -o wide > "${ARTIFACT_DIR}"/deployment_details.yaml
  oc get deployments -n openshift-storage -o yaml >> "${ARTIFACT_DIR}"/deployment_details.yaml
  oc get installplan -n openshift-storage -o wide > "${ARTIFACT_DIR}"/installplan.yaml
  oc get installplan -n openshift-storage -o yaml >> "${ARTIFACT_DIR}"/installplan.yaml
  oc get nodes -o wide > "${ARTIFACT_DIR}"/node.yaml
  oc get nodes -o yaml >> "${ARTIFACT_DIR}"/node.yaml
  # BUGFIX: the FIRST write to each file now uses ">" (was ">>") so a
  # pre-existing file or a re-entered trap never leaves stale appended copies.
  oc get pods -n openshift-storage -o wide > "${ARTIFACT_DIR}"/pod_details_openshift-storage.yaml
  oc get pods -n openshift-storage -o yaml >> "${ARTIFACT_DIR}"/pod_details_openshift-storage.yaml
  oc get StorageCluster --ignore-not-found=true -n openshift-storage -o yaml > "${ARTIFACT_DIR}"/storage-cluster.yaml
  oc get NooBaa --ignore-not-found=true -n openshift-storage -o yaml > "${ARTIFACT_DIR}"/noobaa.yaml

  oc logs --previous=false deploy/odf-operator-controller-manager manager -n openshift-storage > "${ARTIFACT_DIR}"/odf.logs

  # One log file per odf-console pod.
  for pod in $(oc get pods -n "${NS}" --no-headers -o custom-columns=":metadata.name" | grep "odf-console"); do
    echo "$pod"
    oc logs --previous=false "$pod" -n "${NS}" > "${ARTIFACT_DIR}"/"${pod}".logs
  done

  oc get serviceaccounts -n openshift-storage -o wide > "${ARTIFACT_DIR}"/serviceaccount.yaml
  oc get serviceaccounts -n openshift-storage -o yaml >> "${ARTIFACT_DIR}"/serviceaccount.yaml
  oc get console.v1.operator.openshift.io cluster -o yaml > "${ARTIFACT_DIR}"/cluster.yaml

  # Copy Cypress screenshots (if any were produced) into the artifact dir.
  if [ -d "$ARTIFACT_DIR" ] && [ -d "$SCREENSHOTS_DIR" ]; then
    if [[ -z "$(ls -A -- "$SCREENSHOTS_DIR")" ]]; then
      echo "No artifacts were copied."
    else
      echo "Copying artifacts from $(pwd)..."
      cp -r "$SCREENSHOTS_DIR" "${ARTIFACT_DIR}/gui-test-screenshots"
    fi
  fi
}
NS="openshift-storage"
# ARTIFACT_DIR is supplied by Prow; default for local runs.
ARTIFACT_DIR=${ARTIFACT_DIR:=/tmp/artifacts}
SCREENSHOTS_DIR=gui-test-screenshots

# Register the log collector only on EXIT: in bash the EXIT trap also fires
# when the script aborts under `set -e`, so trapping ERR with the same
# function (as before) made the collector run twice on failure. The variables
# above are assigned first so the trap never hits unset names under `set -u`.
trap generateLogsAndCopyArtifacts EXIT

# Disable the default redhat-operators catalog; a custom CatalogSource with
# the same name is applied further below.
oc patch operatorhub.config.openshift.io/cluster -p='{"spec":{"sources":[{"disabled":true,"name":"redhat-operators"}]}}' --type=merge
# Merge the quay.io auth token from ${PULL_SECRET} into the cluster-wide
# pull secret (openshift-config/pull-secret).
# Globals (read): PULL_SECRET
function patchPullSecret {
  # Decode the current .dockerconfigjson payload into a scratch file.
  oc get -n openshift-config secret/pull-secret -ojson | jq -r '.data.".dockerconfigjson"' | base64 -d | jq '.' > secret.json
  # BUGFIX: pass the token via `jq --arg` instead of splicing ${PULL_SECRET}
  # unquoted into the jq program text (SC2086) — the old form broke (or could
  # inject jq code) whenever the token contained quotes or whitespace.
  jq -c --arg auth "${PULL_SECRET}" '.auths."quay.io".auth = $auth' secret.json > temp-auth.json
  jq '.auths."quay.io".email |=""' temp-auth.json > temp-secret.json
  oc set data secret/pull-secret -n openshift-config --from-file=.dockerconfigjson=temp-secret.json
  # Remove scratch files holding secret material.
  rm temp-secret.json temp-auth.json secret.json
  echo "Added Pull Secret"
}
echo "Updating the pull secret"
patchPullSecret
# Apply the custom ODF CatalogSource (replaces the default catalog disabled above).
oc apply -f openshift-ci/odf-catalog-source.yaml ;
echo "Waiting for CatalogSource to be Ready"
# Poll (bounded by `timeout`) until the CatalogSource named "redhat-operators"
# reports READY. The heredoc delimiter is quoted ('EOF'), so nothing is
# expanded here; the inner bash evaluates the jsonpath query itself.
timeout 10m bash <<-'EOF'
until [ "$(oc -n openshift-marketplace get catalogsource -o=jsonpath="{.items[?(@.metadata.name==\"redhat-operators\")].status.connectionState.lastObservedState}")" == "READY" ]; do
sleep 1
done
EOF
echo "Waiting for Catalog image's pod to be running"
# Same polling pattern for the rhceph-dev-icsp pod, with a 5-minute budget.
timeout 5m bash <<-'EOF'
until [ "$(oc get pod -n openshift-storage rhceph-dev-icsp -o=jsonpath="{.status.phase}")" == "Running" ]; do
sleep 1
done
EOF
echo "Creating ImageContentSourcePolicy rules needed for ODF"
# Extract the ICSP manifest shipped inside the catalog pod and apply it.
# NOTE(review): `-it` requests a TTY; presumably tolerated by this CI runner,
# but plain `oc exec` is the usual non-interactive form — confirm.
oc exec -it --namespace openshift-storage rhceph-dev-icsp -- cat /icsp.yaml | oc apply -f -
# Enable console plugin for ODF-Console
export CONSOLE_CONFIG_NAME="cluster"
export ODF_PLUGIN_NAME="odf-console"
echo "Enabling Console Plugin for ODF Operator"
# NOTE(review): JSON-Patch "add" on /spec/plugins overwrites any pre-existing
# plugin list with just [odf-console]; the single-quoted patch body is only
# accepted because the client parses it leniently (YAML) — confirm before
# reusing this pattern elsewhere.
oc patch console.v1.operator.openshift.io ${CONSOLE_CONFIG_NAME} --type=json -p="[{'op': 'add', 'path': '/spec/plugins', 'value':[${ODF_PLUGIN_NAME}]}]"
ODF_CONSOLE_IMAGE="$1"
echo "Waiting for CSV to exist"
# Wait (up to 5 minutes) for the ODF ClusterServiceVersion to appear; the
# quoted heredoc keeps the jsonpath expression unexpanded for the inner bash.
timeout 5m bash <<-'EOF'
until [ ! -z "$(oc get csv -n openshift-storage -o=jsonpath='{.items[?(@.spec.displayName=="OpenShift Data Foundation")].metadata.name}')" ]; do
sleep 1
done
EOF
# [SC2155]
# Declaration and export are split so a failing command substitution is not
# masked by `export` (ShellCheck SC2155).
ODF_CSV_NAME="$(oc get csv -n openshift-storage -o=jsonpath='{.items[?(@.spec.displayName=="OpenShift Data Foundation")].metadata.name}')"
export ODF_CSV_NAME
# Point the odf-console container (deployment index 1) at the image under test.
oc patch csv "${ODF_CSV_NAME}" -n openshift-storage --type='json' -p \
"[{'op': 'replace', 'path': '/spec/install/spec/deployments/1/spec/template/spec/containers/0/image', 'value': \"${ODF_CONSOLE_IMAGE}\"}]"
# Installation occurs.
# This is also the default case if the CSV is in "Installing" state initially.
# The heredoc delimiter is quoted, so ${ODF_CSV_NAME} is resolved by the inner
# bash from the environment (it was exported above).
timeout 15m bash <<-'EOF'
echo "waiting for ${ODF_CSV_NAME} clusterserviceversion to succeed"
until [ "$(oc -n openshift-storage get csv -o=jsonpath="{.items[?(@.metadata.name==\"${ODF_CSV_NAME}\")].status.phase}")" == "Succeeded" ]; do
sleep 1
done
EOF

# Check the status of the odf-console container. Sometimes even if the csv has
# succeeded (and the odf-console pod status phase is 'Running'), the
# odf-console container can have an unhealthy status (so the E2E tests will
# fail). Accept the first odf-console pod whose first container is "running".
odf_console_container_status=""
for pod in $(oc get pods -n "${NS}" --no-headers -o custom-columns=":metadata.name" | grep "odf-console"); do
  # "${pod}" is quoted (SC2086); the state object has exactly one key
  # (waiting/running/terminated), extracted with jq.
  odf_console_container_status="$(oc -n openshift-storage get pod "${pod}" -o jsonpath='{.status.containerStatuses[0].state}' | jq -r 'keys[0]')"
  echo "${pod} container status: ${odf_console_container_status}"
  if [[ "${odf_console_container_status}" == "running" ]]; then
    break
  fi
done
if [[ "${odf_console_container_status}" != "running" ]]; then
  echo "ERROR: odf-console container is not running." >&2
  exit 1
fi
# Directory where the installer wrote cluster credentials (Prow layout).
INSTALLER_DIR=${INSTALLER_DIR:=${ARTIFACT_DIR}/installer}
# Console ("bridge") credentials/URL consumed by the Cypress suite.
# Declaration and export are split to avoid masking a failing command
# substitution (ShellCheck SC2155) — same convention as ODF_CSV_NAME above.
BRIDGE_KUBEADMIN_PASSWORD="$(cat "${KUBEADMIN_PASSWORD_FILE:-${INSTALLER_DIR}/auth/kubeadmin-password}")"
export BRIDGE_KUBEADMIN_PASSWORD
BRIDGE_BASE_ADDRESS="$(oc get consoles.config.openshift.io cluster -o jsonpath='{.status.consoleURL}')"
export BRIDGE_BASE_ADDRESS
# Disable color codes in Cypress since they do not render well CI test logs.
# https://docs.cypress.io/guides/guides/continuous-integration.html#Colors
export NO_COLOR=1
# Install dependencies.
yarn install
# Run tests.
yarn run test-cypress-headless
# Generate Cypress report. (The EXIT trap collects logs/screenshots afterward.)
yarn run cypress-postreport