forked from linkerd/linkerd2
-
Notifications
You must be signed in to change notification settings - Fork 0
/
_test-helpers.sh
623 lines (540 loc) · 18.8 KB
/
_test-helpers.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
#!/usr/bin/env bash
# Shared helpers for running the Linkerd integration tests.
# Override CI's `set -e` default, so we can catch errors manually and display
# proper messages
set +e
##### Test setup helpers #####
# Tests run when no --name is given.
export default_test_names=(deep external-issuer external-prometheus-deep helm-deep helm-upgrade uninstall upgrade-edge upgrade-stable default-policy-deny)
# Tests that depend on externally-provisioned resources (e.g. rabbitmq).
export external_resource_test_names=(external-resources)
# Full catalog of runnable tests. Use [@] so each name from the sub-arrays
# stays a separate element; [*] would join each sub-array into a single
# whitespace-separated element, breaking per-test iteration and --name checks.
export all_test_names=(cluster-domain cni-calico-deep multicluster "${default_test_names[@]}" "${external_resource_test_names[@]}")
# Images loaded into every test cluster (extended per-test in image_load).
images_load_default=(proxy controller policy-controller web metrics-api tap)
tests_usage() {
  # Print usage/help for the test runner. The program name comes from $0 and
  # the test catalog from the global all_test_names array.
  # Fix: the Usage line previously advertised 'docker|archive|skip', but
  # handle_tests_input only accepts docker, preload or archive.
  progname="${0##*/}"
  echo "Run Linkerd integration tests.
Optionally specify a test with the --name flag: [${all_test_names[*]}]
Note: The cluster-domain, cni-calico-deep and multicluster tests require a custom cluster configuration (see bin/_test-helpers.sh)
Usage:
${progname} [--images docker|preload|archive] [--name test-name] [--skip-cluster-create] /path/to/linkerd
Examples:
# Run all tests in isolated clusters
${progname} /path/to/linkerd
# Run single test in isolated clusters
${progname} --name test-name /path/to/linkerd
# Skip k3d cluster creation and run all tests in default cluster context
${progname} --skip-cluster-create /path/to/linkerd
# Load images from tar files located under the 'image-archives' directory
# Note: This is primarily for CI
${progname} --images archive /path/to/linkerd
Available Commands:
--name: the argument to this option is the specific test to run
--skip-cluster-create: skip k3d cluster creation step and run tests in an existing cluster
--skip-cluster-delete: if the tests succeed, don't delete the created resources nor the cluster
--images: set to 'docker' (default) to load images into the cluster from the local docker cache;
set to 'preload' to also load them from the local docker cache, after having pulled them from
a public registry (appears to be faster than having k3d pulling them itself);
set to 'archive' to load the images from tar files located under the image-archives directory
--cleanup-docker: delete the 'images-archive' directory and prune the docker cache"
}
cleanup_usage() {
  # Show help text for the cleanup entrypoint; program name comes from $0.
  progname="${0##*/}"
  printf '%s\n' "Cleanup Linkerd integration tests.
Usage:
${progname} [--context k8s_context] /path/to/linkerd
Examples:
# Cleanup tests in non-default context
${progname} --context k8s_context /path/to/linkerd
Available Commands:
--context: use a non-default k8s context"
}
handle_tests_input() {
  # Parse the test-runner command line, exporting the results for the rest of
  # the script. Exits 64 (EX_USAGE) on any malformed input.
  export images="docker"
  export test_name=''
  export skip_cluster_create=''
  export skip_cluster_delete=''
  export cleanup_docker=''
  export linkerd_path=""
  while [ "$#" -gt 0 ]; do
    case $1 in
      -h|--help)
        tests_usage "$0"
        exit 0
        ;;
      --images)
        images=$2
        if [ -z "$images" ]; then
          echo 'Error: the argument for --images was not specified' >&2
          tests_usage "$0" >&2
          exit 64
        fi
        case $images in
          docker|archive|preload) ;;
          *)
            echo 'Error: the argument for --images was invalid' >&2
            tests_usage "$0" >&2
            exit 64
            ;;
        esac
        shift 2
        ;;
      --name)
        test_name=$2
        if [ -z "$test_name" ]; then
          echo 'Error: the argument for --name was not specified' >&2
          tests_usage "$0" >&2
          exit 64
        fi
        shift 2
        ;;
      --skip-cluster-create)
        skip_cluster_create=1
        shift
        ;;
      --skip-cluster-delete)
        skip_cluster_delete=1
        shift
        ;;
      --cleanup-docker)
        cleanup_docker=1
        shift
        ;;
      -*)
        # Anything else starting with '-' is an unknown flag.
        echo "Unexpected flag: $1" >&2
        tests_usage "$0" >&2
        exit 64
        ;;
      *)
        # A bare argument is the linkerd binary path; only one is allowed.
        if [ -n "$linkerd_path" ]; then
          echo "Multiple linkerd paths specified:" >&2
          echo " $linkerd_path" >&2
          echo " $1" >&2
          tests_usage "$0" >&2
          exit 64
        fi
        linkerd_path="$1"
        shift
        ;;
    esac
  done
  if [ -z "$linkerd_path" ]; then
    echo "Error: path to linkerd binary is required" >&2
    tests_usage "$0" >&2
    exit 64
  fi
  # --skip-cluster-delete reuses one cluster, so it only makes sense for a
  # single named test.
  if [ -z "$test_name" ] && [ -n "$skip_cluster_delete" ]; then
    echo "Error: must provide --name when using --skip-cluster-delete" >&2
    tests_usage "$0" >&2
    exit 64
  fi
}
handle_cleanup_input() {
  # Parse the cleanup entrypoint's command line, exporting the results.
  # Exits 64 (EX_USAGE) on any malformed input.
  export k8s_context=""
  export linkerd_path=""
  while [ "$#" -gt 0 ]; do
    case $1 in
      -h|--help)
        cleanup_usage "$0"
        exit 0
        ;;
      --context)
        k8s_context=$2
        shift
        shift
        ;;
      -*)
        # Anything else starting with '-' is an unknown flag.
        echo "Unexpected flag: $1" >&2
        cleanup_usage "$0" >&2
        exit 64
        ;;
      *)
        # A bare argument is the linkerd binary path; only one is allowed.
        if [ -n "$linkerd_path" ]; then
          echo "Multiple linkerd paths specified:" >&2
          echo " $linkerd_path" >&2
          echo " $1" >&2
          cleanup_usage "$0" >&2
          exit 64
        fi
        linkerd_path="$1"
        shift
        ;;
    esac
  done
  if [ -z "$linkerd_path" ]; then
    echo "Error: path to linkerd binary is required" >&2
    cleanup_usage "$0" >&2
    exit 64
  fi
}
test_setup() {
  # Resolve the directory holding this script (bin/) and the integration-test
  # directory relative to it, then sanity-check the linkerd binary under test.
  local here
  here=$( cd "${BASH_SOURCE[0]%/*}" && pwd )
  export bindir="$here"
  export test_directory="$here/../test/integration"
  check_linkerd_binary
}
check_linkerd_binary() {
  # Verify that $linkerd_path is an absolute path to an executable that can
  # at least report its client version; exit non-zero otherwise.
  printf 'Checking the linkerd binary...'
  case "$linkerd_path" in
    /*) ;;
    *)
      printf '\n[%s] is not an absolute path\n' "$linkerd_path"
      exit 1
      ;;
  esac
  if [ ! -x "$linkerd_path" ]; then
    printf '\n[%s] does not exist or is not executable\n' "$linkerd_path"
    exit 1
  fi
  exit_code=0
  # Discard output; only the exit status matters here.
  "$linkerd_path" version --client > /dev/null 2>&1
  exit_on_err 'error running linkerd version command'
  printf '[ok]\n'
}
##### Cluster helpers #####
check_cluster() {
# Sanity-check the target cluster: it must be reachable, and must not already
# contain Linkerd resources. Also prints the client/server versions.
check_if_k8s_reachable
kubectl version
check_if_l5d_exists
}
delete_cluster() {
  # Tear down the named k3d cluster; aborts the script on failure.
  local cluster=$1
  "$bindir"/k3d cluster delete "$cluster" 2>&1
  exit_on_err 'error deleting cluster'
}
cleanup_cluster() {
# Strip all Linkerd resources from the current cluster ($context) without
# deleting the cluster itself; the helper's output is discarded.
"$bindir"/test-cleanup --context "$context" "$linkerd_path" > /dev/null 2>&1
exit_on_err 'error removing existing Linkerd resources'
}
setup_min_cluster() {
  # Like setup_cluster, but pins the k3d node image to the v1.20 line so the
  # suite runs against the minimum supported Kubernetes version.
  # NOTE(review): '--image +v1.20' relies on k3d's version-prefix syntax —
  # confirm against the pinned k3d version.
  local name=$1
  export helm_path="$bindir"/helm
  test_setup
  if [ -n "$skip_cluster_create" ]; then
    check_cluster
    return
  fi
  "$bindir"/k3d cluster create "$@" --image +v1.20
  image_load "$name"
  check_cluster
}
setup_cluster() {
  # Create a k3d test cluster (unless --skip-cluster-create was given), load
  # the test images into it, and verify it is ready for the suite.
  # $1 is the cluster name; all arguments are forwarded to 'k3d cluster create'.
  local name=$1
  export helm_path="$bindir"/helm
  test_setup
  if [ -n "$skip_cluster_create" ]; then
    check_cluster
    return
  fi
  "$bindir"/k3d cluster create "$@"
  image_load "$name"
  check_cluster
}
finish() {
  # Post-test teardown for cluster $1, honoring --skip-cluster-delete.
  # If we created the cluster, delete it entirely; otherwise just remove the
  # Linkerd resources from the pre-existing cluster.
  local name=$1
  if [ -n "$skip_cluster_delete" ]; then
    return
  fi
  if [ -z "$skip_cluster_create" ]; then
    delete_cluster "$name"
  else
    cleanup_cluster
  fi
}
check_if_k8s_reachable() {
# Fail fast when the $context cluster cannot serve a trivial request within
# 5 seconds; exit_on_err handles messaging/annotation.
printf 'Checking if there is a Kubernetes cluster available...'
exit_code=0
kubectl --context="$context" --request-timeout=5s get ns > /dev/null 2>&1
exit_on_err 'error connecting to Kubernetes cluster'
printf '[ok]\n'
}
check_if_l5d_exists() {
  # Abort if any Linkerd control-plane resources are already present in the
  # $context cluster, pointing the user at the test-cleanup helper.
  printf 'Checking if Linkerd resources exist on cluster...'
  local resources
  resources=$(kubectl --context="$context" get all,clusterrole,clusterrolebinding,mutatingwebhookconfigurations,validatingwebhookconfigurations,crd -l linkerd.io/control-plane-ns --all-namespaces -oname)
  if [ -n "$resources" ]; then
    # Fix: the cleanup helper lives next to this script in $bindir (see
    # cleanup_cluster), not next to the linkerd binary ($linkerd_path).
    printf '
Linkerd resources exist on cluster:
\n%s\n
Help:
Run: [%s/test-cleanup] ' "$resources" "$bindir"
    exit 1
  fi
  printf '[ok]\n'
}
##### Test runner helpers #####
image_load() {
  # Load the images a test cluster needs. The list starts from
  # images_load_default and grows for deep / CNI variants; the transport is
  # selected by the global $images mode (docker | preload | archive).
  cluster_name=$1
  images_load=("${images_load_default[@]}")
  # Every *deep cluster (including cni-calico-deep) also exercises tracing.
  [[ "$cluster_name" = *deep ]] && images_load+=(jaeger-webhook)
  # The Calico variant additionally needs the CNI plugin image.
  [ "$cluster_name" = "cni-calico-deep" ] && images_load+=(cni-plugin)
  case $images in
    docker)
      "$bindir"/image-load --k3d --cluster "$cluster_name" "${images_load[@]}"
      exit_on_err "error calling '$bindir/image-load'"
      ;;
    preload)
      "$bindir"/image-load --k3d --cluster "$cluster_name" --preload "${images_load[@]}"
      exit_on_err "error calling '$bindir/image-load'"
      ;;
    archive)
      "$bindir"/image-load --k3d --archive --cluster "$cluster_name" "${images_load[@]}"
      exit_on_err "error calling '$bindir/image-load'"
      ;;
  esac
}
start_test() {
# Assemble the k3d cluster-creation arguments for the named test, then
# dispatch to the single- or multi-cluster runner.
local name=$1
local config=(--k3s-arg '--disable=local-storage,metrics-server@server:0')
case $name in
cluster-domain)
# Custom cluster DNS domain; no load balancer needed.
config=("$name" "${config[@]}" --no-lb --k3s-arg --cluster-domain=custom.domain --k3s-arg '--disable=servicelb,traefik@server:0')
;;
cni-calico-deep)
# Flannel is disabled so Calico provides the CNI; the pod CIDR is fixed to
# the range the Calico manifests expect.
config=("$name" "${config[@]}" --no-lb --k3s-arg --write-kubeconfig-mode=644 --k3s-arg --flannel-backend=none --k3s-arg --cluster-cidr=192.168.0.0/16 --k3s-arg '--disable=servicelb,traefik@server:0')
;;
multicluster)
# No cluster name prefixed here: start_multicluster_test creates 'source'
# and 'target' clusters itself, sharing one docker network.
config=("${config[@]}" --network multicluster-test)
;;
*)
config=("$name" "${config[@]}" --no-lb --k3s-arg '--disable=servicelb,traefik@server:0')
;;
esac
if [ "$name" == "multicluster" ]; then
start_multicluster_test "${config[@]}"
else
start_single_test "${config[@]}"
fi
}
start_single_test() {
  # Create a cluster for test $1 (helm-deep needs the minimum supported k8s
  # version), run the matching run_<name>_test function, and tear down.
  # NOTE: 'name' is deliberately not declared local, matching the original.
  name=$1
  case "$name" in
    helm-deep) setup_min_cluster "$@" ;;
    *) setup_cluster "$@" ;;
  esac
  if [ -n "$cleanup_docker" ]; then
    # Reclaim disk space on constrained CI hosts before running the test.
    rm -rf image-archives
    docker system prune --force --all
  fi
  run_"$name"_test
  exit_on_err "error calling 'run_${name}_test'"
  finish "$name"
}
start_multicluster_test() {
# Create the 'source' and 'target' clusters with identical configuration,
# optionally reclaim docker disk space, run the multicluster suite, then
# tear both clusters down.
setup_cluster source "$@"
setup_cluster target "$@"
if [ -n "$cleanup_docker" ]; then
rm -rf image-archives
docker system prune --force --all
fi
run_multicluster_test
exit_on_err "error calling 'run_multicluster_test'"
finish source
finish target
}
multicluster_link() {
# Emit the multicluster link manifest for cluster $1, using the traefik
# load-balancer ingress IP of the current $context cluster as the target
# API server address (k3s serves the API on 6443).
lbIP=$(kubectl --context="$context" get svc -n kube-system traefik -o 'go-template={{ (index .status.loadBalancer.ingress 0).ip }}')
"$linkerd_path" multicluster link --log-level debug --api-server-address "https://${lbIP}:6443" --cluster-name "$1" --set "enableHeadlessServices=true"
}
run_test(){
# Run one Go integration test file/package ($1) with the standard flag set;
# any extra arguments are forwarded to 'go test'. Exits the script directly
# on failure ('go test' emits its own GitHub annotations, so exit_on_err is
# deliberately not used here).
# NOTE(review): default_allow_policy is empty unless set by the caller (only
# run_default-policy-deny_test sets it) — confirm the test binary accepts an
# empty value.
local filename=$1
shift
printf 'Test script: [%s] Params: [%s]\n' "${filename##*/}" "$*"
# Exit on failure here
GO111MODULE=on go test -test.timeout=60m --failfast --mod=readonly "$filename" --linkerd="$linkerd_path" --helm-path="$helm_path" --default-allow-policy="$default_allow_policy" --k8s-context="$context" --integration-tests "$@" || exit 1
}
# Returns the latest version for the release channel
# $1: release channel to check
latest_release_channel() {
  # Query the Linkerd version service and extract the newest version string
  # for the channel (e.g. "stable-2.11.1"). The dots are escaped so they only
  # match a literal '.'; the previous unescaped pattern let '.' match any
  # character, risking false matches in the JSON payload.
  curl -s https://versioncheck.linkerd.io/version.json | grep -o "$1-[0-9]*\.[0-9]*\.[0-9]*"
}
# Install a specific Linkerd version.
# $1 - URL to use to download specific Linkerd version
# $2 - Linkerd version
install_version() {
# NOTE: 'tmp' is intentionally NOT local — upgrade_test() reads it afterwards
# to locate the downloaded CLI under $tmp/.linkerd2/bin.
tmp=$(mktemp -d -t l5dbin.XXX)
local install_url=$1
local version=$2
# The install script drops the CLI under $HOME/.linkerd2, so HOME is pointed
# at the temp dir to keep the download self-contained.
curl -s "$install_url" | HOME=$tmp sh > /dev/null 2>&1
# Shadows the global linkerd_path for this function only: the *downloaded*
# release CLI performs the initial install; the build under test is used to
# upgrade it later.
local linkerd_path=$tmp/.linkerd2/bin/linkerd
local test_app_namespace=upgrade-test
(
set -x
# TODO: Use a mix of helm override flags and CLI flags and remove this condition
# once stable-2.10 is out
edge_regex='(edge)-([0-9]+\.[0-9]+\.[0-9]+)'
if [[ "$version" =~ $edge_regex ]]; then
"$linkerd_path" install --set proxyInit.ignoreInboundPorts="1234\,5678" --controller-log-level debug | kubectl --context="$context" apply -f - 2>&1
else
"$linkerd_path" install --skip-inbound-ports '1234,5678' --controller-log-level debug | kubectl --context="$context" apply -f - 2>&1
fi
)
# exit_on_err must stay outside the subshell so a failure terminates the
# whole script.
exit_on_err "install_version() - installing $version failed"
(
set -x
"$linkerd_path" check --wait 60m 2>&1
)
exit_on_err 'install_version() - linkerd check failed'
#Now we need to install the app that will be used to verify that upgrade does not break anything
kubectl --context="$context" create namespace "$test_app_namespace" > /dev/null 2>&1
kubectl --context="$context" label namespaces "$test_app_namespace" 'test.linkerd.io/is-test-data-plane'='true' > /dev/null 2>&1
(
set -x
"$linkerd_path" inject "$test_directory/testdata/upgrade_test.yaml" | kubectl --context="$context" apply --namespace="$test_app_namespace" -f - 2>&1
)
exit_on_err 'install_version() - linkerd inject failed'
}
upgrade_test() {
# Install the latest release of the given channel plus the viz extension,
# then run the install test upgrading to the build under test.
# $1 - release channel (edge|stable)
# $2 - install-script URL for that channel
local release_channel=$1
local install_url=$2
local upgrade_version
upgrade_version=$(latest_release_channel "$release_channel")
if [ -z "$upgrade_version" ]; then
echo 'error getting upgrade_version'
exit 1
fi
install_version "$install_url" "$upgrade_version"
# Install viz extension
# NOTE: the global $tmp was set by install_version above; the released CLI
# lives under it.
local tmp_linkerd_path=$tmp/.linkerd2/bin/linkerd
(
set -x
"$tmp_linkerd_path" viz install | kubectl --context="$context" apply -f - 2>&1
)
exit_on_err "upgrade_test() - installing viz extension in $upgrade_version failed"
run_test "$test_directory/install_test.go" --upgrade-from-version="$upgrade_version"
}
# Run the upgrade-edge test by upgrading the most-recent edge release to the
# HEAD of this branch.
run_upgrade-edge_test() {
# Thin wrapper: resolves the edge install script and delegates to upgrade_test.
edge_install_url="https://run.linkerd.io/install-edge"
upgrade_test "edge" "$edge_install_url"
}
# Run the upgrade-stable test by upgrading the most-recent stable release to the
# HEAD of this branch.
run_upgrade-stable_test() {
# Thin wrapper: resolves the stable install script and delegates to upgrade_test.
stable_install_url="https://run.linkerd.io/install"
upgrade_test "stable" "$stable_install_url"
}
setup_helm() {
# Build the local Helm charts and register the published linkerd stable repo
# so the helm-* tests can install from both local and released charts.
# Exports helm_path/helm_charts and the release names the tests expect.
export helm_path="$bindir"/helm
helm_charts="$( cd "$bindir"/.. && pwd )"/charts
export helm_charts
export helm_release_name='helm-test'
export helm_multicluster_release_name="multicluster-test"
"$bindir"/helm-build
"$helm_path" --kube-context="$context" repo add linkerd https://helm.linkerd.io/stable
exit_on_err 'error setting up Helm'
}
helm_cleanup() {
# Delete the Helm releases and their namespaces. The individual deletes are
# best-effort (|| true) so a partially-installed state still gets cleaned up;
# the subshell's 'set -e' aborts on any other error.
(
set -e
"$helm_path" --kube-context="$context" --namespace linkerd delete "$helm_release_name-crds" || true
"$helm_path" --kube-context="$context" --namespace linkerd delete "$helm_release_name-control-plane" || true
kubectl delete ns/linkerd
"$helm_path" --kube-context="$context" --namespace linkerd-multicluster delete "$helm_multicluster_release_name" || true
kubectl delete ns/linkerd-multicluster
# We wait for the namespace to be gone so the following call to `cleanup` doesn't fail when it attempts to delete
# the same namespace that is already being deleted here (error thrown by the NamespaceLifecycle controller).
# We don't have that problem with global resources, so no need to wait for them to be gone.
kubectl wait --for=delete ns/linkerd --timeout=120s || true
kubectl wait --for=delete ns/linkerd-multicluster --timeout=120s || true
)
exit_on_err 'error cleaning up Helm'
}
run_helm-upgrade_test() {
# Install the latest stable release from the published Helm chart, then
# upgrade to the locally-built charts and verify.
local stable_version
stable_version=$(latest_release_channel "stable")
if [ -z "$stable_version" ]; then
echo 'error getting stable_version'
exit 1
fi
setup_helm
helm_viz_chart="$( cd "$bindir"/.. && pwd )"/viz/charts/linkerd-viz
run_test "$test_directory/install_test.go" --helm-path="$helm_path" --helm-charts="$helm_charts" \
--viz-helm-chart="$helm_viz_chart" --helm-stable-chart='linkerd/linkerd2' --viz-helm-stable-chart="linkerd/linkerd-viz" --helm-release="$helm_release_name" --upgrade-helm-from-version="$stable_version"
helm_cleanup
}
run_uninstall_test() {
# Verify that 'linkerd uninstall' removes everything the install created.
run_test "$test_directory/uninstall/uninstall_test.go" --uninstall=true
}
run_multicluster_test() {
# Exercise cross-cluster traffic between the k3d-source and k3d-target
# clusters created by start_multicluster_test. The $context export selects
# which cluster each run_test call talks to.
# Generate a shared trust anchor in a temp dir — both installs must use the
# same certs for cross-cluster mTLS.
tmp=$(mktemp -d -t l5dcerts.XXX)
pwd=$PWD
cd "$tmp"
"$bindir"/certs-openssl
cd "$pwd"
# Install into the target cluster first and capture its link manifest.
export context="k3d-target"
run_test "$test_directory/install_test.go" --multicluster --certs-path "$tmp"
run_test "$test_directory/multicluster/target1" --multicluster
link=$(multicluster_link target)
# Install into the source cluster and apply the target's link to it.
export context="k3d-source"
run_test "$test_directory/install_test.go" --multicluster --certs-path "$tmp"
echo "$link" | kubectl --context="$context" apply -f -
run_test "$test_directory/multicluster/source" --multicluster
# Back to the target cluster to verify the mirrored traffic/services.
export context="k3d-target"
run_test "$test_directory/multicluster/target2" --multicluster
export context="k3d-target"
run_test "$test_directory/multicluster/target-statefulset" --multicluster
}
run_deep_test() {
# Run the core install test, then every Go test package found under the
# integration-test directory.
local tests=()
run_test "$test_directory/install_test.go"
# NOTE(review): the '.../...' pattern is unusual — 'go list' conventionally
# takes 'dir/...'; confirm it enumerates the intended packages.
while IFS= read -r line; do tests+=("$line"); done <<< "$(go list "$test_directory"/.../...)"
for test in "${tests[@]}"; do
run_test "$test"
done
}
run_default-policy-deny_test() {
  # Run the core install test with a default-deny authorization policy.
  # default_allow_policy is read by run_test and forwarded to 'go test'.
  # (Removed an unused 'local tests=()' left over from the deep-test template.)
  export default_allow_policy='deny'
  run_test "$test_directory/install_test.go"
}
run_cni-calico-deep_test() {
# Run the deep suite on the Calico CNI cluster; every test is passed --cni
# (and the install test additionally --calico).
local tests=()
run_test "$test_directory/install_test.go" --cni --calico
while IFS= read -r line; do tests+=("$line"); done <<< "$(go list "$test_directory"/.../...)"
for test in "${tests[@]}"; do
run_test "$test" --cni
done
}
run_helm-deep_test() {
# Install via the locally-built Helm charts (core, viz and multicluster),
# run the full deep suite, then delete the Helm releases.
local tests=()
setup_helm
helm_multicluster_chart="$( cd "$bindir"/.. && pwd )"/multicluster/charts/linkerd-multicluster
helm_viz_chart="$( cd "$bindir"/.. && pwd )"/viz/charts/linkerd-viz
run_test "$test_directory/install_test.go" --helm-path="$helm_path" --helm-charts="$helm_charts" \
--helm-release="$helm_release_name" --multicluster-helm-chart="$helm_multicluster_chart" \
--viz-helm-chart="$helm_viz_chart" --multicluster-helm-release="$helm_multicluster_release_name"
while IFS= read -r line; do tests+=("$line"); done <<< "$(go list "$test_directory"/.../...)"
for test in "${tests[@]}"; do
run_test "$test"
done
helm_cleanup
}
run_external-issuer_test() {
  # Install with an externally-managed certificate issuer, then run the
  # issuer-specific suite; both suites get the --external-issuer flag.
  local suite
  for suite in "$test_directory/install_test.go" "$test_directory/externalissuer/external_issuer_test.go"; do
    run_test "$suite" --external-issuer=true
  done
}
run_external-prometheus-deep_test() {
  # Run the full deep suite against an externally-provisioned Prometheus;
  # every test gets the --external-prometheus flag.
  # Fix: declare 'tests' local and empty, matching the sibling deep runners —
  # previously it appended to an undeclared (global) array.
  local tests=()
  run_test "$test_directory/install_test.go" --external-prometheus=true
  while IFS= read -r line; do tests+=("$line"); done <<< "$(go list "$test_directory"/.../...)"
  for test in "${tests[@]}"; do
    run_test "$test" --external-prometheus=true
  done
}
run_cluster-domain_test() {
# Verify installation on a cluster whose DNS domain is not 'cluster.local'
# (the cluster itself was created with --cluster-domain=custom.domain).
run_test "$test_directory/install_test.go" --cluster-domain='custom.domain'
}
# wrapper to implement external tests
run_external-resources_test(){
# Base install followed by the rabbitmq external-resources suite.
run_test "$test_directory/install_test.go"
run_test "$test_directory/externalresources/rabbitmq_test.go"
}
# exit_on_err should be called right after a command to check the result status
# and eventually generate a GitHub error annotation. Do not use after calls to
# `go test` as that generates its own annotations. Note this should be called
# outside subshells in order for the script to terminate.
exit_on_err() {
  # Capture the status of the immediately-preceding command.
  exit_code=$?
  [ "$exit_code" -eq 0 ] && return
  # Emit a GitHub Actions error annotation when requested, otherwise a plain
  # failure banner, then propagate the failing status.
  export GH_ANNOTATION=${GH_ANNOTATION:-}
  if [ -n "$GH_ANNOTATION" ]; then
    printf '::error::%s\n' "$1"
  else
    printf '\n=== FAIL: %s\n' "$1"
  fi
  exit $exit_code
}