From c6629e78cec6daf8c031682d23b8d4176aeee9d5 Mon Sep 17 00:00:00 2001 From: kubevirt-bot Date: Tue, 26 Oct 2021 00:00:34 +0000 Subject: [PATCH] Bump kubevirtci [8198e9c sync provider.sh between kind and kind-sriov](https://github.com/kubevirt/kubevirtci/pull/587) [8b1d599 Restore kind-1.19-sriov provider files](https://github.com/kubevirt/kubevirtci/pull/695) [bf9b729 Upgrade SR-IOV provider nodes image to k8s-1.22](https://github.com/kubevirt/kubevirtci/pull/694) [5a10f48 Add check-cluster-up script for KinD providers](https://github.com/kubevirt/kubevirtci/pull/645) Signed-off-by: kubevirt-bot --- cluster-up-sha.txt | 2 +- cluster-up/cluster/kind-1.19-sriov/README.md | 107 +++++--- .../cluster/kind-1.19-sriov/conformance.json | 88 +++++++ .../cluster/kind-1.19-sriov/provider.sh | 4 +- cluster-up/cluster/kind-1.22-sriov/OWNERS | 10 + cluster-up/cluster/kind-1.22-sriov/README.md | 101 +++++++ .../kind-1.22-sriov/TROUBLESHOOTING.md | 60 +++++ .../certcreator/certlib/selfsign.go | 115 ++++++++ .../kind-1.22-sriov/certcreator/certsecret.go | 159 +++++++++++ .../kind-1.22-sriov/certcreator/go.mod | 39 +++ .../kind-1.22-sriov/certcreator/go.sum | 194 ++++++++++++++ .../kind-1.22-sriov/config_sriov_cluster.sh | 69 +++++ .../cluster/kind-1.22-sriov/conformance.json | 88 +++++++ .../manifests/network_config_policy.yaml | 16 ++ .../cluster/kind-1.22-sriov/provider.sh | 59 +++++ .../manifests/kustomization.yaml | 34 +++ .../sriov-components/manifests/multus.yaml | 247 ++++++++++++++++++ .../manifests/patch-node-selector.yaml.in | 3 + .../patch-sriovdp-resource-prefix.yaml.in | 3 + .../manifests/sriov-cni-daemonset.yaml | 47 ++++ .../sriov-components/manifests/sriov-ns.yaml | 4 + .../manifests/sriovdp-config.yaml.in | 17 ++ .../manifests/sriovdp-daemonset.yaml | 202 ++++++++++++++ .../sriov-components/sriov_components.sh | 206 +++++++++++++++ .../sriov-node/configure_vfs.sh | 104 ++++++++ .../kind-1.22-sriov/sriov-node/node.sh | 110 ++++++++ .../cluster/kind-1.22-sriov/sriov_operator.sh | 212 +++++++++++++++ .../cluster/kind-1.22-sriov/sriovdp_setup.sh | 42 +++ .../README.md | 12 +- .../provider.sh | 29 +- cluster-up/cluster/kind/README.md | 2 +- cluster-up/cluster/kind/check-cluster-up.sh | 77 ++++++ cluster-up/hack/common.sh | 2 +- cluster-up/version.txt | 2 +- hack/config-default.sh | 2 +- 35 files changed, 2403 insertions(+), 65 deletions(-) create mode 100644 cluster-up/cluster/kind-1.19-sriov/conformance.json create mode 100644 cluster-up/cluster/kind-1.22-sriov/OWNERS create mode 100644 cluster-up/cluster/kind-1.22-sriov/README.md create mode 100644 cluster-up/cluster/kind-1.22-sriov/TROUBLESHOOTING.md create mode 100644 cluster-up/cluster/kind-1.22-sriov/certcreator/certlib/selfsign.go create mode 100644 cluster-up/cluster/kind-1.22-sriov/certcreator/certsecret.go create mode 100644 cluster-up/cluster/kind-1.22-sriov/certcreator/go.mod create mode 100644 cluster-up/cluster/kind-1.22-sriov/certcreator/go.sum create mode 100755 cluster-up/cluster/kind-1.22-sriov/config_sriov_cluster.sh create mode 100644 cluster-up/cluster/kind-1.22-sriov/conformance.json create mode 100644 cluster-up/cluster/kind-1.22-sriov/manifests/network_config_policy.yaml create mode 100755 cluster-up/cluster/kind-1.22-sriov/provider.sh create mode 100644 cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/kustomization.yaml create mode 100644 cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/multus.yaml create mode 100644 
cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/patch-node-selector.yaml.in create mode 100644 cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/patch-sriovdp-resource-prefix.yaml.in create mode 100644 cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/sriov-cni-daemonset.yaml create mode 100644 cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/sriov-ns.yaml create mode 100644 cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/sriovdp-config.yaml.in create mode 100644 cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/sriovdp-daemonset.yaml create mode 100644 cluster-up/cluster/kind-1.22-sriov/sriov-components/sriov_components.sh create mode 100755 cluster-up/cluster/kind-1.22-sriov/sriov-node/configure_vfs.sh create mode 100644 cluster-up/cluster/kind-1.22-sriov/sriov-node/node.sh create mode 100644 cluster-up/cluster/kind-1.22-sriov/sriov_operator.sh create mode 100755 cluster-up/cluster/kind-1.22-sriov/sriovdp_setup.sh rename cluster-up/cluster/{kind-k8s-1.17 => kind-k8s-1.19}/README.md (81%) rename cluster-up/cluster/{kind-k8s-1.17 => kind-k8s-1.19}/provider.sh (72%) create mode 100755 cluster-up/cluster/kind/check-cluster-up.sh diff --git a/cluster-up-sha.txt b/cluster-up-sha.txt index 3735929c66f0..f91e33ca1b74 100644 --- a/cluster-up-sha.txt +++ b/cluster-up-sha.txt @@ -1 +1 @@ -f7906a7e1dfeeb25fe3bac94f1852671aa2026b1 +9853ae783af481217142f0330a494a48c07820df diff --git a/cluster-up/cluster/kind-1.19-sriov/README.md b/cluster-up/cluster/kind-1.19-sriov/README.md index cb8de791a5e8..7da4dcfd92ba 100644 --- a/cluster-up/cluster/kind-1.19-sriov/README.md +++ b/cluster-up/cluster/kind-1.19-sriov/README.md @@ -1,74 +1,101 @@ -# K8S 1.17.0 with sriov in a Kind cluster +# K8S 1.19.11 with SR-IOV in a Kind cluster -Provides a pre-deployed k8s cluster with version 1.17.0 that runs using [kind](https://github.com/kubernetes-sigs/kind) The cluster is completely ephemeral and is recreated on every cluster restart. -The KubeVirt containers are built on the local machine and are then pushed to a registry which is exposed at +Provides a pre-deployed containerized k8s cluster with version 1.19.11 that runs +using [KinD](https://github.com/kubernetes-sigs/kind). +The cluster is completely ephemeral and is recreated on every cluster restart. The KubeVirt containers are built on the +local machine and are then pushed to a registry which is exposed at +`localhost:5000`. -This version also expects to have sriov-enabled nics on the current host, and will move physical interfaces into the `kind`'s cluster worker node(s) so that they can be used through multus. +This version also expects to have SR-IOV-enabled NICs (SR-IOV Physical Functions) on the current host, and will move +physical interfaces into the `KinD` cluster's worker node(s) so that they can be used through multus and SR-IOV +components. + +This provider also deploys [multus](https://github.com/k8snetworkplumbingwg/multus-cni) +, [sriov-cni](https://github.com/k8snetworkplumbingwg/sriov-cni) +and [sriov-device-plugin](https://github.com/k8snetworkplumbingwg/sriov-network-device-plugin). 
## Bringing the cluster up ```bash -export KUBEVIRT_PROVIDER=kind-k8s-sriov-1.17.0 +export KUBEVIRT_PROVIDER=kind-1.19-sriov +export KUBEVIRT_NUM_NODES=3 make cluster-up -``` - -The cluster can be accessed as usual: -```bash $ cluster-up/kubectl.sh get nodes -NAME STATUS ROLES AGE VERSION -sriov-control-plane Ready master 6m14s v1.17.0 -sriov-worker Ready worker 5m36s v1.17.0 +NAME STATUS ROLES AGE VERSION +sriov-control-plane Ready control-plane,master 20h v1.19.11 +sriov-worker Ready worker 20h v1.19.11 +sriov-worker2 Ready worker 20h v1.19.11 + +$ cluster-up/kubectl.sh get pods -n kube-system -l app=multus +NAME READY STATUS RESTARTS AGE +kube-multus-ds-amd64-d45n4 1/1 Running 0 20h +kube-multus-ds-amd64-g26xh 1/1 Running 0 20h +kube-multus-ds-amd64-mfh7c 1/1 Running 0 20h + +$ cluster-up/kubectl.sh get pods -n sriov -l app=sriov-cni +NAME READY STATUS RESTARTS AGE +kube-sriov-cni-ds-amd64-fv5cr 1/1 Running 0 20h +kube-sriov-cni-ds-amd64-q95q9 1/1 Running 0 20h + +$ cluster-up/kubectl.sh get pods -n sriov -l app=sriovdp +NAME READY STATUS RESTARTS AGE +kube-sriov-device-plugin-amd64-h7h84 1/1 Running 0 20h +kube-sriov-device-plugin-amd64-xrr5z 1/1 Running 0 20h ``` ## Bringing the cluster down ```bash -export KUBEVIRT_PROVIDER=kind-k8s-sriov-1.17.0 +export KUBEVIRT_PROVIDER=kind-1.19-sriov make cluster-down ``` -This destroys the whole cluster. +This destroys the whole cluster, and moves the SR-IOV NICs to the root network namespace. ## Setting a custom kind version -In order to use a custom kind image / kind version, -export KIND_NODE_IMAGE, KIND_VERSION, KUBECTL_PATH before running cluster-up. -For example in order to use kind 0.9.0 (which is based on k8s-1.19.1) use: +In order to use a custom kind image / kind version, export `KIND_NODE_IMAGE`, `KIND_VERSION`, `KUBECTL_PATH` before +running cluster-up. For example, in order to use kind 0.9.0 (which is based on k8s-1.19.1) use: + ```bash export KIND_NODE_IMAGE="kindest/node:v1.19.1@sha256:98cf5288864662e37115e362b23e4369c8c4a408f99cbc06e58ac30ddc721600" export KIND_VERSION="0.9.0" export KUBECTL_PATH="/usr/bin/kubectl" ``` + This allows users to test or use custom images / different kind versions before making them official. See https://github.com/kubernetes-sigs/kind/releases for details about node images according to the kind version. -## Running multi sriov clusters locally -Kubevirtci sriov provider supports running two clusters side by side with few known limitations. +## Running multi SR-IOV clusters locally + +Kubevirtci SR-IOV provider supports running two clusters side by side with a few known limitations. General considerations: -- A sriov PF must be available for each cluster. -In order to achieve that, there are two options: -1. Assign just one PF for each worker node of each cluster by using `export PF_COUNT_PER_NODE=1` (this is the default value). -2. Optional method: `export PF_BLACKLIST=` the non used PFs, in order to prevent them from being allocated to the current cluster. -The user can list the PFs that should not be allocated to the current cluster, keeping in mind -that at least one (or 2 in case of migration), should not be listed, so they would be allocated for the current cluster. -Note: another reason to blacklist a PF, is in case its has a defect or should be kept for other operations (for example sniffing). -- Clusters should be created one by another and not in parallel (to avoid races over SRIOV PF's). -- The cluster names must be different. 
-This can be achieved by setting `export CLUSTER_NAME=sriov2` on the 2nd cluster. -The default `CLUSTER_NAME` is `sriov`. -The 2nd cluster registry would be exposed at `localhost:5001` automatically, once the `CLUSTER_NAME` -is set to a non default value. -- Each cluster should be created on its own git clone folder, i.e -`/root/project/kubevirtci1` -`/root/project/kubevirtci2` -In order to switch between them, change dir to that folder and set the env variables `KUBECONFIG` and `KUBEVIRT_PROVIDER`. +- A SR-IOV PF must be available for each cluster. In order to achieve that, there are two options: + +1. Assign just one PF for each worker node of each cluster by using `export PF_COUNT_PER_NODE=1` (this is the default + value). +2. Optional method: `export PF_BLACKLIST=` the unused PFs, in order to prevent them from being allocated to + the current cluster. The user can list the PFs that should not be allocated to the current cluster, keeping in mind + that at least one (or 2 in case of migration) should not be listed, so they would be allocated for the current + cluster. Note: another reason to blacklist a PF is if it has a defect or should be kept for other operations ( + for example, sniffing). + +- Clusters should be created one after another and not in parallel (to avoid races over SR-IOV PFs). +- The cluster names must be different. This can be achieved by setting `export CLUSTER_NAME=sriov2` on the 2nd cluster. + The default `CLUSTER_NAME` is `sriov`. The 2nd cluster registry would be exposed at `localhost:5001` automatically, + once the `CLUSTER_NAME` + is set to a non-default value. +- Each cluster should be created on its own git clone folder, e.g.: + `/root/project/kubevirtci1` + `/root/project/kubevirtci2` + In order to switch between them, change dir to that folder and set the env variables `KUBECONFIG` + and `KUBEVIRT_PROVIDER`. - In case only one PF exists, for example if running on Prow, which will assign only one PF per job in its own DinD, -Kubevirtci is agnostic and nothing needs to be done, since all conditions above are met. -- Upper limit of the number of clusters that can be run on the same time equals number of PFs / number of PFs per cluster, -therefore, in case there is only one PF, only one cluster can be created. -Locally the actual limit currently supported is two clusters. + Kubevirtci is agnostic and nothing needs to be done, since all conditions above are met. +- Upper limit of the number of clusters that can run at the same time equals the number of PFs / number of PFs per + cluster, therefore, in case there is only one PF, only one cluster can be created. Locally the actual limit currently + supported is two clusters. - In order to use `make cluster-down`, please make sure the right `CLUSTER_NAME` is exported (see the sketch below). 
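Putting the considerations above together, bringing up and later tearing down a second side-by-side cluster might look like the following sketch (the PF name is hypothetical and depends on the host):

```bash
# run from a second clone, e.g. /root/project/kubevirtci2
export KUBEVIRT_PROVIDER=kind-1.19-sriov
export CLUSTER_NAME=sriov2     # non-default name; the registry is exposed at localhost:5001
export PF_BLACKLIST=ens1f1     # hypothetical PF name, kept free for the first cluster
make cluster-up

# later, from the same clone and with the same CLUSTER_NAME exported
make cluster-down
```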
diff --git a/cluster-up/cluster/kind-1.19-sriov/conformance.json b/cluster-up/cluster/kind-1.19-sriov/conformance.json new file mode 100644 index 000000000000..c796ea1b0197 --- /dev/null +++ b/cluster-up/cluster/kind-1.19-sriov/conformance.json @@ -0,0 +1,88 @@ +{ + "Description": "DEFAULT", + "UUID": "c3bc7d76-6ce8-4c8a-8bcb-5c7ae5fb22a3", + "Version": "v0.50.0", + "ResultsDir": "/tmp/sonobuoy", + "Resources": [ + "apiservices", + "certificatesigningrequests", + "clusterrolebindings", + "clusterroles", + "componentstatuses", + "configmaps", + "controllerrevisions", + "cronjobs", + "customresourcedefinitions", + "daemonsets", + "deployments", + "endpoints", + "ingresses", + "jobs", + "leases", + "limitranges", + "mutatingwebhookconfigurations", + "namespaces", + "networkpolicies", + "nodes", + "persistentvolumeclaims", + "persistentvolumes", + "poddisruptionbudgets", + "pods", + "podlogs", + "podsecuritypolicies", + "podtemplates", + "priorityclasses", + "replicasets", + "replicationcontrollers", + "resourcequotas", + "rolebindings", + "roles", + "servergroups", + "serverversion", + "serviceaccounts", + "services", + "statefulsets", + "storageclasses", + "validatingwebhookconfigurations", + "volumeattachments" + ], + "Filters": { + "Namespaces": ".*", + "LabelSelector": "" + }, + "Limits": { + "PodLogs": { + "Namespaces": "", + "SonobuoyNamespace": true, + "FieldSelectors": [], + "LabelSelector": "", + "Previous": false, + "SinceSeconds": null, + "SinceTime": null, + "Timestamps": false, + "TailLines": null, + "LimitBytes": null, + "LimitSize": "", + "LimitTime": "" + } + }, + "QPS": 30, + "Burst": 50, + "Server": { + "bindaddress": "0.0.0.0", + "bindport": 8080, + "advertiseaddress": "", + "timeoutseconds": 10800 + }, + "Plugins": null, + "PluginSearchPath": [ + "./plugins.d", + "/etc/sonobuoy/plugins.d", + "~/sonobuoy/plugins.d" + ], + "Namespace": "sonobuoy", + "WorkerImage": "projects.registry.vmware.com/sonobuoy/sonobuoy:v0.50.0", + "ImagePullPolicy": "IfNotPresent", + "ImagePullSecrets": "", + "ProgressUpdatesPort": "8099" +} diff --git a/cluster-up/cluster/kind-1.19-sriov/provider.sh b/cluster-up/cluster/kind-1.19-sriov/provider.sh index 1b834f7847ad..39e82fa57791 100755 --- a/cluster-up/cluster/kind-1.19-sriov/provider.sh +++ b/cluster-up/cluster/kind-1.19-sriov/provider.sh @@ -24,7 +24,7 @@ function set_kind_params() { } function print_sriov_data() { - nodes=$(_kubectl get nodes -o=custom-columns=:.metadata.name | awk NF) + nodes="$(_kubectl get nodes -o=custom-columns=:.metadata.name | awk NF)" for node in $nodes; do if [[ ! "$node" =~ .*"control-plane".* ]]; then echo "Node: $node" @@ -53,7 +53,7 @@ function up() { # In order to support live migration on containerized cluster we need to workaround # Libvirt uuid check for source and target nodes. # To do that we create PodPreset that mounts fake random product_uuid to virt-launcher pods, - # and kubevirt SRIOV tests namespace for the PodPrest beforhand. + # and kubevirt SRIOV tests namespace for the PodPreset beforehand. 
podpreset::expose_unique_product_uuid_per_node "$CLUSTER_NAME" "$SRIOV_TESTS_NS" print_sriov_data diff --git a/cluster-up/cluster/kind-1.22-sriov/OWNERS b/cluster-up/cluster/kind-1.22-sriov/OWNERS new file mode 100644 index 000000000000..786c313b2c51 --- /dev/null +++ b/cluster-up/cluster/kind-1.22-sriov/OWNERS @@ -0,0 +1,10 @@ +filters: + ".*": + reviewers: + - qinqon + - oshoval + - phoracek + - ormergi + approvers: + - qinqon + - phoracek diff --git a/cluster-up/cluster/kind-1.22-sriov/README.md b/cluster-up/cluster/kind-1.22-sriov/README.md new file mode 100644 index 000000000000..1155f17892e3 --- /dev/null +++ b/cluster-up/cluster/kind-1.22-sriov/README.md @@ -0,0 +1,101 @@ +# K8S 1.22.2 with SR-IOV in a Kind cluster + +Provides a pre-deployed containerized k8s cluster with version 1.22.2 that runs +using [KinD](https://github.com/kubernetes-sigs/kind). +The cluster is completely ephemeral and is recreated on every cluster restart. The KubeVirt containers are built on the +local machine and are then pushed to a registry which is exposed at +`localhost:5000`. + +This version also expects to have SR-IOV-enabled NICs (SR-IOV Physical Functions) on the current host, and will move +physical interfaces into the `KinD` cluster's worker node(s) so that they can be used through multus and SR-IOV +components. + +This provider also deploys [multus](https://github.com/k8snetworkplumbingwg/multus-cni) +, [sriov-cni](https://github.com/k8snetworkplumbingwg/sriov-cni) +and [sriov-device-plugin](https://github.com/k8snetworkplumbingwg/sriov-network-device-plugin). + +## Bringing the cluster up + +```bash +export KUBEVIRT_PROVIDER=kind-1.22-sriov +export KUBEVIRT_NUM_NODES=3 +make cluster-up + +$ cluster-up/kubectl.sh get nodes +NAME STATUS ROLES AGE VERSION +sriov-control-plane Ready control-plane,master 20h v1.22.2 +sriov-worker Ready worker 20h v1.22.2 +sriov-worker2 Ready worker 20h v1.22.2 + +$ cluster-up/kubectl.sh get pods -n kube-system -l app=multus +NAME READY STATUS RESTARTS AGE +kube-multus-ds-amd64-d45n4 1/1 Running 0 20h +kube-multus-ds-amd64-g26xh 1/1 Running 0 20h +kube-multus-ds-amd64-mfh7c 1/1 Running 0 20h + +$ cluster-up/kubectl.sh get pods -n sriov -l app=sriov-cni +NAME READY STATUS RESTARTS AGE +kube-sriov-cni-ds-amd64-fv5cr 1/1 Running 0 20h +kube-sriov-cni-ds-amd64-q95q9 1/1 Running 0 20h + +$ cluster-up/kubectl.sh get pods -n sriov -l app=sriovdp +NAME READY STATUS RESTARTS AGE +kube-sriov-device-plugin-amd64-h7h84 1/1 Running 0 20h +kube-sriov-device-plugin-amd64-xrr5z 1/1 Running 0 20h ``` + +## Bringing the cluster down + +```bash +export KUBEVIRT_PROVIDER=kind-1.22-sriov +make cluster-down +``` + +This destroys the whole cluster, and moves the SR-IOV NICs to the root network namespace. + +## Setting a custom kind version + +In order to use a custom kind image / kind version, export `KIND_NODE_IMAGE`, `KIND_VERSION`, `KUBECTL_PATH` before +running cluster-up. For example, in order to use kind 0.9.0 (which is based on k8s-1.19.1) use: + +```bash +export KIND_NODE_IMAGE="kindest/node:v1.19.1@sha256:98cf5288864662e37115e362b23e4369c8c4a408f99cbc06e58ac30ddc721600" +export KIND_VERSION="0.9.0" +export KUBECTL_PATH="/usr/bin/kubectl" +``` + +This allows users to test or use custom images / different kind versions before making them official. +See https://github.com/kubernetes-sigs/kind/releases for details about node images according to the kind version. 
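Once the cluster is up, a quick way to confirm that the device plugin is advertising VFs is to query the allocatable resources of the SR-IOV-capable nodes. This sketch assumes the provider defaults used by the setup scripts in this patch (`sriov_capable=true` node label, `kubevirt.io/sriov_net` resource name):

```bash
cluster-up/kubectl.sh get nodes -l sriov_capable=true \
  "-o=custom-columns=NAME:.metadata.name,SRIOV_VFS:.status.allocatable.kubevirt\.io/sriov_net"
```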
+ +## Running multi SR-IOV clusters locally + +Kubevirtci SR-IOV provider supports running two clusters side by side with a few known limitations. + +General considerations: + +- A SR-IOV PF must be available for each cluster. In order to achieve that, there are two options: + +1. Assign just one PF for each worker node of each cluster by using `export PF_COUNT_PER_NODE=1` (this is the default + value). +2. Optional method: `export PF_BLACKLIST=` the unused PFs, in order to prevent them from being allocated to + the current cluster. The user can list the PFs that should not be allocated to the current cluster, keeping in mind + that at least one (or 2 in case of migration) should not be listed, so they would be allocated for the current + cluster. Note: another reason to blacklist a PF is if it has a defect or should be kept for other operations ( + for example, sniffing). + +- Clusters should be created one after another and not in parallel (to avoid races over SR-IOV PFs). +- The cluster names must be different. This can be achieved by setting `export CLUSTER_NAME=sriov2` on the 2nd cluster. + The default `CLUSTER_NAME` is `sriov`. The 2nd cluster registry would be exposed at `localhost:5001` automatically, + once the `CLUSTER_NAME` + is set to a non-default value. +- Each cluster should be created on its own git clone folder, e.g.: + `/root/project/kubevirtci1` + `/root/project/kubevirtci2` + In order to switch between them, change dir to that folder and set the env variables `KUBECONFIG` + and `KUBEVIRT_PROVIDER`. +- In case only one PF exists, for example if running on Prow, which will assign only one PF per job in its own DinD, + Kubevirtci is agnostic and nothing needs to be done, since all conditions above are met. +- Upper limit of the number of clusters that can run at the same time equals the number of PFs / number of PFs per + cluster, therefore, in case there is only one PF, only one cluster can be created. Locally the actual limit currently + supported is two clusters. +- In order to use `make cluster-down`, please make sure the right `CLUSTER_NAME` is exported. diff --git a/cluster-up/cluster/kind-1.22-sriov/TROUBLESHOOTING.md b/cluster-up/cluster/kind-1.22-sriov/TROUBLESHOOTING.md new file mode 100644 index 000000000000..7b699427a410 --- /dev/null +++ b/cluster-up/cluster/kind-1.22-sriov/TROUBLESHOOTING.md @@ -0,0 +1,60 @@ +# How to troubleshoot a failing kind job + +If logging and output artifacts are not enough, there is a way to connect to a running CI pod and troubleshoot directly from there. + +## Prerequisites + +- A working (enabled) account on the [CI cluster](shift.ovirt.org), specifically enabled for the `kubevirt-prow-jobs` project. +- The [mkpj tool](https://github.com/kubernetes/test-infra/tree/master/prow/cmd/mkpj) installed. + +## Launching a custom job + +Through the `mkpj` tool, it's possible to craft a custom Prow Job that can be executed on the CI cluster. + +Just `go get` it by running `go get k8s.io/test-infra/prow/cmd/mkpj`. + +Then run the following command from a checkout of the [project-infra repo](https://github.com/kubevirt/project-infra): + +```bash +mkpj --pull-number $KUBEVIRTPRNUMBER -job pull-kubevirt-e2e-kind-k8s-sriov-1.17.0 -job-config-path github/ci/prow/files/jobs/kubevirt/kubevirt-presubmits.yaml --config-path github/ci/prow/files/config.yaml > debugkind.yaml +``` + +You will end up with a ProwJob manifest in the `debugkind.yaml` file. 
+ +It's strongly recommended to replace the job's name by editing `metadata.name` to something more recognizable, as that makes it easier to find and debug the corresponding pod. + +The `$KUBEVIRTPRNUMBER` can be the number of an actual PR on the [kubevirt repo](https://github.com/kubevirt/kubevirt). + +In case we just want to debug the cluster provided by the CI, it's recommended to override the entry point, either in the test PR we are instrumenting (a good sample can be found [here](https://github.com/kubevirt/kubevirt/pull/3022)), or by overriding the entry point directly in the Prow job's manifest. + +Remember that we want the cluster to be long-lived, so a long sleep must be provided as part of the entry point. + +Make sure you switch to the `kubevirt-prow-jobs` project, and apply the manifest: + +```bash + kubectl apply -f debugkind.yaml +``` + +You will end up with a ProwJob object, and a pod with the same name you gave to the ProwJob. + +Once the pod is up & running, connect to it via bash: + +```bash + kubectl exec -it debugprowjobpod bash +``` + +### Logistics + +Once you are in the pod, you'll be able to troubleshoot what's happening in the environment where CI runs its tests. + +Run the following to bring up a [kind](https://github.com/kubernetes-sigs/kind) cluster with a single-node setup and the SR-IOV operator already set up (if the job itself hasn't already done so). + +```bash +KUBEVIRT_PROVIDER=kind-k8s-sriov-1.17.0 make cluster-up +``` + +The kubeconfig file will be available under `/root/.kube/kind-config-sriov`. + +The `kubectl` binary is already on board and in `$PATH`. + +The container acting as node is the one named `sriov-control-plane`. You can even see what's in there by running `docker exec -it sriov-control-plane bash`. diff --git a/cluster-up/cluster/kind-1.22-sriov/certcreator/certlib/selfsign.go b/cluster-up/cluster/kind-1.22-sriov/certcreator/certlib/selfsign.go new file mode 100644 index 000000000000..65b5730bffae --- /dev/null +++ b/cluster-up/cluster/kind-1.22-sriov/certcreator/certlib/selfsign.go @@ -0,0 +1,115 @@ +package certlib + +import ( + "bytes" + cryptorand "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "math/rand" + "time" +) + +type SelfSignedCertificate struct { + DNSNames []string + CommonName string + Certificate *bytes.Buffer + PrivateKey *bytes.Buffer +} + +func (s *SelfSignedCertificate) Generate() error { + var caPEM *bytes.Buffer + + randomSource := rand.New(rand.NewSource(time.Now().Unix())) + caCertificateConfig := &x509.Certificate{ + SerialNumber: big.NewInt(randomSource.Int63()), + Subject: pkix.Name{ + Organization: []string{"kubevirt.io"}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().AddDate(1, 0, 0), + IsCA: true, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + } + + caPrivateKey, err := rsa.GenerateKey(cryptorand.Reader, 4096) + if err != nil { + return fmt.Errorf("failed to generate CA private key: %v", err) + } + + caSelfSignedCertificateBytes, err := x509.CreateCertificate( + cryptorand.Reader, + caCertificateConfig, + caCertificateConfig, + &caPrivateKey.PublicKey, + caPrivateKey) + if err != nil { + return fmt.Errorf("failed to generate CA certificate: %v", err) + } + + // PEM encode CA cert + caPEM = new(bytes.Buffer) + err = pem.Encode(caPEM, &pem.Block{ + Type: "CERTIFICATE", + Bytes: caSelfSignedCertificateBytes, + }) + if err != 
nil { + return fmt.Errorf("failed to encode CA certificate bytes to PEM: %v", err) + } + + serverCertificateConfig := &x509.Certificate{ + DNSNames: s.DNSNames, + SerialNumber: big.NewInt(randomSource.Int63()), + Subject: pkix.Name{ + CommonName: s.CommonName, + Organization: []string{"kubevirt.io"}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().AddDate(1, 0, 0), + SubjectKeyId: []byte{1, 2, 3, 4, 6}, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + KeyUsage: x509.KeyUsageDigitalSignature, + } + + serverPrivateKey, err := rsa.GenerateKey(cryptorand.Reader, 4096) + if err != nil { + return fmt.Errorf("failed to generate server private key: %v", err) + } + + // Signing server certificate + serverCertificateBytes, err := x509.CreateCertificate( + cryptorand.Reader, + serverCertificateConfig, + caCertificateConfig, + &serverPrivateKey.PublicKey, + caPrivateKey) + if err != nil { + return fmt.Errorf("failed to sign server certificate: %v", err) + } + + // PEM encode the server cert and key + s.Certificate = new(bytes.Buffer) + err = pem.Encode(s.Certificate, &pem.Block{ + Type: "CERTIFICATE", + Bytes: serverCertificateBytes, + }) + if err != nil { + return fmt.Errorf("failed to encode server certificate bytes to PEM: %v", err) + } + + s.PrivateKey = new(bytes.Buffer) + err = pem.Encode(s.PrivateKey, &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(serverPrivateKey), + }) + if err != nil { + return fmt.Errorf("failed to encode server private key bytes to PEM: %v", err) + } + + return nil +} diff --git a/cluster-up/cluster/kind-1.22-sriov/certcreator/certsecret.go b/cluster-up/cluster/kind-1.22-sriov/certcreator/certsecret.go new file mode 100644 index 000000000000..205985a6415d --- /dev/null +++ b/cluster-up/cluster/kind-1.22-sriov/certcreator/certsecret.go @@ -0,0 +1,159 @@ +package main + +import ( + "encoding/base64" + "flag" + "fmt" + "io/ioutil" + "log" + "os" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + + "kubevirt.io/kubevirtci/cluster-up/cluster/kind-k8s-sriov-1.17.0/certcreator/certlib" +) + +func handleKubeClientConfig(kubeconfig string) (*rest.Config, error) { + if kubeconfig == "" { + log.Printf("Using env kubeconfig %s", kubeconfig) + kubeconfig = os.Getenv("KUBECONFIG") + } + + var config *rest.Config + var err error + if kubeconfig != "" { + log.Printf("Loading kube client config from path %q", kubeconfig) + config, err = clientcmd.BuildConfigFromFlags("", kubeconfig) + } else { + log.Printf("Using in-cluster kube client config") + config, err = rest.InClusterConfig() + } + if err != nil { + return nil, fmt.Errorf("could not get the client: %v", err) + } + + return config, nil +} + +func generate(hookName, namespace string) ([]byte, []byte, error) { + serviceName := strings.Join([]string{hookName, "service"}, "-") + + certConfig := certlib.SelfSignedCertificate{ + CommonName: strings.Join([]string{serviceName, namespace, "svc"}, "."), + DNSNames: []string{ + serviceName, + strings.Join([]string{serviceName, namespace}, "."), + strings.Join([]string{serviceName, namespace, "svc"}, ".")}, + } + err := certConfig.Generate() + if err != nil { + return nil, nil, fmt.Errorf("failed to generate self-signed certificate: %v", err) + } + log.Printf("Self-Signed certificate created 
successfully for CN %s", certConfig.CommonName) + + return certConfig.Certificate.Bytes(), certConfig.PrivateKey.Bytes(), nil +} + +func exportCertificateFile(data []byte, filePath string) error { + certificateFileName := fmt.Sprintf("%s.cert", filePath) + encodedData := []byte(base64.StdEncoding.EncodeToString(data)) + if err := ioutil.WriteFile(certificateFileName, encodedData, 0644); err != nil { + return fmt.Errorf("failed to write content to file %s: %v", filePath, err) + } + log.Printf("certificate exported successfully to: %s", filePath) + + return nil +} + +func createSecret(clusterApi kubernetes.Interface, namespace, secretName string, certificate, key []byte) error { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: namespace, + }, + Data: map[string][]byte{ + "tls.crt": certificate, + "tls.key": key, + }, + } + + err := wait.Poll(time.Second*5, time.Minute*3, func() (bool, error) { + _, err := clusterApi.CoreV1().Secrets(namespace).Get(secret.Name, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return true, nil + } + return false, nil + } + return false, fmt.Errorf("secret %s already exists", secret.Name) + }) + + if err != nil { + return err + } + + err = wait.Poll(time.Second*5, time.Minute*3, func() (bool, error) { + _, err := clusterApi.CoreV1().Secrets(namespace).Create(secret) + if err != nil { + if errors.IsAlreadyExists(err) { + return true, nil + } + log.Printf("failed to create secret '%s': %v", secret.Name, err) + return false, nil + } + return true, nil + }) + if err != nil { + return fmt.Errorf("timeout waiting for secret '%s' to create secret: %v", secret.Name, err) + } + log.Printf("Secret '%s' at '%s' created successfully", secret.Name, namespace) + + return nil +} + +func main() { + namespace := flag.String("namespace", "", "The namespace of the webhook") + kubeconfig := flag.String("kubeconfig", "", "The path of kubeconfig") + hookName := flag.String("hook", "", "The name of the hook") + secretName := flag.String("secret", "", "The name of the secret") + flag.Parse() + + if *namespace == "" || *hookName == "" || *secretName == "" { + flag.Usage() + log.Fatal("Not enough arguments") + } + + var err error + config, err := handleKubeClientConfig(*kubeconfig) + if err != nil { + log.Fatalf("Failed to set kubernetes client config: %v", err) + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + log.Fatalf("Failed to set up Kubernetes client: %v", err) + } + + certificate, key, err := generate(*hookName, *namespace) + if err != nil { + log.Fatalf("Failed to generate certificate: %v", err) + } + + err = exportCertificateFile(certificate, *hookName) + if err != nil { + log.Fatalf("Failed to export certificate to file: %v", err) + } + + err = createSecret(clientset, *namespace, *secretName, certificate, key) + if err != nil { + log.Fatalf("Failed to create Secret: %v", err) + } +} diff --git a/cluster-up/cluster/kind-1.22-sriov/certcreator/go.mod b/cluster-up/cluster/kind-1.22-sriov/certcreator/go.mod new file mode 100644 index 000000000000..f8231c85e7a6 --- /dev/null +++ b/cluster-up/cluster/kind-1.22-sriov/certcreator/go.mod @@ -0,0 +1,39 @@ +module kubevirt.io/kubevirtci/cluster-up/cluster/kind-k8s-sriov-1.17.0/certcreator + +go 1.13 + +require ( + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect + golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect + k8s.io/api v0.17.3 + k8s.io/apimachinery v0.17.3 + k8s.io/client-go v11.0.0+incompatible + 
k8s.io/utils v0.0.0-20200124190032-861946025e34 // indirect +) + +// Pinned to kubernetes-1.17.0 +replace ( + k8s.io/api => k8s.io/api v0.17.0 + k8s.io/api/admissionregistration => k8s.io/api/admissionregistration v0.17.0 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.17.0 + k8s.io/apimachinery => k8s.io/apimachinery v0.17.0 + k8s.io/apiserver => k8s.io/apiserver v0.17.0 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.17.0 + k8s.io/client-go => k8s.io/client-go v0.17.0 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.17.0 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.17.0 + k8s.io/code-generator => k8s.io/code-generator v0.17.0 + k8s.io/component-base => k8s.io/component-base v0.17.0 + k8s.io/cri-api => k8s.io/cri-api v0.17.0 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.17.0 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.17.0 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.17.0 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.17.0 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.17.0 + k8s.io/kubectl => k8s.io/kubectl v0.17.0 + k8s.io/kubelet => k8s.io/kubelet v0.17.0 + k8s.io/kubernetes => k8s.io/kubernetes v1.17.0 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.17.0 + k8s.io/metrics => k8s.io/metrics v0.17.0 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.17.0 +) diff --git a/cluster-up/cluster/kind-1.22-sriov/certcreator/go.sum b/cluster-up/cluster/kind-1.22-sriov/certcreator/go.sum new file mode 100644 index 000000000000..532e991047ba --- /dev/null +++ b/cluster-up/cluster/kind-1.22-sriov/certcreator/go.sum @@ -0,0 +1,194 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 
v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.0/go.mod 
h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0 
h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.17.0 h1:H9d/lw+VkZKEVIUc8F3wgiQ+FUXTTr21M87jXLU7yqM= +k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI= +k8s.io/apimachinery v0.17.0 h1:xRBnuie9rXcPxUkDizUsGvPf1cnlZCFu210op7J7LJo= +k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= +k8s.io/client-go v0.17.0 h1:8QOGvUGdqDMFrm9sD6IUFl256BcffynGoe80sxgTEDg= +k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20200124190032-861946025e34 h1:HjlUD6M0K3P8nRXmr2B9o4F9dUy9TCj/aEpReeyi6+k= +k8s.io/utils v0.0.0-20200124190032-861946025e34/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/cluster-up/cluster/kind-1.22-sriov/config_sriov_cluster.sh b/cluster-up/cluster/kind-1.22-sriov/config_sriov_cluster.sh new file mode 100755 index 000000000000..effef59033ce --- /dev/null 
+++ b/cluster-up/cluster/kind-1.22-sriov/config_sriov_cluster.sh @@ -0,0 +1,69 @@ +#!/bin/bash + +[ $(id -u) -ne 0 ] && echo "FATAL: this script requires sudo privileges" >&2 && exit 1 + +set -xe + +PF_COUNT_PER_NODE=${PF_COUNT_PER_NODE:-1} +[ $PF_COUNT_PER_NODE -le 0 ] && echo "FATAL: PF_COUNT_PER_NODE must be a positive integer" >&2 && exit 1 + +SCRIPT_PATH=$(dirname "$(realpath "$0")") + +source ${SCRIPT_PATH}/sriov-node/node.sh +source ${SCRIPT_PATH}/sriov-components/sriov_components.sh + +CONFIGURE_VFS_SCRIPT_PATH="$SCRIPT_PATH/sriov-node/configure_vfs.sh" + +SRIOV_COMPONENTS_NAMESPACE="sriov" +SRIOV_NODE_LABEL_KEY="sriov_capable" +SRIOV_NODE_LABEL_VALUE="true" +SRIOV_NODE_LABEL="$SRIOV_NODE_LABEL_KEY=$SRIOV_NODE_LABEL_VALUE" +SRIOVDP_RESOURCE_PREFIX="kubevirt.io" +SRIOVDP_RESOURCE_NAME="sriov_net" +VFS_DRIVER="vfio-pci" +VFS_DRIVER_KMODULE="vfio_pci" + +function validate_nodes_sriov_allocatable_resource() { + local -r resource_name="$SRIOVDP_RESOURCE_PREFIX/$SRIOVDP_RESOURCE_NAME" + local -r sriov_nodes=$(_kubectl get nodes -l $SRIOV_NODE_LABEL -o custom-columns=:.metadata.name --no-headers) + + local num_vfs + for sriov_node in $sriov_nodes; do + num_vfs=$(node::total_vfs_count "$sriov_node") + sriov_components::wait_allocatable_resource "$sriov_node" "$resource_name" "$num_vfs" + done +} + +worker_nodes=($(_kubectl get nodes -l node-role.kubernetes.io/worker -o custom-columns=:.metadata.name --no-headers)) +worker_nodes_count=${#worker_nodes[@]} +[ "$worker_nodes_count" -eq 0 ] && echo "FATAL: no worker nodes found" >&2 && exit 1 + +pfs_names=($(node::discover_host_pfs)) +pf_count="${#pfs_names[@]}" +[ "$pf_count" -eq 0 ] && echo "FATAL: Could not find available SR-IOV PFs" >&2 && exit 1 + +total_pf_required=$((worker_nodes_count*PF_COUNT_PER_NODE)) +[ "$pf_count" -lt "$total_pf_required" ] && \ + echo "FATAL: there are not enough PFs on the host, try to reduce PF_COUNT_PER_NODE + Worker nodes count: $worker_nodes_count + PF per node count: $PF_COUNT_PER_NODE + Total PF count required: $total_pf_required" >&2 && exit 1 + +## Move SR-IOV Physical Functions to worker nodes, create VFs and configure their drivers +PFS_IN_USE="" +node::configure_sriov_pfs_and_vfs "${worker_nodes[*]}" "${pfs_names[*]}" "$PF_COUNT_PER_NODE" "PFS_IN_USE" + +## Deploy Multus and SRIOV components +sriov_components::deploy_multus +sriov_components::deploy \ + "$PFS_IN_USE" \ + "$VFS_DRIVER" \ + "$SRIOVDP_RESOURCE_PREFIX" "$SRIOVDP_RESOURCE_NAME" \ + "$SRIOV_NODE_LABEL_KEY" "$SRIOV_NODE_LABEL_VALUE" + +# Verify that each SR-IOV-capable node has an allocatable SR-IOV VFs resource +validate_nodes_sriov_allocatable_resource +sriov_components::wait_pods_ready + +_kubectl get nodes +_kubectl get pods -n $SRIOV_COMPONENTS_NAMESPACE diff --git a/cluster-up/cluster/kind-1.22-sriov/conformance.json b/cluster-up/cluster/kind-1.22-sriov/conformance.json new file mode 100644 index 000000000000..c796ea1b0197 --- /dev/null +++ b/cluster-up/cluster/kind-1.22-sriov/conformance.json @@ -0,0 +1,88 @@ +{ + "Description": "DEFAULT", + "UUID": "c3bc7d76-6ce8-4c8a-8bcb-5c7ae5fb22a3", + "Version": "v0.50.0", + "ResultsDir": "/tmp/sonobuoy", + "Resources": [ + "apiservices", + "certificatesigningrequests", + "clusterrolebindings", + "clusterroles", + "componentstatuses", + "configmaps", + "controllerrevisions", + "cronjobs", + "customresourcedefinitions", + "daemonsets", + "deployments", + "endpoints", + "ingresses", + "jobs", + "leases", + "limitranges", + "mutatingwebhookconfigurations", + "namespaces", + "networkpolicies", + "nodes", 
+ "persistentvolumeclaims", + "persistentvolumes", + "poddisruptionbudgets", + "pods", + "podlogs", + "podsecuritypolicies", + "podtemplates", + "priorityclasses", + "replicasets", + "replicationcontrollers", + "resourcequotas", + "rolebindings", + "roles", + "servergroups", + "serverversion", + "serviceaccounts", + "services", + "statefulsets", + "storageclasses", + "validatingwebhookconfigurations", + "volumeattachments" + ], + "Filters": { + "Namespaces": ".*", + "LabelSelector": "" + }, + "Limits": { + "PodLogs": { + "Namespaces": "", + "SonobuoyNamespace": true, + "FieldSelectors": [], + "LabelSelector": "", + "Previous": false, + "SinceSeconds": null, + "SinceTime": null, + "Timestamps": false, + "TailLines": null, + "LimitBytes": null, + "LimitSize": "", + "LimitTime": "" + } + }, + "QPS": 30, + "Burst": 50, + "Server": { + "bindaddress": "0.0.0.0", + "bindport": 8080, + "advertiseaddress": "", + "timeoutseconds": 10800 + }, + "Plugins": null, + "PluginSearchPath": [ + "./plugins.d", + "/etc/sonobuoy/plugins.d", + "~/sonobuoy/plugins.d" + ], + "Namespace": "sonobuoy", + "WorkerImage": "projects.registry.vmware.com/sonobuoy/sonobuoy:v0.50.0", + "ImagePullPolicy": "IfNotPresent", + "ImagePullSecrets": "", + "ProgressUpdatesPort": "8099" +} diff --git a/cluster-up/cluster/kind-1.22-sriov/manifests/network_config_policy.yaml b/cluster-up/cluster/kind-1.22-sriov/manifests/network_config_policy.yaml new file mode 100644 index 000000000000..8bf28e7fb3ae --- /dev/null +++ b/cluster-up/cluster/kind-1.22-sriov/manifests/network_config_policy.yaml @@ -0,0 +1,16 @@ +apiVersion: sriovnetwork.openshift.io/v1 +kind: SriovNetworkNodePolicy +metadata: + name: policy-1 + namespace: sriov-network-operator +spec: + deviceType: vfio-pci + mtu: 1500 + nodeSelector: + sriov: "true" + numVfs: $NODE_PF_NUM_VFS + nicSelector: + pfNames: + - $NODE_PF + priority: 90 + resourceName: sriov_net diff --git a/cluster-up/cluster/kind-1.22-sriov/provider.sh b/cluster-up/cluster/kind-1.22-sriov/provider.sh new file mode 100755 index 000000000000..2084f5dc6aeb --- /dev/null +++ b/cluster-up/cluster/kind-1.22-sriov/provider.sh @@ -0,0 +1,59 @@ +#!/usr/bin/env bash + +set -e + +DEFAULT_CLUSTER_NAME="sriov" +DEFAULT_HOST_PORT=5000 +ALTERNATE_HOST_PORT=5001 +export CLUSTER_NAME=${CLUSTER_NAME:-$DEFAULT_CLUSTER_NAME} + +if [ $CLUSTER_NAME == $DEFAULT_CLUSTER_NAME ]; then + export HOST_PORT=$DEFAULT_HOST_PORT +else + export HOST_PORT=$ALTERNATE_HOST_PORT +fi + +#'kubevirt-test-default1' is the default namespace of +# Kubevirt SRIOV tests where the SRIOV VM's will be created. +SRIOV_TESTS_NS="${SRIOV_TESTS_NS:-kubevirt-test-default1}" + +function set_kind_params() { + export KIND_VERSION="${KIND_VERSION:-0.11.1}" + export KIND_NODE_IMAGE="${KIND_NODE_IMAGE:-quay.io/kubevirtci/kindest_node:v1.22.2@sha256:f638a08c1f68fe2a99e724ace6df233a546eaf6713019a0b310130a4f91ebe7f}" + export KUBECTL_PATH="${KUBECTL_PATH:-/bin/kubectl}" +} + +function print_sriov_data() { + nodes=$(_kubectl get nodes -o=custom-columns=:.metadata.name | awk NF) + for node in $nodes; do + if [[ ! 
"$node" =~ .*"control-plane".* ]]; then + echo "Node: $node" + echo "VFs:" + docker exec $node bash -c "ls -l /sys/class/net/*/device/virtfn*" + echo "PFs PCI Addresses:" + docker exec $node bash -c "grep PCI_SLOT_NAME /sys/class/net/*/device/uevent" + fi + done +} + +function up() { + # print hardware info for easier debugging based on logs + echo 'Available NICs' + docker run --rm --cap-add=SYS_RAWIO quay.io/phoracek/lspci@sha256:0f3cacf7098202ef284308c64e3fc0ba441871a846022bb87d65ff130c79adb1 sh -c "lspci | egrep -i 'network|ethernet'" + echo "" + + cp $KIND_MANIFESTS_DIR/kind.yaml ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml + kind_up + + # remove the rancher.io kind default storageClass + _kubectl delete sc standard + + ${KUBEVIRTCI_PATH}/cluster/$KUBEVIRT_PROVIDER/config_sriov_cluster.sh + + print_sriov_data + echo "$KUBEVIRT_PROVIDER cluster '$CLUSTER_NAME' is ready" +} + +set_kind_params + +source ${KUBEVIRTCI_PATH}/cluster/kind/common.sh diff --git a/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/kustomization.yaml b/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/kustomization.yaml new file mode 100644 index 000000000000..46c939e2f42d --- /dev/null +++ b/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/kustomization.yaml @@ -0,0 +1,34 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: sriov +resources: +- sriov-ns.yaml +- sriov-cni-daemonset.yaml +- sriovdp-daemonset.yaml +- sriovdp-config.yaml +images: + - name: nfvpe/sriov-device-plugin + newName: quay.io/kubevirtci/sriov-device-plugin + newTag: v3.3 + - name: nfvpe/sriov-cni + newName: quay.io/kubevirtci/sriov-cni + newTag: v2.6 +patchesJson6902: +- target: + group: apps + version: v1 + kind: DaemonSet + name: kube-sriov-cni-ds-amd64 + path: patch-node-selector.yaml +- target: + group: apps + version: v1 + kind: DaemonSet + name: kube-sriov-device-plugin-amd64 + path: patch-node-selector.yaml +- target: + group: apps + version: v1 + kind: DaemonSet + name: kube-sriov-device-plugin-amd64 + path: patch-sriovdp-resource-prefix.yaml diff --git a/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/multus.yaml b/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/multus.yaml new file mode 100644 index 000000000000..a58750d95596 --- /dev/null +++ b/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/multus.yaml @@ -0,0 +1,247 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: network-attachment-definitions.k8s.cni.cncf.io +spec: + group: k8s.cni.cncf.io + scope: Namespaced + names: + plural: network-attachment-definitions + singular: network-attachment-definition + kind: NetworkAttachmentDefinition + shortNames: + - net-attach-def + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + config: + type: string +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: multus +rules: + - apiGroups: ["k8s.cni.cncf.io"] + resources: + - '*' + verbs: + - '*' + - apiGroups: + - "" + resources: + - pods + - pods/status + verbs: + - get + - update +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: multus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: multus +subjects: +- kind: ServiceAccount + name: multus + namespace: kube-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: multus 
+ namespace: kube-system +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: multus-cni-config + namespace: kube-system + labels: + tier: node + app: multus +data: + # NOTE: If you'd prefer to manually apply a configuration file, you may create one here. + # In the case you'd like to customize the Multus installation, you should change the arguments to the Multus pod + # change the "args" line below from + # - "--multus-conf-file=auto" + # to: + # "--multus-conf-file=/tmp/multus-conf/70-multus.conf" + # Additionally -- you should ensure that the name "70-multus.conf" is the alphabetically first name in the + # /etc/cni/net.d/ directory on each node, otherwise, it will not be used by the Kubelet. + cni-conf.json: | + { + "name": "multus-cni-network", + "type": "multus", + "capabilities": { + "portMappings": true + }, + "delegates": [ + { + "cniVersion": "0.3.1", + "name": "default-cni-network", + "plugins": [ + { + "type": "flannel", + "name": "flannel.1", + "delegate": { + "isDefaultGateway": true, + "hairpinMode": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + ], + "kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig" + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-multus-ds-amd64 + namespace: kube-system + labels: + tier: node + app: multus + name: multus +spec: + selector: + matchLabels: + name: multus + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + tier: node + app: multus + name: multus + spec: + hostNetwork: true + nodeSelector: + kubernetes.io/arch: amd64 + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: multus + containers: + - name: kube-multus + image: nfvpe/multus:v3.4 + command: ["/entrypoint.sh"] + args: + - "--multus-conf-file=auto" + - "--cni-version=0.3.1" + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: true + volumeMounts: + - name: cni + mountPath: /host/etc/cni/net.d + - name: cnibin + mountPath: /host/opt/cni/bin + - name: multus-cfg + mountPath: /tmp/multus-conf + volumes: + - name: cni + hostPath: + path: /etc/cni/net.d + - name: cnibin + hostPath: + path: /opt/cni/bin + - name: multus-cfg + configMap: + name: multus-cni-config + items: + - key: cni-conf.json + path: 70-multus.conf +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-multus-ds-ppc64le + namespace: kube-system + labels: + tier: node + app: multus + name: multus +spec: + selector: + matchLabels: + name: multus + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + tier: node + app: multus + name: multus + spec: + hostNetwork: true + nodeSelector: + kubernetes.io/arch: ppc64le + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: multus + containers: + - name: kube-multus + # ppc64le support requires multus:latest for now. support 3.3 or later. 
+ image: nfvpe/multus:latest-ppc64le + command: ["/entrypoint.sh"] + args: + - "--multus-conf-file=auto" + - "--cni-version=0.3.1" + resources: + requests: + cpu: "100m" + memory: "90Mi" + limits: + cpu: "100m" + memory: "90Mi" + securityContext: + privileged: true + volumeMounts: + - name: cni + mountPath: /host/etc/cni/net.d + - name: cnibin + mountPath: /host/opt/cni/bin + - name: multus-cfg + mountPath: /tmp/multus-conf + volumes: + - name: cni + hostPath: + path: /etc/cni/net.d + - name: cnibin + hostPath: + path: /opt/cni/bin + - name: multus-cfg + configMap: + name: multus-cni-config + items: + - key: cni-conf.json + path: 70-multus.conf diff --git a/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/patch-node-selector.yaml.in b/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/patch-node-selector.yaml.in new file mode 100644 index 000000000000..0117c8cdd5be --- /dev/null +++ b/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/patch-node-selector.yaml.in @@ -0,0 +1,3 @@ +- op: add + path: /spec/template/spec/nodeSelector/$LABEL_KEY + value: "$LABEL_VALUE" diff --git a/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/patch-sriovdp-resource-prefix.yaml.in b/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/patch-sriovdp-resource-prefix.yaml.in new file mode 100644 index 000000000000..563e606a9f58 --- /dev/null +++ b/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/patch-sriovdp-resource-prefix.yaml.in @@ -0,0 +1,3 @@ +- op: add + path: /spec/template/spec/containers/0/args/-1 + value: --resource-prefix=$RESOURCE_PREFIX diff --git a/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/sriov-cni-daemonset.yaml b/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/sriov-cni-daemonset.yaml new file mode 100644 index 000000000000..6a28c146ff2f --- /dev/null +++ b/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/sriov-cni-daemonset.yaml @@ -0,0 +1,47 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-sriov-cni-ds-amd64 + namespace: kube-system + labels: + tier: node + app: sriov-cni +spec: + selector: + matchLabels: + name: sriov-cni + template: + metadata: + labels: + name: sriov-cni + tier: node + app: sriov-cni + spec: + hostNetwork: true + nodeSelector: + beta.kubernetes.io/arch: amd64 + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + containers: + - name: kube-sriov-cni + image: nfvpe/sriov-cni + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + volumeMounts: + - name: cnibin + mountPath: /host/opt/cni/bin + volumes: + - name: cnibin + hostPath: + path: /opt/cni/bin diff --git a/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/sriov-ns.yaml b/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/sriov-ns.yaml new file mode 100644 index 000000000000..bfe55b30d92e --- /dev/null +++ b/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/sriov-ns.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: sriov diff --git a/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/sriovdp-config.yaml.in b/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/sriovdp-config.yaml.in new file mode 100644 index 000000000000..5e9788168111 --- /dev/null +++ b/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/sriovdp-config.yaml.in @@ -0,0 +1,17 
@@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: sriovdp-config + namespace: kube-system +data: + config.json: | + { + "resourceList": [{ + "resourceName": "$RESOURCE_NAME", + "selectors": { + "drivers": $DRIVERS, + "pfNames": $PF_NAMES + } + }] + } diff --git a/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/sriovdp-daemonset.yaml b/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/sriovdp-daemonset.yaml new file mode 100644 index 000000000000..86d17cf6dbc3 --- /dev/null +++ b/cluster-up/cluster/kind-1.22-sriov/sriov-components/manifests/sriovdp-daemonset.yaml @@ -0,0 +1,202 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: sriov-device-plugin + namespace: kube-system + +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-sriov-device-plugin-amd64 + namespace: kube-system + labels: + tier: node + app: sriovdp +spec: + selector: + matchLabels: + name: sriov-device-plugin + template: + metadata: + labels: + name: sriov-device-plugin + tier: node + app: sriovdp + spec: + hostNetwork: true + nodeSelector: + beta.kubernetes.io/arch: amd64 + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + serviceAccountName: sriov-device-plugin + containers: + - name: kube-sriovdp + image: nfvpe/sriov-device-plugin:v3.3 + imagePullPolicy: IfNotPresent + args: + - --log-dir=sriovdp + - --log-level=10 + securityContext: + privileged: true + volumeMounts: + - name: devicesock + mountPath: /var/lib/kubelet/ + readOnly: false + - name: log + mountPath: /var/log + - name: config-volume + mountPath: /etc/pcidp + - name: device-info + mountPath: /var/run/k8s.cni.cncf.io/devinfo/dp + volumes: + - name: devicesock + hostPath: + path: /var/lib/kubelet/ + - name: log + hostPath: + path: /var/log + - name: device-info + hostPath: + path: /var/run/k8s.cni.cncf.io/devinfo/dp + type: DirectoryOrCreate + - name: config-volume + configMap: + name: sriovdp-config + items: + - key: config.json + path: config.json + +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-sriov-device-plugin-ppc64le + namespace: kube-system + labels: + tier: node + app: sriovdp +spec: + selector: + matchLabels: + name: sriov-device-plugin + template: + metadata: + labels: + name: sriov-device-plugin + tier: node + app: sriovdp + spec: + hostNetwork: true + nodeSelector: + beta.kubernetes.io/arch: ppc64le + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + serviceAccountName: sriov-device-plugin + containers: + - name: kube-sriovdp + image: nfvpe/sriov-device-plugin:ppc64le + imagePullPolicy: IfNotPresent + args: + - --log-dir=sriovdp + - --log-level=10 + securityContext: + privileged: true + volumeMounts: + - name: devicesock + mountPath: /var/lib/kubelet/ + readOnly: false + - name: log + mountPath: /var/log + - name: config-volume + mountPath: /etc/pcidp + - name: device-info + mountPath: /var/run/k8s.cni.cncf.io/devinfo/dp + volumes: + - name: devicesock + hostPath: + path: /var/lib/kubelet/ + - name: log + hostPath: + path: /var/log + - name: device-info + hostPath: + path: /var/run/k8s.cni.cncf.io/devinfo/dp + type: DirectoryOrCreate + - name: config-volume + configMap: + name: sriovdp-config + items: + - key: config.json + path: config.json +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-sriov-device-plugin-arm64 + namespace: kube-system + labels: + tier: node + app: sriovdp +spec: + selector: + matchLabels: + name: sriov-device-plugin + template: + metadata: 
+      labels:
+        name: sriov-device-plugin
+        tier: node
+        app: sriovdp
+    spec:
+      hostNetwork: true
+      nodeSelector:
+        beta.kubernetes.io/arch: arm64
+      tolerations:
+      - key: node-role.kubernetes.io/master
+        operator: Exists
+        effect: NoSchedule
+      serviceAccountName: sriov-device-plugin
+      containers:
+      - name: kube-sriovdp
+# this is a temporary image repository for the arm64 architecture, used until the
+# sriov-device-plugin CI/CD allows rebuilding images for multiple architectures
+        image: alexeyperevalov/arm64-sriov-device-plugin
+        imagePullPolicy: IfNotPresent
+        args:
+        - --log-dir=sriovdp
+        - --log-level=10
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - name: devicesock
+          mountPath: /var/lib/kubelet/
+          readOnly: false
+        - name: log
+          mountPath: /var/log
+        - name: config-volume
+          mountPath: /etc/pcidp
+        - name: device-info
+          mountPath: /var/run/k8s.cni.cncf.io/devinfo/dp
+      volumes:
+        - name: devicesock
+          hostPath:
+            path: /var/lib/kubelet/
+        - name: log
+          hostPath:
+            path: /var/log
+        - name: device-info
+          hostPath:
+            path: /var/run/k8s.cni.cncf.io/devinfo/dp
+            type: DirectoryOrCreate
+        - name: config-volume
+          configMap:
+            name: sriovdp-config
+            items:
+            - key: config.json
+              path: config.json
diff --git a/cluster-up/cluster/kind-1.22-sriov/sriov-components/sriov_components.sh b/cluster-up/cluster/kind-1.22-sriov/sriov-components/sriov_components.sh
new file mode 100644
index 000000000000..53a70266a150
--- /dev/null
+++ b/cluster-up/cluster/kind-1.22-sriov/sriov-components/sriov_components.sh
@@ -0,0 +1,206 @@
+#!/bin/bash
+
+MANIFESTS_DIR="${KUBEVIRTCI_PATH}/cluster/${KUBEVIRT_PROVIDER}/sriov-components/manifests"
+MULTUS_MANIFEST="${MANIFESTS_DIR}/multus.yaml"
+
+CUSTOM_MANIFESTS="${KUBEVIRTCI_CONFIG_PATH}/${KUBEVIRT_PROVIDER}/manifests"
+SRIOV_COMPONENTS_MANIFEST="${CUSTOM_MANIFESTS}/sriov-components.yaml"
+
+SRIOV_DEVICE_PLUGIN_CONFIG_TEMPLATE="${MANIFESTS_DIR}/sriovdp-config.yaml.in"
+SRIOV_DEVICE_PLUGIN_CONFIG="${CUSTOM_MANIFESTS}/sriovdp-config.yaml"
+
+PATCH_SRIOVDP_RESOURCE_PREFIX_TEMPLATE="${MANIFESTS_DIR}/patch-sriovdp-resource-prefix.yaml.in"
+PATCH_SRIOVDP_RESOURCE_PREFIX="${CUSTOM_MANIFESTS}/patch-sriovdp-resource-prefix.yaml"
+
+PATCH_NODE_SELECTOR_TEMPLATE="${MANIFESTS_DIR}/patch-node-selector.yaml.in"
+PATCH_NODE_SELECTOR="${CUSTOM_MANIFESTS}/patch-node-selector.yaml"
+
+KUBECONFIG="${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubeconfig"
+KUBECTL="${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubectl --kubeconfig=${KUBECONFIG}"
+
+function _kubectl() {
+  ${KUBECTL} "$@"
+}
+
+function _retry() {
+  local -r tries=$1
+  local -r wait_time=$2
+  local -r action=$3
+  local -r wait_message=$4
+  local -r waiting_action=$5
+
+  eval $action
+  local return_code=$?
+  for i in $(seq $tries); do
+    if [[ $return_code -ne 0 ]]; then
+      echo "[$i/$tries] $wait_message"
+      eval $waiting_action
+      sleep $wait_time
+      eval $action
+      return_code=$?
+    else
+      return 0
+    fi
+  done
+
+  return 1
+}
+
+function _check_all_pods_ready() {
+  all_pods_ready_condition=$(_kubectl get pods -A --no-headers -o custom-columns=':.status.conditions[?(@.type == "Ready")].status')
+  if [ "$?" -eq 0 ]; then
+    pods_not_ready_count=$(grep -cw False <<<"$all_pods_ready_condition")
+    if [ "$pods_not_ready_count" -eq 0 ]; then
+      return 0
+    fi
+  fi
+
+  return 1
+}
+
+# not using kubectl wait since with the sriov operator the pods get restarted a couple of times and this is
+# more reliable
+function sriov_components::wait_pods_ready() {
+  local -r tries=30
+  local -r wait_time=10
+
+  local -r wait_message="Waiting for all pods to become ready..."
+  local -r error_message="Not all pods were ready after $(($tries * $wait_time)) seconds"
+
+  local -r get_pods='_kubectl get pods --all-namespaces'
+  local -r action="_check_all_pods_ready"
+
+  set +x
+  trap "set -x" RETURN
+
+  if ! _retry "$tries" "$wait_time" "$action" "$wait_message" "$get_pods"; then
+    echo $error_message
+    return 1
+  fi
+
+  echo "all pods are ready"
+  return 0
+}
+
+function sriov_components::wait_allocatable_resource() {
+  local -r node=$1
+  local resource_name=$2
+  local -r expected_value=$3
+
+  local -r tries=48
+  local -r wait_time=10
+
+  local -r wait_message="wait for $node node to have allocatable resource: $resource_name: $expected_value"
+  local -r error_message="node $node doesn't have allocatable resource $resource_name:$expected_value"
+
+  # it is necessary to add '\' before '.' in the resource name.
+  resource_name=$(echo $resource_name | sed s/\\./\\\\\./g)
+  local -r action='_kubectl get node $node -ocustom-columns=:.status.allocatable.$resource_name --no-headers | grep -w $expected_value'
+
+  if ! _retry $tries $wait_time "$action" "$wait_message"; then
+    echo $error_message
+    return 1
+  fi
+
+  return 0
+}
+
+function sriov_components::deploy_multus() {
+  echo 'Deploying Multus'
+  sed "s#nfvpe/multus#quay.io/kubevirtci/multus#" "$MULTUS_MANIFEST" | _kubectl apply -f -
+
+  return 0
+}
+
+function sriov_components::deploy() {
+  local -r pf_names=$1
+  local -r drivers=$2
+  local -r resource_prefix=$3
+  local -r resource_name=$4
+  local -r label_key=$5
+  local -r label_value=$6
+
+  _create_custom_manifests_dir
+  _prepare_node_selector_patch "$label_key" "$label_value"
+  _prepare_sriovdp_resource_prefix_patch "$resource_prefix"
+  _prepare_device_plugin_config \
+    "$pf_names" \
+    "$resource_name" \
+    "$drivers"
+  _deploy_sriov_components
+
+  return 0
+}
+
+function _create_custom_manifests_dir() {
+  mkdir -p "$CUSTOM_MANIFESTS"
+
+  cp -f $(find "$MANIFESTS_DIR"/*.yaml) "$CUSTOM_MANIFESTS"
+
+  return 0
+}
+
+function _prepare_node_selector_patch() {
+  local -r label_key=$1
+  local -r label_value=$2
+
+  (
+    export LABEL_KEY=$label_key
+    export LABEL_VALUE=$label_value
+    envsubst < "$PATCH_NODE_SELECTOR_TEMPLATE" > "$PATCH_NODE_SELECTOR"
+  )
+}
+
+function _prepare_sriovdp_resource_prefix_patch() {
+  local -r resource_prefix=$1
+
+  (
+    export RESOURCE_PREFIX=$resource_prefix
+    envsubst < "$PATCH_SRIOVDP_RESOURCE_PREFIX_TEMPLATE" > "$PATCH_SRIOVDP_RESOURCE_PREFIX"
+  )
+}
+
+function _prepare_device_plugin_config() {
+  local -r pf_names=$1
+  local -r resource_name=$2
+  local -r drivers=$3
+
+  (
+    export RESOURCE_NAME=$resource_name
+    export DRIVERS=$(_format_json_array "$drivers")
+    export PF_NAMES=$(_format_json_array "$pf_names")
+    envsubst < "$SRIOV_DEVICE_PLUGIN_CONFIG_TEMPLATE" > "$SRIOV_DEVICE_PLUGIN_CONFIG"
+  )
+
+  return 0
+}
+
+function _format_json_array() {
+  local -r string=$1
+
+  local json_array="$string"
+  # Replace all spaces with ",": aa bb -> aa","bb
+  local -r replace='","'
+  json_array="${json_array// /$replace}"
+
+  # Add opening quotes for the first element, and closing quotes for the last element:
+  # aa","bb -> "aa","bb"
+  json_array="\"${json_array}\""
+
+  # Add brackets: "aa","bb" -> ["aa","bb"]
+  json_array="[${json_array}]"
+
+  echo "$json_array"
+}
+
+function _deploy_sriov_components() {
+  _kubectl kustomize "$CUSTOM_MANIFESTS" >"$SRIOV_COMPONENTS_MANIFEST"
+
+  echo "Deploying SRIOV components:"
+  cat "$SRIOV_COMPONENTS_MANIFEST"
+
+  _kubectl apply -f "$SRIOV_COMPONENTS_MANIFEST"
+
+  return 0
+}
+
diff --git a/cluster-up/cluster/kind-1.22-sriov/sriov-node/configure_vfs.sh b/cluster-up/cluster/kind-1.22-sriov/sriov-node/configure_vfs.sh
new file mode 100755
index 000000000000..c08dd82c309a
--- /dev/null
+++ b/cluster-up/cluster/kind-1.22-sriov/sriov-node/configure_vfs.sh
@@ -0,0 +1,104 @@
+#! /bin/bash
+
+set -ex
+
+function configure_vf_driver() {
+  local -r vf_sys_device=$1
+  local -r driver=$2
+
+  vf_pci_address=$(basename $vf_sys_device)
+  # Check if the VF is bound to a different driver
+  if [ -d "$vf_sys_device/driver" ]; then
+    vf_bus_pci_device_driver=$(readlink -e $vf_sys_device/driver)
+    vf_driver_name=$(basename $vf_bus_pci_device_driver)
+
+    # Check if the VF is already configured with the supported driver
+    if [[ $vf_driver_name == $driver ]]; then
+      return
+    else
+      echo "Unbind VF $vf_pci_address from the $vf_driver_name driver"
+      echo "$vf_pci_address" >> "$vf_bus_pci_device_driver/unbind"
+    fi
+  fi
+
+  echo "Bind VF $vf_pci_address to the $driver driver"
+  echo "$driver" >> "$vf_sys_device/driver_override"
+  echo "$vf_pci_address" >> "/sys/bus/pci/drivers/$driver/bind"
+  echo "" >> "$vf_sys_device/driver_override"
+
+  return 0
+}
+
+function create_vfs() {
+  local -r pf_net_device=$1
+  local -r vfs_count=$2
+
+  local -r pf_name=$(basename $pf_net_device)
+  local -r pf_sys_device=$(readlink -e $pf_net_device)
+
+  local -r sriov_totalvfs_content=$(cat $pf_sys_device/sriov_totalvfs)
+  [ $sriov_totalvfs_content -lt $vfs_count ] && \
+    echo "FATAL: PF $pf_name, the requested VFs count must not exceed sriov_totalvfs: $sriov_totalvfs_content" >&2 && return 1
+
+  local -r sriov_numvfs_content=$(cat $pf_sys_device/sriov_numvfs)
+  if [ $sriov_numvfs_content -ne $vfs_count ]; then
+    echo "Creating $vfs_count VFs on PF $pf_name"
+    echo 0 >> "$pf_sys_device/sriov_numvfs"
+    echo "$vfs_count" >> "$pf_sys_device/sriov_numvfs"
+    sleep 3
+  fi
+
+  return 0
+}
+
+function validate_run_with_sudo() {
+  [ "$(id -u)" -ne 0 ] && echo "FATAL: This script requires sudo privileges" >&2 && return 1
+
+  return 0
+}
+
+function validate_sysfs_mount_as_rw() {
+  local -r sysfs_permissions=$(grep -Po 'sysfs.*\K(ro|rw)' /proc/mounts)
+  [ "$sysfs_permissions" != rw ] && echo "FATAL: sysfs is read-only, try to remount as RW" >&2 && return 1
+
+  return 0
+}
+
+function ensure_driver_is_loaded() {
+  local -r driver_name=$1
+  local -r module_name=$2
+
+  if ! grep "$module_name" /proc/modules; then
modprobe "$driver_name"; then + echo "FATAL: failed to load $DRIVER kernel module $DRIVER_KMODULE" >&2 && return 1 + fi + fi + + return 0 +} + +DRIVER="${DRIVER:-vfio-pci}" +DRIVER_KMODULE="${DRIVER_KMODULE:-vfio_pci}" + +validate_run_with_sudo +validate_sysfs_mount_as_rw +ensure_driver_is_loaded $DRIVER $DRIVER_KMODULE + +sriov_pfs=( $(find /sys/class/net/*/device/sriov_numvfs) ) +[ "${#sriov_pfs[@]}" -eq 0 ] && echo "FATAL: Could not find available sriov PFs" >&2 && exit 1 + +for pf_name in $sriov_pfs; do + pf_device=$(dirname "$pf_name") + + echo "Create VF's" + sriov_numvfs=$(cat "$pf_device/sriov_totalvfs") + create_vfs "$pf_device" "$sriov_numvfs" + + echo "Configuring VF's drivers" + # /sys/class/net//device/virtfn* + vfs_sys_devices=$(readlink -e $pf_device/virtfn*) + for vf in $vfs_sys_devices; do + configure_vf_driver "$vf" $DRIVER + ls -l "$vf/driver" + done +done diff --git a/cluster-up/cluster/kind-1.22-sriov/sriov-node/node.sh b/cluster-up/cluster/kind-1.22-sriov/sriov-node/node.sh new file mode 100644 index 000000000000..6981c83fb379 --- /dev/null +++ b/cluster-up/cluster/kind-1.22-sriov/sriov-node/node.sh @@ -0,0 +1,110 @@ +#!/bin/bash + +SCRIPT_PATH=${SCRIPT_PATH:-$(dirname "$(realpath "$0")")} + +CONFIGURE_VFS_SCRIPT_PATH="${SCRIPT_PATH}/configure_vfs.sh" +PFS_IN_USE=${PFS_IN_USE:-} + +function node::discover_host_pfs() { + local -r sriov_pfs=( $(find /sys/class/net/*/device/sriov_numvfs) ) + [ "${#sriov_pfs[@]}" -eq 0 ] && echo "FATAL: Could not find available sriov PFs on host" >&2 && return 1 + + local pf_name + local pf_names=() + for pf in "${sriov_pfs[@]}"; do + pf_name="${pf%%/device/*}" + pf_name="${pf_name##*/}" + if [ $(echo "${PF_BLACKLIST[@]}" | grep "${pf_name}") ]; then + continue + fi + + pfs_names+=( $pf_name ) + done + + echo "${pfs_names[@]}" +} + +# node::configure_sriov_pfs_and_vfs moves SRIOV PF's to nodes netns, +# create SRIOV VF's and configure their driver on each node. +# Exports 'PFS_IN_USE' env variable with the list of SRIOV PF's +# that been moved to nodes netns. 
+function node::configure_sriov_pfs_and_vfs() {
+  local -r nodes_array=($1)
+  local -r pfs_names_array=($2)
+  local -r pf_count_per_node=$3
+  local -r pfs_in_use_var_name=$4
+
+  local -r config_vf_script=$(basename "$CONFIGURE_VFS_SCRIPT_PATH")
+  local pfs_to_move=()
+  local pfs_array_offset=0
+  local pfs_in_use=()
+  local node_exec
+
+  # 'iplink' learns which network namespaces there are by checking /var/run/netns
+  mkdir -p /var/run/netns
+  for node in "${nodes_array[@]}"; do
+    prepare_node_netns "$node"
+
+    ## Move PFs to the node netns
+    # Slice '$pfs_names_array' to get a unique slice for each node
+    # with '$pf_count_per_node' PF names
+    pfs_to_move=( "${pfs_names_array[@]:$pfs_array_offset:$pf_count_per_node}" )
+    echo "Moving '${pfs_to_move[*]}' PFs to '$node' netns"
+    for pf_name in "${pfs_to_move[@]}"; do
+      move_pf_to_node_netns "$node" "$pf_name"
+    done
+    # Increment the offset for the next slice
+    pfs_array_offset=$((pfs_array_offset + pf_count_per_node))
+    # Record every PF moved to this node, not just the last one
+    pfs_in_use+=( "${pfs_to_move[@]}" )
+
+    # KIND mounts sysfs as read-only by default, remount as R/W
+    node_exec="docker exec $node"
+    $node_exec mount -o remount,rw /sys
+    $node_exec chmod 666 /dev/vfio/vfio
+
+    # Create and configure SR-IOV Virtual Functions on the SR-IOV node
+    docker cp "$CONFIGURE_VFS_SCRIPT_PATH" "$node:/"
+    $node_exec bash -c "DRIVER=$VFS_DRIVER DRIVER_KMODULE=$VFS_DRIVER_KMODULE ./$config_vf_script"
+
+    _kubectl label node $node $SRIOV_NODE_LABEL
+  done
+
+  # Set a new variable with the used PF names that will be consumed by the caller
+  eval $pfs_in_use_var_name="'${pfs_in_use[*]}'"
+}
+
+function prepare_node_netns() {
+  local -r node_name=$1
+  local -r node_pid=$(docker inspect -f '{{.State.Pid}}' "$node_name")
+
+  # Docker does not create the required symlink for a container netns,
+  # which prevents iplink from learning about that container netns.
+  # Thus it is necessary to create a symlink from the current
+  # worker node (container) netns to /var/run/netns (consumed by iplink).
+  # The node container netns, named after the node, will then be visible.
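+  # For example (hypothetical values): for a node container named 'sriov-worker'
+  # whose PID on the host is 4242, the command below creates
+  # /var/run/netns/sriov-worker -> /proc/4242/ns/net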
+ ln -sf "/proc/$node_pid/ns/net" "/var/run/netns/$node_name" +} + +function move_pf_to_node_netns() { + local -r node_name=$1 + local -r pf_name=$2 + + # Move PF to node network-namespace + ip link set "$pf_name" netns "$node_name" + # Ensure current PF is up + ip netns exec "$node_name" ip link set up dev "$pf_name" + ip netns exec "$node_name" ip link show +} + +function node::total_vfs_count() { + local -r node_name=$1 + local -r node_pid=$(docker inspect -f '{{.State.Pid}}' "$node_name") + local -r pfs_sriov_numvfs=( $(cat /proc/$node_pid/root/sys/class/net/*/device/sriov_numvfs) ) + local total_vfs_on_node=0 + + for num_vfs in "${pfs_sriov_numvfs[@]}"; do + total_vfs_on_node=$((total_vfs_on_node + num_vfs)) + done + + echo "$total_vfs_on_node" +} diff --git a/cluster-up/cluster/kind-1.22-sriov/sriov_operator.sh b/cluster-up/cluster/kind-1.22-sriov/sriov_operator.sh new file mode 100644 index 000000000000..4b797dc9de76 --- /dev/null +++ b/cluster-up/cluster/kind-1.22-sriov/sriov_operator.sh @@ -0,0 +1,212 @@ +#!/bin/bash + +set -ex + +KUBECONFIG_PATH="${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubeconfig" +CERTCREATOR_PATH="${KUBEVIRTCI_PATH}/cluster/$KUBEVIRT_PROVIDER/certcreator" + +OPERATOR_GIT_HASH=8d3c30de8ec5a9a0c9eeb84ea0aa16ba2395cd68 # release-4.4 + +# This function gets a command and invoke it repeatedly +# until the command return code is zero +function retry { + local -r tries=$1 + local -r wait_time=$2 + local -r action=$3 + local -r wait_message=$4 + local -r waiting_action=$5 + + eval $action + local return_code=$? + for i in $(seq $tries); do + if [[ $return_code -ne 0 ]] ; then + echo "[$i/$tries] $wait_message" + eval $waiting_action + sleep $wait_time + eval $action + return_code=$? + else + return 0 + fi + done + + return 1 +} + +function wait_for_daemonSet { + local name=$1 + local namespace=$2 + local required_replicas=$3 + + if [[ $namespace != "" ]];then + namespace="-n $namespace" + fi + + if (( required_replicas < 0 )); then + echo "DaemonSet $name ready replicas number is not valid: $required_replicas" + return 1 + fi + + local -r tries=30 + local -r wait_time=10 + wait_message="Waiting for DaemonSet $name to have $required_replicas ready replicas" + error_message="DaemonSet $name did not have $required_replicas ready replicas" + action="_kubectl get daemonset $namespace $name -o jsonpath='{.status.numberReady}' | grep -w $required_replicas" + + if ! retry "$tries" "$wait_time" "$action" "$wait_message";then + echo $error_message + return 1 + fi + + return 0 +} + +function wait_k8s_object { + local -r object_type=$1 + local -r name=$2 + local namespace=$3 + + local -r tries=60 + local -r wait_time=3 + + local -r wait_message="Waiting for $object_type $name" + local -r error_message="$object_type $name at $namespace namespace found" + + if [[ $namespace != "" ]];then + namespace="-n $namespace" + fi + + local -r action="_kubectl get $object_type $name $namespace -o custom-columns=NAME:.metadata.name --no-headers" + + if ! retry "$tries" "$wait_time" "$action" "$wait_message";then + echo $error_message + return 1 + fi + + return 0 +} + +function _check_all_pods_ready() { + all_pods_ready_condition=$(_kubectl get pods -A --no-headers -o custom-columns=':.status.conditions[?(@.type == "Ready")].status') + if [ "$?" 
+    pods_not_ready_count=$(grep -cw False <<< "$all_pods_ready_condition")
+    if [ "$pods_not_ready_count" -eq 0 ]; then
+      return 0
+    fi
+  fi
+
+  return 1
+}
+
+# not using kubectl wait since with the sriov operator the pods get restarted a couple of times and this is
+# more reliable
+function sriov_operator::wait_pods_ready {
+  local -r tries=30
+  local -r wait_time=10
+
+  local -r wait_message="Waiting for all pods to become ready..."
+  local -r error_message="Not all pods were ready after $(($tries*$wait_time)) seconds"
+
+  local -r get_pods='_kubectl get pods --all-namespaces'
+  local -r action="_check_all_pods_ready"
+
+  set +x
+  trap "set -x" RETURN
+
+  if ! retry "$tries" "$wait_time" "$action" "$wait_message" "$get_pods"; then
+    echo $error_message
+    return 1
+  fi
+
+  echo "all pods are ready"
+  return 0
+}
+
+function sriov_operator::wait_allocatable_resource {
+  local -r node=$1
+  local resource_name=$2
+  local -r expected_value=$3
+
+  local -r tries=48
+  local -r wait_time=10
+
+  local -r wait_message="wait for $node node to have allocatable resource: $resource_name: $expected_value"
+  local -r error_message="node $node doesn't have allocatable resource $resource_name:$expected_value"
+
+  # it is necessary to add '\' before '.' in the resource name.
+  resource_name=$(echo $resource_name | sed s/\\./\\\\\./g)
+  local -r action='_kubectl get node $node -ocustom-columns=:.status.allocatable.$resource_name --no-headers | grep -w $expected_value'
+
+  if ! retry $tries $wait_time "$action" "$wait_message"; then
+    echo $error_message
+    return 1
+  fi
+
+  return 0
+}
+
+function sriov_operator::deploy_multus {
+  echo 'Deploying Multus'
+  _kubectl create -f $MANIFESTS_DIR/multus.yaml
+
+  echo 'Waiting for the Multus deployment to become ready'
+  daemonset_name=$(cat $MANIFESTS_DIR/multus.yaml | grep -i daemonset -A 3 | grep -Po '(?<=name:) \S*amd64$')
+  daemonset_namespace=$(cat $MANIFESTS_DIR/multus.yaml | grep -i daemonset -A 3 | grep -Po '(?<=namespace:) \S*$' | head -1)
+  required_replicas=$(_kubectl get daemonset $daemonset_name -n $daemonset_namespace -o jsonpath='{.status.desiredNumberScheduled}')
+  wait_for_daemonSet $daemonset_name $daemonset_namespace $required_replicas
+
+  return 0
+}
+
+function sriov_operator::deploy_sriov_operator {
+  echo 'Downloading the SR-IOV operator'
+  operator_path=${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/sriov-network-operator-${OPERATOR_GIT_HASH}
+  if [ ! -d $operator_path ]; then
+    curl -LSs https://github.com/openshift/sriov-network-operator/archive/${OPERATOR_GIT_HASH}/sriov-network-operator.tar.gz | tar xz -C ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/
+  fi
+
+  echo 'Installing the SR-IOV operator'
+  pushd $operator_path
+    export RELEASE_VERSION=4.4
+    export SRIOV_NETWORK_OPERATOR_IMAGE=quay.io/openshift/origin-sriov-network-operator:${RELEASE_VERSION}
+    export SRIOV_NETWORK_CONFIG_DAEMON_IMAGE=quay.io/openshift/origin-sriov-network-config-daemon:${RELEASE_VERSION}
+    export SRIOV_NETWORK_WEBHOOK_IMAGE=quay.io/openshift/origin-sriov-network-webhook:${RELEASE_VERSION}
+    export NETWORK_RESOURCES_INJECTOR_IMAGE=quay.io/openshift/origin-sriov-dp-admission-controller:${RELEASE_VERSION}
+    export SRIOV_CNI_IMAGE=quay.io/openshift/origin-sriov-cni:${RELEASE_VERSION}
+    export SRIOV_DEVICE_PLUGIN_IMAGE=quay.io/openshift/origin-sriov-network-device-plugin:${RELEASE_VERSION}
+    export OPERATOR_EXEC=${KUBECTL}
+    make deploy-setup-k8s SHELL=/bin/bash # on prow nodes the default shell is dash and some commands do not work there
+  popd
+
+  echo 'Generating webhook certificates for the SR-IOV operator webhooks'
+  pushd "${CERTCREATOR_PATH}"
+    go run . -namespace sriov-network-operator -secret operator-webhook-service -hook operator-webhook -kubeconfig $KUBECONFIG_PATH
+    go run . -namespace sriov-network-operator -secret network-resources-injector-secret -hook network-resources-injector -kubeconfig $KUBECONFIG_PATH
+  popd
+
+  echo 'Setting caBundle for SR-IOV webhooks'
+  wait_k8s_object "validatingwebhookconfiguration" "operator-webhook-config"
+  _kubectl patch validatingwebhookconfiguration operator-webhook-config --patch '{"webhooks":[{"name":"operator-webhook.sriovnetwork.openshift.io", "clientConfig": { "caBundle": "'"$(cat $CERTCREATOR_PATH/operator-webhook.cert)"'" }}]}'
+
+  wait_k8s_object "mutatingwebhookconfiguration" "operator-webhook-config"
+  _kubectl patch mutatingwebhookconfiguration operator-webhook-config --patch '{"webhooks":[{"name":"operator-webhook.sriovnetwork.openshift.io", "clientConfig": { "caBundle": "'"$(cat $CERTCREATOR_PATH/operator-webhook.cert)"'" }}]}'
+
+  wait_k8s_object "mutatingwebhookconfiguration" "network-resources-injector-config"
+  _kubectl patch mutatingwebhookconfiguration network-resources-injector-config --patch '{"webhooks":[{"name":"network-resources-injector-config.k8s.io", "clientConfig": { "caBundle": "'"$(cat $CERTCREATOR_PATH/network-resources-injector.cert)"'" }}]}'
+
+  return 0
+}
+
+function sriov_operator::apply_sriov_node_policy {
+  local -r policy_file=$1
+  local -r node_pf=$2
+  local -r num_vfs=$3
+
+  # Substitute $NODE_PF and $NODE_PF_NUM_VFS and create the SriovNetworkNodePolicy CR
+  local -r policy=$(NODE_PF=$node_pf NODE_PF_NUM_VFS=$num_vfs envsubst < $policy_file)
+  echo "Applying SriovNetworkNodePolicy:"
+  echo "$policy"
+  _kubectl create -f - <<< "$policy"
+
+  return 0
+}
diff --git a/cluster-up/cluster/kind-1.22-sriov/sriovdp_setup.sh b/cluster-up/cluster/kind-1.22-sriov/sriovdp_setup.sh
new file mode 100755
index 000000000000..2eed8318f248
--- /dev/null
+++ b/cluster-up/cluster/kind-1.22-sriov/sriovdp_setup.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+get_sriov_pci_root_addresses() {
+    for dir in $(find /sys/devices/ -name sriov_totalvfs -exec dirname {} \;); do
+        if [ $(cat $dir/sriov_numvfs) -gt 0 ]; then
+            # use perl because sed doesn't support non-greedy matching
+            basename $dir | perl -pe 's|(.*?:)(.*)|\2|'
+        fi
+    done
+}
+
+create_pci_string() {
"${pci_addresses[@]}" | xargs printf "\"%s\" " )) + local quoted_as_string=${quoted_values[@]} + if [ "$quoted_as_string" = "\"\"" ]; then + pci_string="" + else + pci_string=${quoted_as_string// /, } + fi +} + +sriov_device_plugin() { + pci_addresses=$(get_sriov_pci_root_addresses) + create_pci_string + + cat < /etc/pcidp/config.json +{ + "resourceList": + [ + { + "resourceName": "sriov", + "rootDevices": [$pci_string], + "sriovMode": true, + "deviceType": "vfio" + } + ] +} +EOF +} + +mkdir -p /etc/pcidp +sriov_device_plugin diff --git a/cluster-up/cluster/kind-k8s-1.17/README.md b/cluster-up/cluster/kind-k8s-1.19/README.md similarity index 81% rename from cluster-up/cluster/kind-k8s-1.17/README.md rename to cluster-up/cluster/kind-k8s-1.19/README.md index 1e3a60b67195..1618663d7bb3 100644 --- a/cluster-up/cluster/kind-k8s-1.17/README.md +++ b/cluster-up/cluster/kind-k8s-1.19/README.md @@ -1,6 +1,6 @@ -# K8S 1.17 in a Kind cluster +# K8S 1.19 in a Kind cluster -Provides a pre-deployed k8s cluster with version 1.17 that runs using [kind](https://github.com/kubernetes-sigs/kind) The cluster is completely ephemeral and is recreated on every cluster restart. +Provides a pre-deployed k8s cluster with version 1.19 that runs using [kind](https://github.com/kubernetes-sigs/kind) The cluster is completely ephemeral and is recreated on every cluster restart. The KubeVirt containers are built on the local machine and are then pushed to a registry which is exposed at `localhost:5000`. @@ -8,7 +8,7 @@ The KubeVirt containers are built on the local machine and are then pushed to a ## Bringing the cluster up ```bash -export KUBEVIRT_PROVIDER=kind-k8s-1.17 +export KUBEVIRT_PROVIDER=kind-k8s-1.19 export KUBEVIRT_NUM_NODES=2 # master + one node make cluster-up ``` @@ -18,14 +18,14 @@ The cluster can be accessed as usual: ```bash $ cluster-up/kubectl.sh get nodes NAME STATUS ROLES AGE VERSION -kind-1.17-control-plane Ready master 105s v1.17.x -kind-1.17-worker Ready 71s v1.17.x +kind-1.19-control-plane Ready master 105s v1.19.x +kind-1.19-worker Ready 71s v1.19.x ``` ## Bringing the cluster down ```bash -export KUBEVIRT_PROVIDER=kind-k8s-1.17 +export KUBEVIRT_PROVIDER=kind-k8s-1.19 make cluster-down ``` diff --git a/cluster-up/cluster/kind-k8s-1.17/provider.sh b/cluster-up/cluster/kind-k8s-1.19/provider.sh similarity index 72% rename from cluster-up/cluster/kind-k8s-1.17/provider.sh rename to cluster-up/cluster/kind-k8s-1.19/provider.sh index 37787182a109..721be7e3a5a2 100644 --- a/cluster-up/cluster/kind-k8s-1.17/provider.sh +++ b/cluster-up/cluster/kind-k8s-1.19/provider.sh @@ -4,12 +4,23 @@ set -e DOCKER="${CONTAINER_RUNTIME:-docker}" -export CLUSTER_NAME="kind-1.17" +DEFAULT_CLUSTER_NAME="kind-1.19" +DEFAULT_HOST_PORT=5000 +ALTERNATE_HOST_PORT=5001 +export CLUSTER_NAME=${CLUSTER_NAME:-$DEFAULT_CLUSTER_NAME} + +if [ "$CLUSTER_NAME" = "$DEFAULT_CLUSTER_NAME" ]; then + export HOST_PORT=$DEFAULT_HOST_PORT +else + export HOST_PORT=$ALTERNATE_HOST_PORT +fi + +TESTS_NS="${TESTS_NS:-kubevirt-test-default1}" function set_kind_params() { - export KIND_NODE_IMAGE="${KIND_NODE_IMAGE:-kindest/node:v1.17.2}" - export KIND_VERSION="${KIND_VERSION:-0.7.0}" - export KUBECTL_PATH="${KUBECTL_PATH:-/kind/bin/kubectl}" + export KIND_VERSION="${KIND_VERSION:-0.11.1}" + export KIND_NODE_IMAGE="${KIND_NODE_IMAGE:-kindest/node:v1.19.11@sha256:07db187ae84b4b7de440a73886f008cf903fcf5764ba8106a9fd5243d6f32729}" + export KUBECTL_PATH="${KUBECTL_PATH:-/bin/kubectl}" } function up() { @@ -27,10 +38,6 @@ function up() { mount_disk 
$node $i done $DOCKER exec $node bash -c "chmod -R 777 /var/local/kubevirt-storage/local-volume" - - # Create a unique UUID file reference file for each node, that will be mounted in order to support - # Migration in Kind providers. - $DOCKER exec $node bash -c 'cat /proc/sys/kernel/random/uuid > /kind/product_uuid' done # create the `local` storage class - which functional tests assume to exist @@ -39,10 +46,7 @@ function up() { # Since Kind provider uses containers as nodes, the UUID on all of them will be the same, # and Migration by libvirt would be blocked, because migrate between the same UUID is forbidden. # Enable PodPreset so we can use it in order to mount a fake UUID for each launcher pod. - $DOCKER exec kind-1.17-control-plane bash -c 'sed -i \ - -e "s/NodeRestriction/NodeRestriction,PodPreset/" \ - -e "/NodeRestriction,PodPreset/ a\ - --runtime-config=settings.k8s.io/v1alpha1=true" \ - /etc/kubernetes/manifests/kube-apiserver.yaml' + podpreset::expose_unique_product_uuid_per_node "$CLUSTER_NAME" "$TESTS_NS" } function mount_disk() { @@ -56,3 +60,4 @@ function mount_disk() { set_kind_params source ${KUBEVIRTCI_PATH}/cluster/kind/common.sh +source ${KUBEVIRTCI_PATH}/cluster/kind/podpreset.sh diff --git a/cluster-up/cluster/kind/README.md b/cluster-up/cluster/kind/README.md index b398bf7c64c1..0b0eceb1c299 100644 --- a/cluster-up/cluster/kind/README.md +++ b/cluster-up/cluster/kind/README.md @@ -10,4 +10,4 @@ A kind cluster must specify: The provider is supposed to copy a valid `kind.yaml` file under `${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml` -Check [kind-k8s-1.17](../kind-k8s-1.17) or [kind-k8s-sriov-1.17.0](kind-k8s-sriov-1.17.0) as examples on how to implement a kind cluster provider. +Check [kind-k8s-1.19](../kind-k8s-1.19) or [kind-1.22-sriov](kind-1.22-sriov) as examples on how to implement a kind cluster provider. diff --git a/cluster-up/cluster/kind/check-cluster-up.sh b/cluster-up/cluster/kind/check-cluster-up.sh new file mode 100755 index 000000000000..f73877782d67 --- /dev/null +++ b/cluster-up/cluster/kind/check-cluster-up.sh @@ -0,0 +1,77 @@ +#!/bin/bash +# +# This file is part of the KubeVirt project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Copyright 2021 Red Hat, Inc. + +set -exuo pipefail + +SCRIPT_PATH=$(dirname "$(realpath "$0")") + +kubevirtci_path="$(realpath "${SCRIPT_PATH}/../../..")/" +PROVIDER_PATH="${kubevirtci_path}/cluster-up/cluster/${KUBEVIRT_PROVIDER}" + +RUN_KUBEVIRT_CONFORMANCE=${RUN_KUBEVIRT_CONFORMANCE:-"false"} + +( + cd $kubevirtci_path + kubectl="./cluster-up/kubectl.sh" + echo "Wait for pods to be ready.." 
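+    # Note: a single 'kubectl wait' call can fail while pods are still being
+    # created, so the waits below retry in a loop until every pod reports Ready
+    # or the 5 minute timeout expires.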
+    timeout 5m bash -c "until ${kubectl} wait --for=condition=Ready pod --timeout=30s --all -A; do sleep 1; done"
+    timeout 5m bash -c "until ${kubectl} wait --for=condition=Ready pod --timeout=30s -n kube-system --all; do sleep 1; done"
+    ${kubectl} get nodes
+    ${kubectl} get pods -A
+    echo ""
+
+    nodes=$(${kubectl} get nodes --no-headers | awk '{print $1}')
+    for node in $nodes; do
+        node_exec="docker exec ${node}"
+        echo "[$node] network interfaces status:"
+        ${node_exec} ip a
+        echo ""
+        echo "[$node] route table:"
+        ${node_exec} ip r
+        echo ""
+        echo "[$node] hosts file:"
+        ${node_exec} cat /etc/hosts
+        echo ""
+        echo "[$node] resolve config:"
+        ${node_exec} cat /etc/resolv.conf
+        echo ""
+    done
+
+    if [ "$RUN_KUBEVIRT_CONFORMANCE" == "true" ]; then
+        nightly_build_base_url="https://storage.googleapis.com/kubevirt-prow/devel/nightly/release/kubevirt/kubevirt"
+        latest=$(curl -sL "${nightly_build_base_url}/latest")
+
+        echo "Deploy the latest nightly KubeVirt build"
+        if [ "$(${kubectl} get kubevirts -n kubevirt kubevirt -ojsonpath='{.status.phase}')" != "Deployed" ]; then
+            ${kubectl} apply -f "${nightly_build_base_url}/${latest}/kubevirt-operator.yaml"
+            ${kubectl} apply -f "${nightly_build_base_url}/${latest}/kubevirt-cr.yaml"
+        fi
+        ${kubectl} wait -n kubevirt kv kubevirt --for condition=Available --timeout 15m
+
+        echo "Run the latest nightly KubeVirt build conformance tests"
+        kubevirt_plugin="--plugin ${nightly_build_base_url}/${latest}/conformance.yaml"
+        SONOBUOY_EXTRA_ARGS="${SONOBUOY_EXTRA_ARGS} ${kubevirt_plugin}"
+
+        commit=$(curl -sL "${nightly_build_base_url}/${latest}/commit")
+        commit="${commit:0:9}"
+        container_tag="--plugin-env kubevirt-conformance.CONTAINER_TAG=${latest}_${commit}"
+        SONOBUOY_EXTRA_ARGS="${SONOBUOY_EXTRA_ARGS} ${container_tag}"
+
+        hack/conformance.sh ${PROVIDER_PATH}/conformance.json
+    fi
+)
diff --git a/cluster-up/hack/common.sh b/cluster-up/hack/common.sh
index 00dcf30fbd53..e2c3835b8da3 100644
--- a/cluster-up/hack/common.sh
+++ b/cluster-up/hack/common.sh
@@ -40,4 +40,4 @@ provider_prefix=${JOB_NAME:-${KUBEVIRT_PROVIDER}}${EXECUTOR_NUMBER}
 job_prefix=${JOB_NAME:-kubevirt}${EXECUTOR_NUMBER}
 
 mkdir -p $KUBEVIRTCI_CONFIG_PATH/$KUBEVIRT_PROVIDER
-KUBEVIRTCI_TAG=2110191322-87b65c3
+KUBEVIRTCI_TAG=2110251848-8198e9c
diff --git a/cluster-up/version.txt b/cluster-up/version.txt
index ec89c8770661..5f26c0063d4a 100644
--- a/cluster-up/version.txt
+++ b/cluster-up/version.txt
@@ -1 +1 @@
-2110191322-87b65c3
+2110251848-8198e9c
diff --git a/hack/config-default.sh b/hack/config-default.sh
index 90f60ee8fcea..269e0d1b87ad 100644
--- a/hack/config-default.sh
+++ b/hack/config-default.sh
@@ -11,7 +11,7 @@ cdi_namespace=cdi
 image_pull_policy=${IMAGE_PULL_POLICY:-IfNotPresent}
 verbosity=${VERBOSITY:-2}
 package_name=${PACKAGE_NAME:-kubevirt-dev}
-kubevirtci_git_hash="2110191322-87b65c3"
+kubevirtci_git_hash="2110251848-8198e9c"
 conn_check_ipv4_address=${CONN_CHECK_IPV4_ADDRESS:-""}
 conn_check_ipv6_address=${CONN_CHECK_IPV6_ADDRESS:-""}
 conn_check_dns=${CONN_CHECK_DNS:-""}